def correlate_info(data1, data2, NBINS=NBINS, RMIN=1, RMAX=2, BOXSIZE=BOXSIZE, WRAP=WRAP):
    if data1 is not None:
        if RMAX is None:
            RMAX = BOXSIZE

        if WRAP:
            wrap_length = BOXSIZE
        else:
            wrap_length = None

        dataset1 = correlate.points(data1, boxsize=wrap_length)
        dataset2 = correlate.points(data2, boxsize=wrap_length)

        binning = correlate.RBinning(
            np.logspace(np.log10(RMIN), np.log10(RMAX), NBINS + 1))

        DD = correlate.paircount(dataset1, dataset2, binning, np=0)
        DD = DD.sum1
        N = len(dataset1) - 1

        return DD, N
    else:
        return None, None
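# Usage sketch (illustrative; assumes the module-level NBINS, BOXSIZE and WRAP
# globals used as defaults above are defined):
#
#     data1 = np.random.uniform(0, BOXSIZE, size=(1000, 3))
#     data2 = np.random.uniform(0, BOXSIZE, size=(1000, 3))
#     DD, N = correlate_info(data1, data2, RMIN=0.1, RMAX=10.0)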
Example #2
def correlate_info(data,
                   NBINS=NBINS,
                   RMIN=RMIN,
                   RMAX=RMAX,
                   BOXSIZE=BOXSIZE,
                   WRAP=WRAP):
    if data is not None:
        if RMAX is None:
            RMAX = BOXSIZE

        if WRAP:
            wrap_length = BOXSIZE
        else:
            wrap_length = None

        dataset = correlate.points(data, boxsize=wrap_length)

        binning = correlate.RBinning(
            np.logspace(np.log10(RMIN), np.log10(RMAX), NBINS + 1))

        DD = correlate.paircount(dataset, dataset, binning, np=16)
        DD = DD.sum1

        r = binning.centers
        return r, DD
    else:
        return None, None
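# Usage sketch (illustrative): convert the raw pair counts into a correlation
# function estimate with the analytic RR of a uniform periodic box, as in the
# later examples of this listing:
#
#     r, DD = correlate_info(data)
#     N = len(data)
#     edges = np.logspace(np.log10(RMIN), np.log10(RMAX), NBINS + 1)
#     RR = N**2 * 4. / 3 * np.pi * np.diff(edges**3) / BOXSIZE**3
#     xi = DD / RR - 1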
Example #3
def test_unweighted():
    numpy.random.seed(1234)
    pos = numpy.random.uniform(size=(1000, 3))
    pos1 = pos[:, None, :]
    pos2 = pos[None, :, :]
    dist = pos1 - pos2
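    # wrap each separation component into [-0.5, 0.5]: the minimum-image
    # convention for a unit periodic box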
    dist[dist > 0.5] -= 1.0
    dist[dist < -0.5] += 1.0
    dist = numpy.einsum('ijk,ijk->ij', dist, dist)**0.5

    dataset = correlate.points(pos, boxsize=1.0)

    # use the python point point counting
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))

    # use the C node node counting
    binning1 = correlate.FastRBinning(numpy.linspace(0, 0.5, 10))

    dig = binning.edges.searchsorted(dist.flat, side='left')
    truth = numpy.bincount(dig)
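    # index 0 of the bincount holds the zero-separation self-pairs and the
    # last index holds separations beyond the outermost edge, hence the
    # truth[1:-1] comparisons below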

    r = correlate.paircount(dataset, dataset, binning, np=0)
    assert_equal(r.sum1, truth[1:-1])

    r1 = correlate.paircount(dataset, dataset, binning1, np=0)
    assert_equal(r1.sum1, truth[1:-1])
Example #4
def test_cluster():
    numpy.random.seed(1234)
    dec = numpy.arcsin(numpy.random.uniform(-1, 1, size=100000)) / numpy.pi * 180
    ra = numpy.random.uniform(0, 2 * numpy.pi, size=100000) / numpy.pi * 180

    # testing bootstrap
    for area, rand in sphere.bootstrap(4, (ra, dec), 41252.96 / len(dec)):
        pass

    dataset = sphere.points(ra, dec)

    r = cluster.fof(dataset, 0.00001, np=None)

    assert r.N == len(dataset)

    binning = sphere.AngularBinning(numpy.linspace(0, 1.0, 10))
    binningR = correlate.RBinning(binning.edges)

    r = correlate.paircount(dataset, dataset, binning=binning, usefast=True)
    r1 = correlate.paircount(dataset, dataset, binning=binning, usefast=False)

    r2 = correlate.paircount(dataset, dataset, binning=binningR, usefast=True)

    assert_equal(r1.sum1, r2.sum1)
    assert_equal(r1.sum1, r.sum1)
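    # expected counts for points uniform on the sphere: N^2 times the
    # fractional solid angle of each annulus, diff(2*pi*(1 - cos(theta))) / (4*pi)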
    assert_allclose(
        r.sum1,
        numpy.diff(2 * numpy.pi * (1 - numpy.cos(numpy.radians(binning.angular_edges)))) / (4 * numpy.pi) * len(ra)**2,
        rtol=10e-2)
Example #5
def test_simple():
    numpy.random.seed(1234)
    pos = numpy.random.uniform(size=(10, 3))
    dataset = correlate.points(pos, boxsize=1.0)
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))
    r = correlate.paircount(dataset, dataset, binning, np=0)

    r1 = correlate.paircount(dataset, dataset, binning, usefast=True, np=0)
    assert_equal(r.sum1, r1.sum1)
Example #6
def test_field():
    numpy.random.seed(1234)
    pos = numpy.random.uniform(size=(1000, 3))
    dataset = correlate.field(pos, value=numpy.ones(len(pos)),
                              boxsize=1.0, weights=numpy.ones(len(pos)))
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))
    r = correlate.paircount(dataset, dataset, binning, np=0)

    assert_allclose(r.sum1, r.sum2)
Example #7
def test_weighted():
    numpy.random.seed(1234)
    pos = numpy.random.uniform(size=(1000, 3))
    datasetw = correlate.points(pos, boxsize=1.0, weights=numpy.ones(len(pos)))
    dataset = correlate.points(pos, boxsize=1.0)
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))
    r = correlate.paircount(datasetw, datasetw, binning, np=0)
    r1 = correlate.paircount(dataset, dataset, binning, np=0)

    assert_equal(r.sum1, r1.sum1)
Example #8
def reference_2pcf_s(sedges, position1, weight1, position2=None, weight2=None):
    """Reference pair counting via kdcount"""
    tree1 = correlate.points(position1, boxsize=None, weights=weight1)
    factor = 1.
    if position2 is None:
        tree2 = tree1
        factor = 1. / 2.
    else:
        tree2 = correlate.points(position2, boxsize=None, weights=weight2)
    bins = correlate.RBinning(np.asarray(sedges))
    pc = correlate.paircount(tree1, tree2, bins, np=0, usefast=False,
                             compute_mean_coords=True)
    return factor * pc.sum1
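# Usage sketch (illustrative):
#
#     sedges = np.linspace(0., 50., 11)
#     DD_auto = reference_2pcf_s(sedges, positions, weights)        # auto pairs, halved
#     DD_cross = reference_2pcf_s(sedges, pos_a, w_a, pos_b, w_b)   # cross pairs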
Example #9
def test_simple():
    numpy.random.seed(1234)
    pos = numpy.random.uniform(size=(10, 3))
    dataset = correlate.points(pos, boxsize=1.0)

    # use the python point point counting
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))

    # use the C node node counting
    binning1 = correlate.FastRBinning(numpy.linspace(0, 0.5, 10))

    r = correlate.paircount(dataset, dataset, binning, np=0)
    r1 = correlate.paircount(dataset, dataset, binning1, np=0)
    assert_equal(r.sum1, r1.sum1)
Example #10
def test_cross():
    numpy.random.seed(1234)
    pos1 = numpy.random.uniform(size=(10000, 2))
    pos2 = numpy.random.uniform(size=(10000, 2)) * 0.3
    dataset1 = correlate.points(pos1, boxsize=None)
    dataset2 = correlate.points(pos2, boxsize=None)
    binning = correlate.RBinning(numpy.linspace(0, 0.1, 10))
    r1 = correlate.paircount(dataset1, dataset2, binning, np=0, usefast=False)
    r2 = correlate.paircount(dataset1, dataset2, binning, np=0, usefast=True)
    assert_equal(r1.sum1, r2.sum1)
    r3 = correlate.paircount(dataset1, dataset2, binning, np=4, usefast=False)
    assert_equal(r1.sum1, r3.sum1)
    r4 = correlate.paircount(dataset1, dataset2, binning, np=4, usefast=True)
    assert_equal(r1.sum1, r4.sum1)
Example #11
def test_paircount():
    numpy.random.seed(1234)
    data = 1.0 * ((-numpy.arange(4).reshape(-1, 1)) % 2)
    data = correlate.points(data)
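    # label each point by the integer part of its x-coordinate; the bootstrap
    # policy uses these labels as its resampling units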
    bsfun = lambda x: numpy.int32(x.pos[:, 0])
    policy = bootstrap.policy(bsfun, data)
    binning = correlate.RBinning(numpy.linspace(0, 100, 2, endpoint=True))

    def estimator(x, y):
        r = correlate.paircount(x, y, binning, np=0)
        return r.fullsum1
    result = policy.run(estimator, data, data)
    L, R = policy.resample(result, numpy.arange(2))

    assert_array_equal(L, (4, 4))
    assert_array_equal(R, (8, 8, 0))
Example #12
def reference_paircount(pos1, w1, redges, boxsize, pos2=None, w2=None, los=2):
    """Reference pair counting via kdcount"""
    # make the trees
    tree1 = correlate.points(pos1, boxsize=boxsize, weights=w1)
    if pos2 is None:
        tree2 = tree1
    else:
        tree2 = correlate.points(pos2, boxsize=boxsize, weights=w2)

    # do the paircount
    bins = correlate.RBinning(redges)
    pc = correlate.paircount(tree1,
                             tree2,
                             bins,
                             np=0,
                             compute_mean_coords=True)
    return numpy.nan_to_num(pc.pair_counts), numpy.nan_to_num(
        pc.mean_centers), pc.sum1
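# Usage sketch (illustrative; the boxsize value is hypothetical):
#
#     redges = numpy.linspace(10., 150., 15)
#     counts, centers, sum1 = reference_paircount(pos, w, redges, boxsize=1380.)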
Example #13
def test_channels():
    numpy.random.seed(1234)
    pos = numpy.random.uniform(size=(1000, 3))
    datasetw = correlate.points(pos, boxsize=1.0, weights=numpy.ones(len(pos)))
    dataset = correlate.points(pos, boxsize=1.0)

    binning_mc1 = correlate.FlatSkyMultipoleBinning(numpy.linspace(0, 0.5, 10),
                                                    ells=[0, 0, 0],
                                                    los=0)
    binning_mc2 = correlate.MultipoleBinning(numpy.linspace(0, 0.5, 10),
                                             ells=[0, 0, 0])
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))

    r_mc1 = correlate.paircount(datasetw, datasetw, binning_mc1, np=0)
    r_mc2 = correlate.paircount(datasetw, datasetw, binning_mc2, np=0)
    r1 = correlate.paircount(dataset, dataset, binning, np=0)

    assert_equal(r_mc1.sum1[0], r1.sum1)
    assert_equal(r_mc2.sum1[0], r1.sum1)
Example #14
def test_cross():
    numpy.random.seed(1234)
    pos1 = numpy.random.uniform(size=(10000, 2))
    pos2 = numpy.random.uniform(size=(10000, 2)) * 0.3
    dataset1 = correlate.points(pos1, boxsize=None)
    dataset2 = correlate.points(pos2, boxsize=None)

    # use the python point point counting
    binning = correlate.RBinning(numpy.linspace(0, 0.5, 10))

    # use the C node node counting
    binning1 = correlate.FastRBinning(numpy.linspace(0, 0.5, 10))

    r1 = correlate.paircount(dataset1, dataset2, binning, np=0)
    r2 = correlate.paircount(dataset1, dataset2, binning1, np=0)
    assert_equal(r1.sum1, r2.sum1)
    r3 = correlate.paircount(dataset1, dataset2, binning, np=4)
    assert_equal(r1.sum1, r3.sum1)
    r4 = correlate.paircount(dataset1, dataset2, binning1, np=4)
    assert_equal(r1.sum1, r4.sum1)
Example #15
def test_cluster():
    numpy.random.seed(1234)
    dec = numpy.arcsin(numpy.random.uniform(-1, 1,
                                            size=100000)) / numpy.pi * 180
    ra = numpy.random.uniform(0, 2 * numpy.pi, size=100000) / numpy.pi * 180

    # testing bootstrap
    for area, rand in sphere.bootstrap(4, (ra, dec), 41252.96 / len(dec)):
        pass

    dataset = sphere.points(ra, dec)

    r = cluster.fof(dataset, 0.00001, np=None)

    assert r.N == len(dataset)

    binning = sphere.FastAngularBinning(numpy.linspace(0, 1.0, 10))
    binning1 = sphere.AngularBinning(numpy.linspace(0, 1.0, 10))
    binningR = correlate.RBinning(binning.edges)

    r = correlate.paircount(dataset, dataset, binning=binning)
    r1 = correlate.paircount(dataset,
                             dataset,
                             binning=binning1,
                             compute_mean_coords=True)

    r2 = correlate.paircount(dataset, dataset, binning=binningR)

    # make sure mean_centers compute angular centers
    for i, val in enumerate(r1.mean_centers):
        assert binning.angular_edges[i] < val < binning.angular_edges[i + 1]
    assert_equal(r1.sum1, r2.sum1)
    assert_equal(r1.sum1, r.sum1)
    assert_allclose(
        r.sum1,
        numpy.diff(2 * numpy.pi *
                   (1 - numpy.cos(numpy.radians(binning.angular_edges)))) /
        (4 * numpy.pi) * len(ra)**2,
        rtol=10e-2)
Example #16
def main():
    comm = MPI.COMM_WORLD
    SNAP, LABEL = None, None
    if comm.rank == 0:
        SNAP = files.Snapshot(ns.snapfilename, files.TPMSnapshotFile)
        LABEL = files.Snapshot(ns.halolabel, files.HaloLabelFile)

    SNAP = comm.bcast(SNAP)
    LABEL = comm.bcast(LABEL)

    Ntot = sum(SNAP.npart)
    assert Ntot == sum(LABEL.npart)

    h = files.HaloFile(ns.halocatalogue)

    N = h.read_mass()

    N0 = Ntot - sum(N[1:])
    # halos are assigned to ranks 0, 1, 2, 3 ...
    halorank = numpy.arange(len(N)) % comm.size
    # but non halos are special we will fix it later.
    halorank[0] = -1

    NonhaloStart = comm.rank * int(N0) // comm.size
    NonhaloEnd = (comm.rank + 1) * int(N0) // comm.size

    myNtotal = numpy.sum(N[halorank == comm.rank],
                         dtype='i8') + (NonhaloEnd - NonhaloStart)

    print("Rank %d NonhaloStart %d NonhaloEnd %d myNtotal %d" %
          (comm.rank, NonhaloStart, NonhaloEnd, myNtotal))

    data = numpy.empty(myNtotal,
                       dtype=[
                           ('Position', ('f4', 3)),
                           ('Label', ('i4')),
                           ('Rank', ('i4')),
                       ])

    allNtotal = comm.allgather(myNtotal)
    start = sum(allNtotal[:comm.rank])
    end = sum(allNtotal[:comm.rank + 1])
    data['Position'] = SNAP.read("Position", start, end)
    data['Label'] = LABEL.read("Label", start, end)
    data['Rank'] = halorank[data['Label']]
    # now assign ranks to nonhalo particles
    nonhalomask = (data['Label'] == 0)

    nonhalocount = comm.allgather(nonhalomask.sum())

    data['Rank'][nonhalomask] = (sum(nonhalocount[:comm.rank]) +
                                 numpy.arange(nonhalomask.sum())) % comm.size

    mpsort.sort(data, orderby='Rank')

    arg = data['Label'].argsort()
    data = data[arg]

    ul = numpy.unique(data['Label'])

    bins = correlate.RBinning(40. / ns.boxsize, Nbins=ns.Nmesh)
    sum1 = numpy.zeros(len(bins.centers))

    for l in ul:
        if l == 0: continue
        start = data['Label'].searchsorted(l, side='left')
        end = data['Label'].searchsorted(l, side='right')
        pos = data['Position'][start:end]
        dataset = correlate.points(pos, boxsize=1.0)
        result = correlate.paircount(dataset, dataset, bins, np=0)
        sum1 += result.sum1
        if l % 1000 == 0:
            print(l)

    sum1 = comm.allreduce(sum1, MPI.SUM)
    Ntot = sum(SNAP.npart)
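    # analytic random-pair count: Ntot^2 times the volume of each radial shell
    # (positions and bin edges are in units of the box)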
    RR = 4. / 3 * numpy.pi * numpy.diff(bins.edges**3) * (1.0 * Ntot * Ntot)

    k = numpy.arange(ns.Nmesh // 2) * 2 * numpy.pi / ns.boxsize
    # asymptotically zero at large r; the mean doesn't matter since
    # we don't use the zero-k mode anyway.
    k, p = corrfrompower(bins.centers * ns.boxsize, sum1 / RR, R=k)
    # inverse FT factor
    p *= (2 * numpy.pi)**3

    if comm.rank == 0:

        if ns.output != '-':
            ff = open(ns.output, 'w')
            ff2 = open(ns.output + '.xi', 'w')
            with ff2:
                numpy.savetxt(ff2, list(zip(bins.centers, sum1 / RR - 1.0)))
        else:
            ff = stdout
        with ff:
            numpy.savetxt(ff, list(zip(k, p)))
Example #17
def compute_brutal_corr(datasources,
                        redges,
                        Nmu=0,
                        comm=None,
                        subsample=1,
                        los='z',
                        poles=[]):
    r"""
    Compute the correlation function by direct pair summation, either as a function
    of separation (`R`) or as a function of separation and line-of-sight angle (`R`, `mu`)
    
    The estimator used to compute the correlation function is:
    
    .. math:: 
        
        \xi(r, \mu) = DD(r, \mu) / RR(r, \mu) - 1.
    
    where `DD` is the number of data-data pairs and `RR` is the number of
    random-random pairs, which is determined solely by the binning used,
    assuming a constant number density.
    Parameters
    ----------
    datasources : list of DataSource objects
        the list of data instances from which the 3D correlation will be computed
    redges : array_like
        the bin edges for the `R` variable
    Nmu : int, optional
        the number of desired `mu` bins, where `mu` is the cosine 
        of the angle from the line-of-sight. Default is `0`, in 
        which case the correlation function is binned as a function of `R` only
    comm : MPI.Communicator, optional
        the communicator to pass to the ``ParticleMesh`` object. If not
        provided, ``MPI.COMM_WORLD`` is used
    subsample : int, optional
        downsample the input datasources by keeping 1 out of every `subsample`
        points. Default is `1` (no subsampling).
    los : str, {'x', 'y', 'z'}, optional
        the dimension to treat as the line-of-sight; default is 'z'.
    poles : list of int, optional
        integers specifying the multipoles to compute from the 2D correlation function
        
    Returns
    -------
    pc : :class:`kdcount.correlate.paircount`
        the pair counting instance 
    xi : array_like
        the correlation function result; if `poles` supplied, the shape is 
        `(len(redges)-1, len(poles))`, otherwise, the shape is either `(len(redges)-1, )`
        or `(len(redges)-1, Nmu)`
    RR : array_like
        the number of random-random pairs (used as normalization of the data-data pairs)
    """
    from pmesh.domain import GridND
    from kdcount import correlate

    # some setup
    if los not in "xyz": raise ValueError("`los` must be `x`, `y`, or `z`")
    los = "xyz".index(los)
    poles = numpy.array(poles)
    Rmax = redges[-1]
    if comm is None: comm = MPI.COMM_WORLD

    # determine processor division for domain decomposition
    for Nx in range(int(comm.size**0.3333) + 1, 0, -1):
        if comm.size % Nx == 0: break
    else:
        Nx = 1
    for Ny in range(int(comm.size**0.5) + 1, 0, -1):
        if (comm.size // Nx) % Ny == 0: break
    else:
        Ny = 1
    Nz = comm.size // Nx // Ny
    Nproc = [Nx, Ny, Nz]

    # log some info
    if comm.rank == 0:
        logger.info('Nproc = %s' % str(Nproc))
        logger.info('Rmax = %g' % Rmax)

    # domain decomposition
    grid = [
        numpy.linspace(0,
                       datasources[0].BoxSize[i],
                       Nproc[i] + 1,
                       endpoint=True) for i in range(3)
    ]
    domain = GridND(grid, comm=comm)

    # read position for field #1
    with datasources[0].open() as stream:
        [[pos1]] = stream.read(['Position'], full=True)
    pos1 = pos1[comm.rank * subsample // comm.size::subsample]
    N1 = comm.allreduce(len(pos1))

    # read position for field #2
    if len(datasources) > 1:
        with datasources[1].open() as stream:
            [[pos2]] = stream.read(['Position'], full=True)
        pos2 = pos2[comm.rank * subsample // comm.size::subsample]
        N2 = comm.allreduce(len(pos2))
    else:
        pos2 = pos1
        N2 = N1

    # exchange field #1 positions
    layout = domain.decompose(pos1, smoothing=0)
    pos1 = layout.exchange(pos1)
    if comm.rank == 0: logger.info('exchange pos1')

    # exchange field #2 positions
    if Rmax > datasources[0].BoxSize[0] * 0.25:
        pos2 = numpy.concatenate(comm.allgather(pos2), axis=0)
    else:
        layout = domain.decompose(pos2, smoothing=Rmax)
        pos2 = layout.exchange(pos2)
    if comm.rank == 0: logger.info('exchange pos2')

    # initialize the trees to hold the field points
    tree1 = correlate.points(pos1, boxsize=datasources[0].BoxSize)
    tree2 = correlate.points(pos2, boxsize=datasources[0].BoxSize)

    # log the sizes of the trees
    logger.info('rank %d correlating %d x %d' %
                (comm.rank, len(tree1), len(tree2)))
    if comm.rank == 0: logger.info('all correlating %d x %d' % (N1, N2))

    # use multipole binning
    if len(poles):
        bins = correlate.FlatSkyMultipoleBinning(redges,
                                                 poles,
                                                 los,
                                                 compute_mean_coords=True)
    # use (R, mu) binning
    elif Nmu > 0:
        bins = correlate.FlatSkyBinning(redges,
                                        Nmu,
                                        los,
                                        compute_mean_coords=True)
    # use R binning
    else:
        bins = correlate.RBinning(redges, compute_mean_coords=True)

    # do the pair counting
    # usefast must be False to get mean centers; otherwise an exception is thrown
    pc = correlate.paircount(tree2, tree1, bins, np=0, usefast=False)
    pc.sum1[:] = comm.allreduce(pc.sum1)

    # get the mean bin values, reducing from all ranks
    pc.pair_counts[:] = comm.allreduce(pc.pair_counts)
    with numpy.errstate(invalid='ignore'):
        if bins.Ndim > 1:
            for i in range(bins.Ndim):
                pc.mean_centers[i][:] = comm.allreduce(
                    pc.mean_centers_sum[i]) / pc.pair_counts
        else:
            pc.mean_centers[:] = comm.allreduce(
                pc.mean_centers_sum[0]) / pc.pair_counts

    # compute the random pairs from the fractional volume
    RR = 1. * N1 * N2 / datasources[0].BoxSize.prod()
    if Nmu > 0:
        dr3 = numpy.diff(pc.edges[0]**3)
        dmu = numpy.diff(pc.edges[1])
        RR *= 2. / 3. * numpy.pi * dr3[:, None] * dmu[None, :]
    else:
        RR *= 4. / 3. * numpy.pi * numpy.diff(pc.edges**3)

    # return the correlation and the pair count object
    xi = (1. * pc.sum1 / RR) - 1.0
    if len(poles):
        xi = xi.T  # makes ell the second axis
        xi[:, poles != 0] += 1.0  # only monopole gets the minus one

    return pc, xi, RR
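# Usage sketch (illustrative; `ds` stands for a DataSource instance exposing
# 'Position' and a BoxSize attribute, as the function expects):
#
#     redges = numpy.linspace(10., 150., 15)
#     pc, xi, RR = compute_brutal_corr([ds], redges, poles=[0, 2], los='z')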
Example #18
HM = galaxy.open('galaxy_host_mass')[:]
hostid = galaxy.open('galaxy_host_id')[:]
tag = galaxy.open('central_satellite_tag')[:]
galaxy_positions = galaxy.open('galaxy_center_of_mass')[:]

lcuts = numpy.array([7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5])
lcuts += 0.25

for lcut in reversed(lcuts):
    mask = SM > 10**lcut
    data_s = galaxy_positions[mask]

    N = len(data_s)
    r, DD = correlate_info(data_s)

    binning = correlate.RBinning(
        np.logspace(np.log10(RMIN), np.log10(RMAX), NBINS + 1))
    rbin = binning.edges
    RR = (N**2 - N) * np.asarray(
        [poiss(rbin[i], rbin[i + 1]) for i in range(0, NBINS)])

    xi = DD / RR - 1
    dxi = np.sqrt(DD) / RR
    r = binning.centers

    label_DD = './galaxy_correlation_functions/DD_z-%.1f_SM_cut_galaxy_COM.pickle' % (
        z)
    label_xi = './galaxy_correlation_functions/xi_z%.1f_%.1f_SM_cut_galaxy_COM.pickle' % (
        z, lcut)

    pickle.dump([r, DD], open(label_DD, 'wb'))
    pickle.dump([r, xi, dxi, N], open(label_xi, 'wb'))
Example #19
def get_halo_density_profile(output_path,
                             p_type,
                             desired_redshift_of_selected_halo,
                             index_of_selected_halo,
                             min_edge,
                             max_edge,
                             Nbins,
                             CENTER_AROUND='POTENTIAL_MINIMUM',
                             p_id=0):
    from kdcount import correlate

    def min_dis(median_position, position, box_size):
        # pick the periodic image of the separation with the smallest magnitude
        pos_1 = position - median_position
        pos_2 = position - median_position + box_size
        pos_3 = position - median_position - box_size
        new_position_options = numpy.array([pos_1, pos_2, pos_3])
        get_minimum_distance = numpy.argmin(numpy.abs(new_position_options))
        return new_position_options[get_minimum_distance]

    boxsize = get_box_size(output_path)
    particle_property = 'Position'
    group_positions, output_redshift = get_particle_property_within_groups(
        output_path, particle_property, p_type,
        desired_redshift_of_selected_halo, index_of_selected_halo)

    particle_property = 'Mass'
    group_mass, output_redshift = get_particle_property_within_groups(
        output_path, particle_property, p_type,
        desired_redshift_of_selected_halo, index_of_selected_halo)
    particle_property = 'Potential'
    group_potential, output_redshift = get_particle_property_within_groups(
        output_path, particle_property, p_type,
        desired_redshift_of_selected_halo, index_of_selected_halo)
    if (CENTER_AROUND == 'MOST_MASSIVE_BLACKHOLE'):

        particle_property = 'ID'

        bh_IDs, output_redshift = get_particle_property_within_groups(
            output_path, particle_property, 5,
            desired_redshift_of_selected_halo, index_of_selected_halo)

        particle_property = 'Position'

        bh_positions, output_redshift = get_particle_property_within_groups(
            output_path, particle_property, 5,
            desired_redshift_of_selected_halo, index_of_selected_halo)
        particle_property = 'BlackholeMass'

        bh_masses, output_redshift = get_particle_property_within_groups(
            output_path, particle_property, 5,
            desired_redshift_of_selected_halo, index_of_selected_halo)
        print("Calculating density around BH with ID:",
              (bh_IDs[bh_masses == numpy.amax(bh_masses)])[0])
        center = (bh_positions[bh_masses == numpy.amax(bh_masses)])[0]
    if (CENTER_AROUND == 'POTENTIAL_MINIMUM'):
        center = (
            group_positions[group_potential == numpy.amin(group_potential)])[0]
    transposed_group_positions = numpy.transpose(group_positions)
    vectorized_min_dis = numpy.vectorize(min_dis)
    x_dis = vectorized_min_dis(center[0], transposed_group_positions[0],
                               boxsize)
    y_dis = vectorized_min_dis(center[1], transposed_group_positions[1],
                               boxsize)
    z_dis = vectorized_min_dis(center[2], transposed_group_positions[2],
                               boxsize)
    log_distances = numpy.log10(numpy.sqrt(x_dis**2 + y_dis**2 + z_dis**2))

    log_distance_bins = numpy.linspace(min_edge, max_edge, Nbins)
    binning = correlate.RBinning(log_distance_bins)
    bin_edges = binning.edges
    bin_centers = binning.centers
    mass_distribution = []
    for i in range(0, len(bin_edges) - 1):
        left = bin_edges[i]
        right = bin_edges[i + 1]
        mask = (log_distances > left) & (log_distances < right)
        mass_inside_bin = numpy.sum(group_mass[mask])
        mass_distribution.append(mass_inside_bin)

    mass_distribution = numpy.array(mass_distribution)
    mass_density = mass_distribution / 4. / 3.14 / (10**bin_centers)**3 / (
        (numpy.diff(bin_centers))[0]) / numpy.log(10)
    return bin_centers, mass_distribution, mass_density
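# Usage sketch (illustrative; the argument values are hypothetical, and
# min_edge / max_edge are log10 radial distances):
#
#     bin_centers, mass_distribution, mass_density = get_halo_density_profile(
#         output_path, p_type=1, desired_redshift_of_selected_halo=0.0,
#         index_of_selected_halo=0, min_edge=-2.0, max_edge=1.0, Nbins=30)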
def poiss(rmin, rmax):
    # fraction of the box volume occupied by the shell [rmin, rmax)
    p = 4. / 3 * numpy.pi * (rmax**3 - rmin**3) / BOXSIZE**3
    return p
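# poiss(rmin, rmax) is the probability that a random pair lands in the shell
# [rmin, rmax), so the expected random-pair count follows as in Example #18:
#
#     RR = (N**2 - N) * numpy.asarray(
#         [poiss(rbin[i], rbin[i + 1]) for i in range(len(rbin) - 1)])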


rho_crit = 2.775e11
mids = [12.5, 12.0, 11.5, 11.0]
lcuts = [41, 41.5, 42, 42.5, 43, 43.5, 44, 44.5]
width = 0.25
for snapshot in reversed(snapshots):
    for mid in mids:
        for lcut in lcuts:
            rvir = (10**mid / (4. / 3 * 3.14 * 200 * rho_crit * 0.2814))**(1. / 3)
            DD, N = construct_halo_mass_bin(snapshot)
            y_cen = correlate.RBinning(y_space).centers

            dy = y_cen * numpy.diff(numpy.log(y_cen))[0]
            dr = rvir * dy
            r_cen = rvir * y_cen
            pre_factor = 4. * 3.14

            density = DD / dr / r_cen**2 / pre_factor
            density_err = numpy.sqrt(DD) / dr / r_cen**2 / pre_factor
            pickle.dump([y_cen, r_cen, DD, density, density_err, Nhalo],
                        open('raw_satellite_histograms_z_%.2f_mid_%.1f_lcut_%.1f.pickle' % (z, mid, lcut), 'wb'))