Example #1
import os
import sys

import numpy as np
import emcee
from emcee.utils import MPIPool  # emcee 2.x location of MPIPool

# Data, util, PriorRange and lnPost are project-specific helpers assumed to be
# imported elsewhere in this module.


def mcmc_mpi(Nwalkers,
             Nchains,
             observables=['nbar', 'xi'],
             data_dict={'Mr': 21, 'b_normal': 0.25},
             prior_name='first_try',
             mcmcrun=None,
             continue_chain=False):
    '''
    Standard MCMC implementation

    Parameters
    -----------
    - Nwalkers :
        Number of walkers
    - Nchains :
        Number of MCMC iterations (steps per walker)
    - observables :
        list of observables. Options are: ['nbar','xi'], ['nbar','gmf'], ['xi']
    - data_dict :
        dictionary that specifies the observation keywords
    - prior_name :
        name of the prior range to use
    - mcmcrun :
        string tag used in the output chain file name
    - continue_chain :
        if True, resume from an existing chain file
    '''
    # Build the observable data vector and the covariance matrix passed to the likelihood
    if observables == ['xi']:
        fake_obs = Data.data_xi(**data_dict)
        #fake_obs_icov = Data.data_inv_cov('xi', **data_dict)
        fake_obs_icov = Data.data_cov(inference='mcmc', **data_dict)[1:16,
                                                                     1:16]
    if observables == ['nbar', 'xi']:
        fake_obs = np.hstack(
            [Data.data_nbar(**data_dict),
             Data.data_xi(**data_dict)])
        fake_obs_icov = Data.data_cov(inference='mcmc', **data_dict)[:16, :16]
    if observables == ['nbar', 'gmf']:
        ##### FIRST BIN OF GMF DROPPED ###############
        # CAUTION: hardcoded
        fake_obs = np.hstack(
            [Data.data_nbar(**data_dict),
             Data.data_gmf(**data_dict)[1:]])
        fake_obs_icov = np.zeros((10, 10))
        #print Data.data_cov(**data_dict)[17: , 17:].shape

        # Covariance matrix being adjusted accordingly
        fake_obs_icov[1:, 1:] = Data.data_cov(inference='mcmc',
                                              **data_dict)[17:, 17:]
        fake_obs_icov[0, 1:] = Data.data_cov(inference='mcmc',
                                             **data_dict)[0, 17:]
        fake_obs_icov[1:, 0] = Data.data_cov(inference='mcmc',
                                             **data_dict)[17:, 0]
        fake_obs_icov[0, 0] = Data.data_cov(inference='mcmc', **data_dict)[0,
                                                                           0]

    # True HOD parameters
    data_hod_dict = Data.data_hod_param(Mr=data_dict['Mr'])
    data_hod = np.array([
        data_hod_dict['logM0'],  # log M0 
        np.log(data_hod_dict['sigma_logM']),  # log(sigma)
        data_hod_dict['logMmin'],  # log Mmin
        data_hod_dict['alpha'],  # alpha
        data_hod_dict['logM1']  # log M1
    ])
    Ndim = len(data_hod)

    # Priors
    prior_min, prior_max = PriorRange(prior_name)
    prior_range = np.zeros((len(prior_min), 2))
    prior_range[:, 0] = prior_min
    prior_range[:, 1] = prior_max

    # mcmc chain output file
    chain_file = ''.join([
        util.mcmc_dir(),
        util.observable_id_flag(observables), '.', mcmcrun, '.mcmc_chain.dat'
    ])
    #print chain_file

    if os.path.isfile(chain_file) and continue_chain:
        print 'Continuing previous MCMC chain!'
        sample = np.loadtxt(chain_file)
        Nchain = Nchains - len(sample) // Nwalkers  # iterations left to finish
        if Nchain <= 0:
            raise ValueError('Requested number of iterations already completed')
        print Nchain, ' iterations left to finish'

        # Initializing Walkers from the end of the chain
        pos0 = sample[-Nwalkers:]
    else:
        # new chain
        f = open(chain_file, 'w')
        f.close()
        Nchain = Nchains

        # Initialize walkers in a small Gaussian ball around the fiducial HOD parameters
        random_guess = data_hod
        pos0 = np.tile(random_guess, (Nwalkers, 1)) + \
            5.e-2 * np.random.randn(Nwalkers, Ndim)
    # Initializing MPIPool
    pool = MPIPool()
    if not pool.is_master():
        pool.wait()
        sys.exit(0)

    # Initializing the emcee sampler
    hod_kwargs = {
        'prior_range': prior_range,
        'data': fake_obs,
        'data_icov': fake_obs_icov,
        'observables': observables,
        'Mr': data_dict['Mr']
    }
    sampler = emcee.EnsembleSampler(Nwalkers,
                                    Ndim,
                                    lnPost,
                                    pool=pool,
                                    kwargs=hod_kwargs)

    # Run the sampler, appending walker positions to the chain file each iteration
    for result in sampler.sample(pos0, iterations=Nchain, storechain=False):
        position = result[0]
        f = open(chain_file, 'a')
        for k in range(position.shape[0]):
            output_str = '\t'.join(position[k].astype('str')) + '\n'
            f.write(output_str)
        f.close()

    pool.close()
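
# A minimal usage sketch (an assumption, not part of the original code): the
# function above is meant to be launched under MPI, e.g.
#   mpirun -np 8 python run_mcmc.py
# with a driver along these lines (argument values are illustrative):
if __name__ == '__main__':
    mcmc_mpi(20,                      # Nwalkers
             1000,                    # Nchains: total MCMC iterations
             observables=['nbar', 'xi'],
             data_dict={'Mr': 21, 'b_normal': 0.25},
             prior_name='first_try',
             mcmcrun='test_run')      # tag used in the chain file name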
Example #2
import os
import sys

import numpy as np
import h5py
import emcee
from emcee.utils import MPIPool  # emcee 2.x location of MPIPool

# Data, util, PriorRange and lnPost are project-specific helpers assumed to be
# imported elsewhere in this module.


def mcmc_mpi(Nwalkers, Niters, Mr, prior_name='first_try', pois=False,
             continue_chain=False):
    '''
    Parameters
    -----------
    - Nwalkers :
        Number of walkers
    - Niters :
        Number of MCMC iterations
    - Mr :
        luminosity threshold of the galaxy sample
    - prior_name :
        name of the prior range to use
    - pois :
        Poisson flag forwarded to Data.load_covariance
    - continue_chain :
        if True, resume from an existing chain file
    '''
    # data vector and covariance matrix
    fake_obs_icov = Data.load_covariance(Mr, pois=pois)
    fake_obs = Data.load_data(Mr)
        
    # HOD parameters used as the starting guess for the walkers
    data_hod = Data.load_dechod_random_guess(Mr)
    Ndim = len(data_hod)

    # Priors
    prior_min, prior_max = PriorRange(prior_name, Mr)
    prior_range = np.zeros((len(prior_min), 2))
    prior_range[:, 0] = prior_min
    prior_range[:, 1] = prior_max
    
    # mcmc chain output file
    chain_file_name = ''.join([util.mcmc_dir(), 'group_nopoisson_mcmc_chain_Mr',
                               str(Mr), '.hdf5'])

    if os.path.isfile(chain_file_name) and continue_chain:
        print 'Continuing previous MCMC chain!'
        sample_file = h5py.File(chain_file_name, 'r')
        chain = sample_file['mcmc'][...]
        sample_file.close()
        # unfilled iterations are still all-zero rows in the pre-allocated dataset
        n_done = np.sum(np.any(chain != 0., axis=(1, 2)))
        Nchains = Niters - n_done  # number of iterations left to finish
        if Nchains <= 0:
            raise ValueError('Requested number of iterations already completed')
        print Nchains, ' iterations left to finish'

        # Initialize walkers from the last completed position
        pos0 = chain[n_done - 1]
        cnt = n_done
    else:
        # new chain
        print "chain_file_name=", chain_file_name

        sample_file = h5py.File(chain_file_name, 'w')
        sample_file.create_dataset("mcmc", (Niters, Nwalkers, Ndim),
                                   data=np.zeros((Niters, Nwalkers, Ndim)))
        sample_file.close()

        # Initialize walkers in a small Gaussian ball around the starting guess
        random_guess = data_hod
        pos0 = np.tile(random_guess, (Nwalkers, 1)) + \
            5.e-2 * np.random.randn(Nwalkers, Ndim)
        Nchains = Niters
        cnt = 0
    print "initial position of the walkers = " , pos0.shape
    # Initializing MPIPool
    pool = MPIPool(loadbalance=True)
    if not pool.is_master():
        pool.wait()
        sys.exit(0)

    # Initializing the emcee sampler
    hod_kwargs = {
            'prior_range': prior_range, 
            'data': fake_obs, 
            'data_icov': fake_obs_icov, 
            'Mr': Mr
            }
    sampler = emcee.EnsembleSampler(Nwalkers, Ndim, lnPost, pool=pool, kwargs=hod_kwargs)

    # Run the sampler, writing the walker positions into the HDF5 file each iteration
    for result in sampler.sample(pos0, iterations=Nchains, storechain=False):
        position = result[0]
        sample_file = h5py.File(chain_file_name, 'r+')
        sample_file["mcmc"][cnt] = position
        sample_file.close()
        print "iteration=", cnt
        cnt += 1
    pool.close()
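
# A minimal sketch (an assumption, not part of the original code) showing how the
# chain written above could be read back for analysis; it reuses the file-name
# convention from mcmc_mpi:
def read_chain(Mr):
    chain_file_name = ''.join([util.mcmc_dir(), 'group_nopoisson_mcmc_chain_Mr',
                               str(Mr), '.hdf5'])
    sample_file = h5py.File(chain_file_name, 'r')
    chain = sample_file['mcmc'][...]    # shape (Niters, Nwalkers, Ndim)
    sample_file.close()
    # flatten walkers and iterations into a single (Niters * Nwalkers, Ndim) array
    return chain.reshape(-1, chain.shape[-1])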