Code example #1
def prior(hypercube):  # signature and loop assumed around the original ppf line
    theta = [0.0] * len(hypercube)
    for i, x in enumerate(hypercube):
        theta[i] = priordict[parnames[i]].ppf(x)

    return theta
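
# A minimal sketch, assuming priordict maps parameter names to frozen
# scipy.stats distributions whose ppf (inverse CDF) turns unit-hypercube
# draws into physical values; the names and ranges below are illustrative
# stand-ins, not the original definitions.
from scipy import stats

parnames = ['sigma_J', 'C']
priordict = {
    'sigma_J': stats.loguniform(1e-3, 1e2),     # jitter: log-uniform prior
    'C': stats.uniform(loc=-10.0, scale=20.0),  # offset: uniform on [-10, 10]
}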


# Define PolyChord settings
settings = PolyChordSettings(
    nDims,
    nDerived,
)
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)  # modelpath[12:-3]
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is true
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)
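
# A minimal sketch, assuming PPC is pypolychord and that run_polychord returns
# a PolyChordOutput object: the evidence estimate can then be read back directly.
print('log-evidence: {:.3f} +/- {:.3f}'.format(output.logZ, output.logZerr))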

# Parameter names
# latexnames = [r'\sigma_J', r'C']
# for j in range(nplanets):
#     latexnames.extend(
#         [fr'K_{j}', fr'P_{j}', fr'e_{j}', fr'\omega_{j}', fr'M_{j}'])
Code example #2
def PolyChordrun(LogLikelihood, n_dims, n_params, **kwargs):
    """
    Function author: yuanfang
      PolyChord settings

      For full details of nested sampling and PolyChord, please refer to:
      arxiv:1506.00171[astro-ph.IM], 0809.34371[astro-ph]
           

    Parameters
    ----------
    LogLikelihood: callable
        Log-likelihood function, called as LogLikelihood(cube, n_dims, n_params);
        it may write derived quantities into cube[n_dims:].

    n_dims: int
        Dimensionality of the model, i.e. the number of physical parameters.

    n_params: int
        Total number of parameters; the number of derived parameters is
        n_params - n_dims (can be 0).


    Keyword arguments
    -----------------
    n_live_points: int
        (Default: n_dims*25)
        The number of live points (PolyChord's nlive).
        Increasing nlive increases the accuracy of posteriors and evidences,
        and proportionally increases runtime ~ O(nlive).

    num_repeats : int
        (Default: n_dims*2, as in the PolyChord eggbox example)
        The number of slice-sampling steps used to generate a new point.
        Increasing num_repeats increases the reliability of the algorithm.
        Typically:
        * for reliable evidences, num_repeats ~ O(5*nDims);
        * for reliable posteriors, num_repeats ~ O(nDims).

    nprior : int
        (Default: -1, i.e. nlive)
        The number of prior samples to draw before starting compression.

    do_clustering : boolean
        (Default: True)
        Whether or not to use clustering at run time.

    feedback : {-1,0,1,2,3}
        (Default: 1)
        Degree of command-line feedback to provide:
        -1 | nothing
         0 | just header and tail
         1 | run-time evidences and clustering information
         2 | fancy output
         3 | verbose

    precision_criterion : float
        (Default: 0.001)
        Termination criterion. Nested sampling terminates when the evidence
        contained in the live points is precision_criterion fraction of the
        total evidence.

    max_ndead : int
        (Default: -1)
        Alternative termination criterion. Stop after max_ndead iterations.
        Set negative to ignore (default).

    boost_posterior : float
        (Default: 0.0)
        Increase the number of posterior samples produced. This can be set
        arbitrarily high, but you won't be able to boost by more than
        num_repeats.
        Warning: in high dimensions PolyChord produces _a lot_ of posterior
        samples. You probably don't need to change this.

    posteriors : boolean
        (Default: True)
        Produce (weighted) posterior samples. Stored in <root>.txt.

    equals : boolean
        (Default: True)
        Produce (equally weighted) posterior samples. Stored in
        <root>_equal_weights.txt

    cluster_posteriors : boolean
        (Default: True)
        Produce posterior files for each cluster?
        Does nothing if do_clustering=False.

    write_resume : boolean
        (Default: True)
        Create a resume file.

    write_paramnames : boolean
        (Default: False)
        Write a paramnames file.

    read_resume : boolean
        (Default: True)
        Read from resume file.

    write_stats : boolean
        (Default: True)
        Write an evidence statistics file.

    write_live : boolean
        (Default: True)
        Write a live points file.

    write_dead : boolean
        (Default: True)
        Write a dead points file.

    write_prior : boolean
        (Default: True)
        Write a prior points file.

    update_files : int
        (Default: nlive)
        How often to update the files in <base_dir>.

    base_dir : string
        (Default: 'chains')
        Where to store output files.

    file_root : string
        (Default: 'test')
        Root name of the files produced.

    grade_frac : List[float]
        (Default: [1.0])
        The amount of time to spend in each speed.

    grade_dims : List[int]
        (Default: [n_dims])
        The number of parameters within each speed.
    """

    import sys
    try:
        import math
        from numpy import pi, log, sqrt
        import PyPolyChord
        from PyPolyChord.settings import PolyChordSettings
        from PyPolyChord.priors import UniformPrior
    except ImportError:
        print("INFO(Scanner): PyPolyChord module import error. Exiting...")
        sys.exit()
    

    # Pass the settings through to PolyChord
    nDerived = n_params - n_dims
    settings = PolyChordSettings(n_dims, nDerived)
    # settings.nlive = kwargs.pop('nlive', n_dims*25)
    settings.nlive = kwargs.pop('n_live_points', n_dims*25)
    # settings.num_repeats = kwargs.pop('num_repeats', n_dims*5)
    settings.num_repeats = kwargs.pop('num_repeats', n_dims*2)  # default suggested by the PolyChord eggbox example
    settings.nprior = kwargs.pop('nprior', -1)
    settings.do_clustering = kwargs.pop('do_clustering', True)
    settings.feedback = kwargs.pop('feedback', 1)
    settings.precision_criterion = kwargs.pop('precision_criterion', 0.001)
    settings.max_ndead = kwargs.pop('max_ndead', -1)
    settings.boost_posterior = kwargs.pop('boost_posterior', 0.0)
    settings.posteriors = kwargs.pop('posteriors', True)
    settings.equals = kwargs.pop('equals', True)
    settings.cluster_posteriors = kwargs.pop('cluster_posteriors', True)
    settings.write_resume = kwargs.pop('write_resume', True)
    settings.write_paramnames = kwargs.pop('write_paramnames', False)
    settings.read_resume = kwargs.pop('read_resume', True)
    settings.write_stats = kwargs.pop('write_stats', True)
    settings.write_live = kwargs.pop('write_live', True)
    settings.write_dead = kwargs.pop('write_dead', True)
    settings.write_prior = kwargs.pop('write_prior', True)
    settings.update_files = kwargs.pop('update_files', settings.nlive)
    settings.base_dir = kwargs.pop('base_dir', 'chains')
    settings.file_root = kwargs.pop('file_root', 'test')
    settings.grade_dims = kwargs.pop('grade_dims', [n_dims])
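    # grade_dims splits the parameters into PolyChord's fast/slow "speed"
    # blocks; a single block of n_dims parameters treats every parameter as
    # equally expensive, so grade_frac is left at its default.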
    ES = kwargs.pop('ES')
    # settings.grade_frac = kwargs.pop('grade_frac', [1.0]*len(self.grade_dims))

    if kwargs:
        raise TypeError('Unexpected **kwargs in PolyChordrun: %r' % kwargs)
    
    # To speed things up, clustering can be disabled:
    # settings.do_clustering = False

    def my_prior(hypercube):
        '''
        Map the unit hypercube (length n_dims) onto the physical input
        parameters theta, using the prior type and range given in ES.InputPar.
        '''
        
        theta = [0.0]*n_dims
        for i, name in enumerate(ES.InPar):
            if ES.InputPar[name][1].lower() == 'flat':
                pmin = float(ES.InputPar[name][2])
                pmax = float(ES.InputPar[name][3])
                theta[i] = hypercube[i] * (pmax - pmin) + pmin
            elif ES.InputPar[name][1].lower() == 'log':
                # Sample uniformly in log10 space, then exponentiate; e.g. with
                # bounds [1e-3, 1e3], hypercube[i] = 0.5 maps to 10**0 = 1.0.
                pmin = math.log10(float(ES.InputPar[name][2]))
                pmax = math.log10(float(ES.InputPar[name][3]))
                theta[i] = 10.0**(hypercube[i] * (pmax - pmin) + pmin)
            else:
                sf.ErrorStop('Not ready. Only "flat" and "log" priors can be used.')
        return theta
    
    def my_Loglikelihood(theta):
        # Pad theta with zeros for the derived parameters, pass the full cube
        # to the user-supplied likelihood, then read the derived values back.
        derived = [0.0]*nDerived
        theta.extend(derived)
        cube = theta
        logL = LogLikelihood(cube, n_dims, n_params)
        phi = cube[n_dims:]
        return logL, phi

        
    output = PyPolyChord.run_polychord(my_Loglikelihood, n_dims, nDerived, settings, my_prior)
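

# A minimal usage sketch, assuming only the call convention visible above:
# LogLikelihood is invoked as LogLikelihood(cube, n_dims, n_params) and may
# write derived values into cube[n_dims:], while ES must expose InPar (ordered
# parameter names) and InputPar (name -> [label, prior type, lower, upper]).
# FakeES and toy_loglike are purely illustrative stand-ins.
import math


class FakeES:
    InPar = ['x', 'y']
    InputPar = {
        'x': ['x', 'flat', -5.0, 5.0],  # uniform prior on [-5, 5]
        'y': ['y', 'log', 1e-3, 1e3],   # log prior on [1e-3, 1e3]
    }


def toy_loglike(cube, n_dims, n_params):
    # Toy Gaussian log-likelihood in (x, log10 y); the single derived
    # parameter is stored in cube[n_dims].
    x, logy = cube[0], math.log10(cube[1])
    cube[n_dims] = x + logy
    return -0.5 * (x**2 + logy**2)


PolyChordrun(toy_loglike, n_dims=2, n_params=3, ES=FakeES(),
             base_dir='chains', file_root='toy_gaussian')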