Code example #1
File: mcmc.py Project: adadams/Eccentric-Atmospheres
# ### MCMC LIKELIHOOD SEARCH
# ##### First define a (log) prior function that unpacks a set of parameters.

# `N` and `U` are assumed from usage to be numpy and astropy.units;
# `planet` is the planet model object defined elsewhere in this project.
import numpy as N
import astropy.units as U

def mcmc_prior(parameters):
    # rotation period (in units of the pseudosynchronous period), radiative
    # timescale, nightside temperature, Bond albedo
    p, r, T, a = parameters
    # Flat prior: period > 10 hr, r > 0, T > 0, 0 <= albedo <= 1; else -inf.
    lnp = N.where(p > 10*U.hr/planet.pseudosynchronous_period(), 0, -N.inf)
    lnp = N.where(r > 0, lnp, -N.inf)
    lnp = N.where(T > 0, lnp, -N.inf)
    lnp = N.where(0 <= a, lnp, -N.inf)
    lnp = N.where(a <= 1, lnp, -N.inf)
    return lnp


starting_position = {'3p6': {'values': N.array([0.78, 19.54, 996.4, 0.203]),
                             'units': N.array([planet.pseudosynchronous_period(), U.hr, U.K, 1])},
                     '4p5': {'values': N.array([0.78, 9.40, 1082.2, 0.611]),
                             'units': N.array([planet.pseudosynchronous_period(), U.hr, U.K, 1])},
                     '8p0': {'values': N.array([0.78, 1.40, 1188.6, 0.693]),
                             'units': N.array([planet.pseudosynchronous_period(), U.hr, U.K, 1])}}

num_walkers = 1
num_steps = 1500
# Presumably per-parameter proposal scales, in the same order as the
# starting-position values.
step_size = [0.05, 1, 100, 0.01]

print('Starting MCMC optimization.')
print('{0} walker(s) executing {1} steps per band.'.format(num_walkers, num_steps))
print('Step sizes: {0}'.format(step_size))

simultaneous = False
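
# ##### A minimal sketch of a single random-walk Metropolis update consistent
# ##### with these settings (one walker, per-parameter Gaussian step sizes).
# ##### Illustrative only: `log_posterior` is a hypothetical stand-in for the
# ##### project's combined prior + likelihood evaluation, not shown here.

def metropolis_step(position, log_posterior, step_size):
    # Propose a Gaussian step; accept with probability min(1, posterior ratio).
    proposal = position + N.asarray(step_size) * N.random.standard_normal(len(position))
    log_ratio = log_posterior(proposal) - log_posterior(position)
    if N.log(N.random.uniform()) < log_ratio:
        return proposal, True    # accepted: the walker moves
    return position, False       # rejected: the walker stays put

# e.g. position, accepted = metropolis_step(starting_position['3p6']['values'],
#                                           log_posterior, step_size)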
Code example #2
from data.bandpass.response import light_curve

# ##### Import the likelihood calculation routine.

from stats.gaussian import log_likelihood
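
# ##### For orientation, a Gaussian log-likelihood has the standard form below.
# ##### This is only a sketch: the actual signature and contents of
# ##### stats.gaussian.log_likelihood are not shown in this excerpt.

def gaussian_log_likelihood_sketch(model, data, sigma):
    # ln L = -1/2 * sum[ (data - model)^2 / sigma^2 + ln(2*pi*sigma^2) ]
    return -0.5 * N.sum((data - model)**2 / sigma**2 + N.log(2*N.pi*sigma**2))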

# ##### Specify the spatial and time resolution for the calculations, including the number of orbits to run.

planet.set_resolution(longitude_resolution=72,
                      latitude_resolution=36,
                      time_resolution=200,
                      num_orbits=5)

# ##### Specify ranges to be used for the grid in parameter space to be sampled. Make sure that the rotation period is the first parameter. For the blackbody model one also specifies the radiative timescale at 1000 K, the "nightside" (baseline) temperature of the planet, and the global Bond albedo.

prot = N.linspace(0.1, 2.5, num=50) * planet.pseudosynchronous_period()
t1000 = N.linspace(2, 100, num=50)
#t1000[0] += 0.001
Tn = N.linspace(1145.93, 2000, num=1) * U.K
albedo = N.linspace(0.3829, 1.0, num=1)

parameters = [prot, t1000 * U.hr, Tn, albedo]

# Get a list of the search lengths along each parameter dimension, as well as
# the axis with the greatest number of elements and the total number of
# search elements.
search_dims = [len(l) for l in parameters]
max_axis = N.argmax(search_dims)
num_elements = N.prod(search_dims)

# The above information allows us to determine how we should handle the search
# parallelization (in a broadcasting sense). Running each point in the grid in
# series is certainly the slowest, but running everything broadcast as one
# single array can throw memory errors if the number of elements is too high.
# Here we set an estimate of the maximum number of elements that will feasibly
# run at once, and break the original array into components of at most this
# size. We choose to break the array along the axis of greatest length (if
# multiple are equal, we choose the first of those).
max_blocksize = 100
num_chunks = int(N.ceil(num_elements / max_blocksize))
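
# ##### A sketch of the chunked loop this sets up (the loop body is hypothetical;
# ##### the actual grid evaluation is not shown in this excerpt). The longest
# ##### axis is sliced so each broadcast block holds at most roughly
# ##### max_blocksize elements: for the ranges above, 50 rotation periods split
# ##### into 25 chunks of 2, giving 2*50*1*1 = 100 elements per block.

chunk_bounds = N.linspace(0, search_dims[max_axis], num=num_chunks + 1).astype(int)
for start, stop in zip(chunk_bounds[:-1], chunk_bounds[1:]):
    sub_parameters = list(parameters)
    sub_parameters[max_axis] = parameters[max_axis][start:stop]
    # ...evaluate the light curves / log-likelihood on this sub-grid, then
    # stitch the per-chunk results back together along max_axis.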