from scipy.stats import norm
from gleipnir.sampled_parameter import SampledParameter


def test_attributes():
    sp = SampledParameter('sample', norm(0.0, 1.0))
    # The name attribute should match the one given at construction.
    assert sp.name == 'sample'
    # The prior_dist attribute should be a frozen distribution of the same
    # type as the norm(0.0, 1.0) instance passed in.
    assert isinstance(sp.prior_dist, type(norm(0.0, 1.0)))
Example #2
import numpy as np
from scipy.stats import norm
from gleipnir.sampled_parameter import SampledParameter
from gleipnir.nestedsampling.samplers import MetropolisComponentWiseHardNSRejection


def test_metropoliscomponentwisehardnsrejection_func_call():
    sps = [SampledParameter('test', norm(0., 1.))]
    s = MetropolisComponentWiseHardNSRejection(iterations=10, tuning_cycles=2)

    # A dummy log-likelihood that accepts any trial point.
    def loglikelihood(point):
        return 1.

    # Call the sampler with the starting point and a hard likelihood boundary.
    new_point, log_l = s(sps, loglikelihood, np.array([0.5]), 2.)
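    # A basic sanity check on the outputs -- an assumption about the sampler's
    # return types (a new point array and its log-likelihood value):
    assert len(new_point) == len(sps)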
Example #3
The likelihood landscape has an egg carton-like shape; see slide 15 from:
http://www.nbi.dk/~koskinen/Teaching/AdvancedMethodsInAppliedStatistics2016/Lecture14_MultiNest.pdf

"""
import numpy as np
from scipy.stats import uniform
from gleipnir.sampled_parameter import SampledParameter
from gleipnir.polychord import PolyChordNestedSampling

# Number of parameters to sample is 2
ndim = 2
# Set up the list of sampled parameters: the prior is Uniform(0:10*pi) --
# we are using a fixed uniform prior from scipy.stats
sampled_parameters = [
    SampledParameter(name=i, prior=uniform(loc=0.0, scale=10.0 * np.pi))
    for i in range(ndim)
]


# Define the loglikelihood function
def loglikelihood(sampled_parameter_vector):
    chi = (np.cos(sampled_parameter_vector)).prod()
    return (2. + chi)**5


if __name__ == '__main__':

    # Set up the list of sampled parameters: the prior is Uniform(0:10*pi) --
    # we are using a fixed uniform prior from scipy.stats
    sampled_parameters = [
        SampledParameter(name=i, prior=uniform(loc=0.0, scale=10.0 * np.pi))
        for i in range(ndim)
    ]
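    # Construct the Nested Sampler and launch the run. This is a sketch: the
    # keyword names, the population size of 500, and the
    # (log_evidence, error) return pair are assumptions modeled on
    # gleipnir's other examples rather than part of the original snippet.
    PCNS = PolyChordNestedSampling(sampled_parameters=sampled_parameters,
                                   loglikelihood=loglikelihood,
                                   population_size=500)
    log_evidence, log_evidence_error = PCNS.run()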
Example #4
    def __init__(self,
                 model,
                 observable_data,
                 timespan,
                 solver=pysb.simulator.ScipyOdeSimulator,
                 solver_kwargs=None,
                 nest_it=None,
                 builder=None):
        """Inits the NestedSampleIt."""
        if solver_kwargs is None:
            solver_kwargs = dict()
        self.model = model
        self.observable_data = observable_data
        self.timespan = timespan
        self.solver = solver
        self.solver_kwargs = solver_kwargs
        self._ns_kwargs = None
        self._like_data = dict()
        self._data = dict()
        self._data_mask = dict()
        for observable_key in observable_data.keys():
            # Each observable maps to a tuple of
            # (values, standard deviations, time-index mask).
            self._like_data[observable_key] = norm(
                loc=observable_data[observable_key][0],
                scale=observable_data[observable_key][1])
            self._data[observable_key] = observable_data[observable_key][0]
            data_mask = observable_data[observable_key][2]
            if data_mask is None:
                # A mask of None means use every time point.
                data_mask = range(len(self.timespan))
            self._data_mask[observable_key] = data_mask
        self._model_solver = solver(self.model,
                                    tspan=self.timespan,
                                    **solver_kwargs)
        if nest_it is not None:
            parm_mask = nest_it.mask(model.parameters)
            self._sampled_parameters = [
                SampledParameter(parm.name, nest_it[parm.name])
                for i, parm in enumerate(model.parameters) if parm_mask[i]
            ]
            self._rate_mask = parm_mask
        elif builder is not None:
            pnames = [parm.name for parm in builder.estimate_params]
            self._rate_mask = [(parm.name in pnames)
                               for parm in model.parameters]
            self._sampled_parameters = [
                SampledParameter(parm.name,
                                 builder.priors[pnames.index(parm.name)])
                for i, parm in enumerate(model.parameters)
                if self._rate_mask[i]
            ]
        else:
            params = list()
            for rule in model.rules:
                if rule.rate_forward:
                    params.append(rule.rate_forward)
                if rule.rate_reverse:
                    params.append(rule.rate_reverse)
            # Note: here the mask is a list of parameter indices rather than
            # a boolean mask as in the branches above.
            rate_mask = [model.parameters.index(param) for param in params]
            # Default prior: uniform in log10 space, spanning two orders of
            # magnitude on either side of each nominal rate value.
            self._sampled_parameters = [
                SampledParameter(
                    param.name,
                    uniform(loc=np.log10(param.value) - 2.0, scale=4.0))
                for i, param in enumerate(model.parameters) if i in rate_mask
            ]
            self._rate_mask = rate_mask

        self._param_values = np.array(
            [param.value for param in model.parameters])
Example #5
    def sampled_parameters(self):
        # Build a SampledParameter for each stored parameter/prior pair.
        return [
            SampledParameter(name, self.parm[name]) for name in self.keys()
        ]
Example #6
]  #pydream3

rates_of_interest_mask = [
    i in idx_pars_calibrate for i, par in enumerate(model.parameters)
]

# Index of Initial conditions of Arrestin
arrestin_idx = [44]
jnk3_initial_value = 0.6  # total jnk3
jnk3_initial_idxs = [47, 48, 49]
kcat_idx = [36, 37]

param_values = np.array([p.value for p in model.parameters])

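# The priors below are uniform in log10 space: scipy.stats.uniform takes the
# lower bound as loc and the width as scale, so loc=log10(5E-8) and
# scale=log10(1.9E3) - log10(5E-8) give a prior covering
# [log10(5E-8), log10(1.9E3)].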
sampled_parameters = [
    SampledParameter(
        i, uniform(loc=np.log10(5E-8), scale=np.log10(1.9E3) - np.log10(5E-8)))
    for i, pa in enumerate(param_values[rates_of_interest_mask])
]

# We calibrate the pMKK4 - Arrestin-3 reverse reaction rate. We have
# experimental data for this interaction and know that k_r varies from 160 to
# 1068 (standard deviation).
sampled_parameters[0] = SampledParameter(
    0, uniform(loc=np.log10(120), scale=np.log10(1200) - np.log10(120)))
sampled_parameters[6] = SampledParameter(
    6, uniform(loc=np.log10(28), scale=np.log10(280) - np.log10(28)))


def evaluate_cycles(pars1):
    boxes = np.zeros(4)
    box1 = (pars1[21]/pars1[20]) * (pars1[23]/pars1[22]) * (1 / (pars1[1] / pars1[0])) * \
           (1 / (pars1[5]/pars1[4]))
Example #7
import pytest
import numpy as np
from numpy import exp, log, pi
from scipy.stats import uniform
from scipy.special import erf
from gleipnir.sampled_parameter import SampledParameter
from gleipnir.multinest import MultiNestNestedSampling
import os
import glob

# Number of parameters to sample is 5
ndim = 5
# Set up the list of sampled parameters: the prior is Uniform(-5:5) --
# we are using a fixed uniform prior from scipy.stats
sampled_parameters = [
    SampledParameter(name=i, prior=uniform(loc=-5.0, scale=10.0))
    for i in range(ndim)
]
# Set the active point population size
population_size = 100


# Define the loglikelihood function
def loglikelihood(sampled_parameter_vector):
    const = -0.5 * np.log(2 * np.pi)
    return -0.5 * np.sum(sampled_parameter_vector**2) + ndim * const


width = 10.0

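# A minimal sketch of constructing and launching the MultiNest run; the
# keyword names and the (log_evidence, error) return pair are assumptions
# carried over from gleipnir's other examples.
MNNS = MultiNestNestedSampling(sampled_parameters=sampled_parameters,
                               loglikelihood=loglikelihood,
                               population_size=population_size)
log_evidence, log_evidence_error = MNNS.run()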
Example #8
import numpy as np
from numpy import exp, log, pi
from scipy.stats import uniform
from scipy.special import erf
from gleipnir.sampled_parameter import SampledParameter
from gleipnir.nestedsampling import NestedSampling
from gleipnir.nestedsampling.samplers import MetropolisComponentWiseHardNSRejection
from gleipnir.nestedsampling.stopping_criterion import NumberOfIterations
import os
import glob


# Number of parameters to sample is 5
ndim = 5
# Set up the list of sampled parameters: the prior is Uniform(-5:5) --
# we are using a fixed uniform prior from scipy.stats
sampled_parameters = [
    SampledParameter(name=i, prior=uniform(loc=-5.0, scale=10.0))
    for i in range(ndim)
]
# Set the active point population size
population_size = 20

sampler = MetropolisComponentWiseHardNSRejection(iterations=10, tuning_cycles=1)
stopping_criterion = NumberOfIterations(120)

# Define the loglikelihood function
def loglikelihood(sampled_parameter_vector):
    const = -0.5*np.log(2*np.pi)
    return -0.5*np.sum(sampled_parameter_vector**2) + ndim * const

width = 10.0


def analytic_log_evidence(ndim, width):
    # Analytic log-evidence of a unit-variance Gaussian integrated over a
    # uniform box of the given width.
    lZ = (ndim * np.log(erf(0.5 * width / np.sqrt(2)))) - (ndim * np.log(width))
    return lZ
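# A sketch of how the pieces above fit together -- the NestedSampling keyword
# names and the (log_evidence, error) return pair are assumptions based on
# gleipnir's other examples; the estimate can then be compared against the
# analytic value.
NS = NestedSampling(sampled_parameters=sampled_parameters,
                    loglikelihood=loglikelihood,
                    sampler=sampler,
                    population_size=population_size,
                    stopping_criterion=stopping_criterion)
log_evidence, log_evidence_error = NS.run()
print(log_evidence, analytic_log_evidence(ndim, width))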
Example #9
def loglikelihood(position):
    # Trial points are in log10 space; convert back to linear rate values.
    Y = np.copy(position)
    param_values[rates_mask] = 10**Y
    sim = solver.run(param_values=param_values).all
    logp_data = np.sum(like_data.logpdf(sim['A_dimer']))
    if np.isnan(logp_data):
        logp_data = -np.inf
    return logp_data


if __name__ == '__main__':
    sampled_parameters = list()
    sp_kf = SampledParameter('kf', norm(loc=np.log10(0.001), scale=1.))
    sampled_parameters.append(sp_kf)
    sp_kr = SampledParameter('kr', norm(loc=np.log10(1.0), scale=1.))
    sampled_parameters.append(sp_kr)
    # Setup the Nested Sampling run
    n_params = len(sampled_parameters)
    population_size = 100
    # Setup the sampler to use when updating points during the NS run --
    # Here we are using an implementation of the Metropolis Monte Carlo algorithm
    # with component-wise trial moves and augmented acceptance criteria that adds a
    # hard rejection constraint for the NS likelihood boundary.
    sampler = MetropolisComponentWiseHardNSRejection(iterations=20,
                                                     tuning_cycles=1)
    # Setup the stopping criterion for the NS run -- We'll use a fixed number of
    # iterations: 10*population_size
    stopping_criterion = NumberOfIterations(10 * population_size)
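    # Construct the Nested Sampler from the pieces above and launch the run.
    # This is a sketch: the keyword names and the (log_evidence, error)
    # return pair are assumptions modeled on gleipnir's bundled examples.
    NS = NestedSampling(sampled_parameters=sampled_parameters,
                        loglikelihood=loglikelihood,
                        sampler=sampler,
                        population_size=population_size,
                        stopping_criterion=stopping_criterion)
    log_evidence, log_evidence_error = NS.run()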
Example #10
data_x = np.array([1., 2., 3.])
data_y = np.array([1.4, 1.7, 4.1])
data_yerr = np.array([0.2, 0.15, 0.2])

# Define the loglikelihood function: a Gaussian chi-squared likelihood for
# the straight-line model y = m*x + b, with theta = [b, m].
def loglikelihood(theta):
    y = theta[1] * data_x + theta[0]
    chisq = np.sum(((data_y - y) / data_yerr)**2)
    return -chisq / 2.

if __name__ == '__main__':

    # Set up the list of sampled parameters: the prior is Uniform(-5:5) --
    # we are using a fixed uniform prior from scipy.stats
    # Order matches the loglikelihood's indexing: theta = [b, m].
    parm_names = ['b', 'm']
    sampled_parameters = [
        SampledParameter(name=p, prior=uniform(loc=-5.0, scale=10.0))
        for p in parm_names
    ]

    # Set the active point population size
    population_size = 100
    # Setup the Nested Sampling run
    n_params = len(sampled_parameters)
    print("Sampling a total of {} parameters".format(n_params))
    print("Will use NS population size of {}".format(population_size))
    # Construct the Nested Sampler
    DNS = DNest4NestedSampling(sampled_parameters,
                               loglikelihood,
                               population_size,
                               num_steps=1000)
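    # Launch the run and retrieve the evidence estimate. A sketch: the
    # (log_evidence, error) return pair is an assumption based on gleipnir's
    # other nested sampling classes.
    log_evidence, log_evidence_error = DNS.run()
    print("log_evidence: {} +/- {}".format(log_evidence, log_evidence_error))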


def test_initialization():
    sp = SampledParameter('sample', norm(0.0, 1.0))


def test_func_invcdf():
    sp = SampledParameter('sample', norm(0.0, 1.0))
    # The inverse CDF (median) of a standard normal at 0.5 is 0.
    invcdf = sp.invcdf(0.5)
    assert np.isclose(invcdf, 0.0)


def test_func_prior():
    sp = SampledParameter('sample', norm(0.0, 1.0))
    # The standard normal pdf evaluated at 0.5.
    prior = sp.prior(0.5)
    assert np.isclose(prior, 0.3520653267642995)


def test_func_logprior():
    sp = SampledParameter('sample', norm(0.0, 1.0))
    # The standard normal log-pdf evaluated at 0.5.
    logprior = sp.logprior(0.5)
    assert np.isclose(logprior, -1.0439385332046727)


def test_func_rvs():
    sp = SampledParameter('sample', norm(0.0, 1.0))
    # Drawing 10 random variates from the prior should return 10 samples.
    rvs = sp.rvs(10)
    assert len(rvs) == 10