Example #1
import numpy as np
import pymc as pm
from pymc import gp


def make_model(n_fmesh=11, fmesh_is_obsmesh=False):
    x = np.arange(-1., 1., .1)

    # Prior parameters of C
    nu = pm.Uniform('nu', 1., 3, value=1.5)
    phi = pm.Lognormal('phi', mu=.4, tau=1, value=1)
    theta = pm.Lognormal('theta', mu=.5, tau=1, value=1)

    # The covariance dtrm C is valued as a Covariance object.
    @pm.deterministic
    def C(eval_fun=gp.matern.euclidean, diff_degree=nu, amp=phi, scale=theta):
        return gp.NearlyFullRankCovariance(eval_fun,
                                           diff_degree=diff_degree,
                                           amp=amp,
                                           scale=scale)

    # Prior parameters of M
    a = pm.Normal('a', mu=1., tau=1., value=1)
    b = pm.Normal('b', mu=.5, tau=1., value=0)
    c = pm.Normal('c', mu=2., tau=1., value=0)

    # The mean M is valued as a Mean object.
    def linfun(x, a, b, c):
        return a * x**2 + b * x + c

    @pm.deterministic
    def M(eval_fun=linfun, a=a, b=b, c=c):
        return gp.Mean(eval_fun, a=a, b=b, c=c)

    # The actual observation locations
    actual_obs_locs = np.linspace(-.8, .8, 4)

    if fmesh_is_obsmesh:
        o = actual_obs_locs
        fmesh = o
    else:
        # The unknown observation locations
        o = pm.Normal('o', actual_obs_locs, 1000., value=actual_obs_locs)
        fmesh = np.linspace(-1, 1, n_fmesh)

    # The GP submodel
    sm = gp.GPSubmodel('sm', M, C, fmesh)

    # Observation variance
    V = pm.Lognormal('V', mu=-1, tau=1, value=.0001)
    observed_values = pm.rnormal(actual_obs_locs**2, 10000)

    # The data d is just array-valued. It's normally distributed about sm.f(o).
    d = pm.Normal('d',
                  mu=sm.f(o),
                  tau=1. / V,
                  value=observed_values,
                  observed=True)

    return locals()
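
make_model() above returns its locals() as a node dictionary, which is how PyMC 2 model factories are usually consumed. A minimal sampling sketch, assuming PyMC 2.x and that the make_model above is in scope; the chain length and burn-in are arbitrary choices:

import pymc as pm

M = pm.MCMC(make_model(n_fmesh=21))  # pm.MCMC accepts a dict of model nodes
M.sample(iter=5000, burn=1000)       # assumed chain settings
print(M.trace('nu')[:].mean())       # posterior mean of the Matern smoothness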
Example #2
    def test_airline(self):
        # Create problem data.
        E = 6
        C = 2
        P = 6

        u = numpy.random.randint(10, 20, E)
        p = numpy.random.rand(P)

        d_mu = 100 * numpy.ones(P)  # Not used
        d_Sigma = 0.01 * numpy.eye(P)  # Not used

        m = 10  # Num samples

        A = [self.get_A(i, E, P) for i in range(C)]

        # Create optimization variables.
        x = [NonNegative(E) for i in range(C)]
        y = NonNegative(P)

        # Create second stage problem.
        capacity = [A[i] * y <= x[i] for i in range(C)]
        d = RandomVariable(pymc.Lognormal(name="d", mu=0, tau=1, size=P))
        p2 = Problem(Minimize(-y.T * p), [y <= d] + capacity)
        Q = partial_optimize(p2, [y], [x[0], x[1]])

        # Create and solve first stage problem.
        p1 = Problem(Minimize(expectation(Q, m)), [sum(x) <= u])
        p1.solve()

        self.assert_feas(p1)
Example #3
def _lognormal_concentration_prior(name, stated_concentration, uncertainty,
                                   unit):
    """Define a pymc prior for a concentration, using micromolar units.

    :rtype: pymc.Lognormal
    """
    return pymc.Lognormal(
        name,
        mu=log(stated_concentration / unit),
        tau=1.0 / log(1.0 + (uncertainty / stated_concentration)**2),
        value=stated_concentration / unit)
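
For orientation, a hedged example of calling the helper above; the numbers are illustrative and assume concentrations are given in molar, with unit = 1e-6 selecting micromolar:

# hypothetical values: 50 uM stated concentration, 10 uM absolute uncertainty
P0 = _lognormal_concentration_prior('P0', 50e-6, 10e-6, 1e-6)
print(P0.value)  # initial value is stated_concentration / unit = 50.0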
def make_model():
    import cPickle as pickle
    with open('reaction_kinetics_data.pickle', 'rb') as fd:
        data = pickle.load(fd)
    y_obs = data['y_obs']
    # The priors for the reaction rates:
    k1 = pymc.Lognormal('k1', mu=2, tau=1./(10. ** 2), value=5.)
    k2 = pymc.Lognormal('k2', mu=4, tau=1./(10. ** 2), value=5.)
    # The noise term
    #sigma = pymc.Uninformative('sigma', value=1.)
    sigma = pymc.Exponential('sigma', beta=1.)
    # The forward model
    re_solver = ReactionKineticsSolver()
    @pymc.deterministic
    def model_output(value=None, k1=k1, k2=k2):
        return re_solver(k1, k2)
    # The likelihood term
    @pymc.stochastic(observed=True)
    def output(value=y_obs, mod_out=model_output, sigma=sigma, gamma=1.):
        return gamma * pymc.normal_like(value, mu=mod_out, tau=1 / sigma ** 2)
    return locals()
Example #5
import math

import numpy as np
import pymc


def make_model():
    #c = pymc.Container([pymc.Lognormal('x', mu=math.log(1.), tau=1.),
    #                    pymc.Lognormal('z', mu=math.log(2.), tau=3.)])
    #x = pymc.Exponential('x', beta=0.1)
    x = pymc.Lognormal('x',
                       mu=np.array([math.log(1.), math.log(2.)]),
                       tau=np.array([1., 3.]))

    @pymc.stochastic(observed=True)
    def y(value=0.01, x=x):
        return pymc.lognormal_like(value, mu=x[0], tau=1.)

    return locals()
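
A minimal point-estimation sketch for the model above, assuming PyMC 2.x and that make_model is in scope:

import pymc

m = pymc.MAP(make_model())  # maximum a posteriori estimate of the free parameters
m.fit()
print(m.x.value)            # fitted value of the 2-component lognormal x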
Example #6
    def test_nested_initvals(self):
        # See issue #5168
        with pm.Model() as pmodel:
            one = pm.LogNormal("one",
                               mu=np.log(1),
                               sigma=1e-5,
                               initval="prior")
            two = pm.Lognormal("two",
                               mu=np.log(one * 2),
                               sigma=1e-5,
                               initval="prior")
            three = pm.LogNormal("three",
                                 mu=np.log(two * 2),
                                 sigma=1e-5,
                                 initval="prior")
            four = pm.LogNormal("four",
                                mu=np.log(three * 2),
                                sigma=1e-5,
                                initval="prior")
            five = pm.LogNormal("five",
                                mu=np.log(four * 2),
                                sigma=1e-5,
                                initval="prior")
            six = pm.LogNormal("six",
                               mu=np.log(five * 2),
                               sigma=1e-5,
                               initval="prior")

        ip_vals = list(
            make_initial_point_fn(model=pmodel,
                                  return_transformed=True)(0).values())
        assert np.allclose(np.exp(ip_vals), [1, 2, 4, 8, 16, 32], rtol=1e-3)

        ip_vals = list(
            make_initial_point_fn(model=pmodel,
                                  return_transformed=False)(0).values())
        assert np.allclose(ip_vals, [1, 2, 4, 8, 16, 32], rtol=1e-3)

        pmodel.initial_values[four] = 1

        ip_vals = list(
            make_initial_point_fn(model=pmodel,
                                  return_transformed=True)(0).values())
        assert np.allclose(np.exp(ip_vals), [1, 2, 4, 1, 2, 4], rtol=1e-3)

        ip_vals = list(
            make_initial_point_fn(model=pmodel,
                                  return_transformed=False)(0).values())
        assert np.allclose(ip_vals, [1, 2, 4, 1, 2, 4], rtol=1e-3)
Example #7
    def __init__(self, name, stated_concentration, uncertainty_percent):
        """
        :param name: str
        :param stated_concentration: float, mM
        :param uncertainty_percent: float, relative uncertainty, 0 < uncertainty_percent < 1
        """
        m = stated_concentration
        uncertainty = stated_concentration * uncertainty_percent
        v = uncertainty**2
        model = pymc.Lognormal(name,
                               mu=numpy.log(m / numpy.sqrt(1 + (v / (m**2)))),
                               tau=1.0 / numpy.log(1 + (v / (m**2))),
                               value=m)

        setattr(self, name, model)
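
The mu/tau expressions above are the standard moment-matching parameterization of a lognormal with mean m and variance v. A small numerical check in plain NumPy (the values of m and v are chosen arbitrarily):

import numpy

m, v = 2.0, 0.25                              # arbitrary stated mean and variance
sigma2 = numpy.log(1 + v / m**2)              # tau above is 1 / sigma2
mu = numpy.log(m / numpy.sqrt(1 + v / m**2))
# the mean and variance implied by Lognormal(mu, sigma2) recover m and v:
print(numpy.exp(mu + sigma2 / 2))                            # -> 2.0
print((numpy.exp(sigma2) - 1) * numpy.exp(2 * mu + sigma2))  # -> 0.25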
Example #8
    fillings.append(row.filling)

    dos_sub = row.data['dos_sub'][1][0] + row.data['dos_sub'][1][1]
    dos_sub_energy = row.data['dos_sub'][0]
    dos_sub = interpolate_nrg(dos_sub, dos_sub_energy, energy)
    dos_ds.append(dos_sub)

    dos_ads = row.data['dos_ads'][1][0] + row.data['dos_ads'][1][1]
    dos_ads_energy = row.data['dos_ads'][0]
    dos_ads = interpolate_nrg(dos_ads, dos_ads_energy, energy)
    dos_adss.append(dos_ads)

# priors
dE_0 = pm.Normal('dE_0', -3.25, 1, value=-3.25)
eps_a = pm.Normal('eps_a', -5.0, 1, value=-5.0)
delta_0 = pm.Lognormal('delta_0', 1, 0.25, value=1.0)
alpha = pm.Uniform('alpha', 0, 1.0, value=0.036)
beta = pm.Lognormal('beta', 2, 1, value=2.1)

var_1 = pm.InverseGamma('var_1', 2.0, 0.05, value=0.05)
var_2 = pm.InverseGamma('var_2', 2.0, 0.1, value=0.1)

lamb = .01


@pm.stochastic(observed=True)
def custom_stochastic(eps_a=eps_a,
                      beta=beta,
                      delta_0=delta_0,
                      alpha=alpha,
                      dE_0=dE_0,
Example #9
    def test_dc_power(self):
        # Load problem data.
        fp = "pf_dc/pf_dc.mat"
        data = scipy.io.loadmat(fp)

        A = data.get("A")
        n, E = A.shape

        gen_idxes = data.get("gen_idxes")
        wind_idxes = data.get("wind_idxes")
        load_idxes = data.get("load_idxes")

        num_gens = gen_idxes.size
        num_winds = wind_idxes.size
        num_loads = load_idxes.size

        gen_idxes = gen_idxes.reshape(
            num_gens) - 1  # "-1" to switch from Matlab to Python indexing
        wind_idxes = wind_idxes.reshape(num_winds) - 1
        load_idxes = load_idxes.reshape(num_loads) - 1

        c_g = data.get("c_g")
        # Swap the first two entries; fancy indexing copies, so the swap does
        # not suffer from NumPy view aliasing the way a temp-variable swap does.
        c_g[[0, 1]] = c_g[[1, 0]]

        c_w = data.get("c_w")

        p = data.get("p").reshape(n)
        p_orig = p

        u_lines = 1  # Note: depending on this choice (and the realizations of
        # p_w below), the problem may or may not be feasible, so be careful.
        u_gens = 1

        m = 100  # Num samples

        # Create optimization variables.
        p_g1, p_g2 = NonNegative(), NonNegative()
        z = NonNegative(num_winds)
        p_lines = Variable(E)
        p_w = RandomVariable(
            pymc.Lognormal(name="p_w", mu=1, tau=1, size=num_winds))

        # Create second stage problem.
        p_g = vstack(p_g1, p_g2)
        p = vstack(p_g1, p_g2, p[load_idxes[:-1]], p_w - z, p[load_idxes[-1]])
        p2 = Problem(Minimize(p_g.T * c_g + z.T * c_w), [
            A * p_lines == p, p_g <= u_gens, z <= p_w,
            abs(p_lines) <= u_lines
        ])
        Q = partial_optimize(p2, [p_g2, z, p_lines], [p_g1])

        # Create and solve first stage problem.
        p1 = Problem(Minimize(expectation(Q, m)))
        p1.solve()

        # Plot results.
        B_binary = data.get("B_binary")

        coords = data.get("coords")
        coords[0, 0] += 0.1  # Drawing was getting clipped in Python...

        # Draw edges between vertices.
        fig = pyplot.figure()

        for i in range(n - 1):
            for j in range(i + 1, n):
                if B_binary[i, j] == 1:
                    pyplot.plot((coords[i, 0], coords[j, 0]),
                                (coords[i, 1], coords[j, 1]), '-k')

        # Draw symbols and power generation/consumption for each vertex.
        lognorm_mean = math.exp(1 + pow(1, 2) / 2.0)

        fs = 16
        shift_x = 0
        shift_y = 0.125
        for i in range(n):

            if i in gen_idxes:
                pyplot.plot(coords[i, 0],
                            coords[i, 1],
                            color="crimson",
                            marker="s",
                            markersize=12)

                if i == 0:
                    pyplot.text(coords[i, 0] + shift_x,
                                coords[i, 1] + shift_y,
                                "{0:.2f}".format(p_g1.value),
                                fontsize=fs)
                else:
                    pyplot.text(coords[i, 0] + shift_x,
                                coords[i, 1] + shift_y,
                                "sec. stg.",
                                fontsize=fs)

            elif i in wind_idxes:
                pyplot.plot(coords[i, 0],
                            coords[i, 1],
                            color="blue",
                            marker="s",
                            markersize=12)
                pyplot.text(coords[i, 0] + shift_x,
                            coords[i, 1] + shift_y,
                            "{0:.2f}".format(lognorm_mean),
                            fontsize=fs)

            else:
                pyplot.plot(coords[i, 0], coords[i, 1], 'ko')
                pyplot.text(coords[i, 0] + shift_x,
                            coords[i, 1] + shift_y,
                            "{0:.2f}".format(p_orig[i]),
                            fontsize=fs)

        # pyplot.axis([0, 4, 0, 3])
        pyplot.axis("off")
        fig.savefig("grid.png", bbox_inches="tight")

        # Check results.
        self.assert_feas(p1)
Example #10
r = pm.Normal('r', rmu, rtau)

qcv = pm.Uniform('qcv', eps, 1., value=[0.1] * U)
qmu = pm.Uniform('qmu', eps, 2., value=[1] * U)
qtau = pm.Lambda('qtau', lambda cv=qcv, mu=qmu: (cv * mu)**-2)
q = pm.Normal('q', qmu, qtau)

# No catch observation model -- assumed known with no error
rem = pm.Lambda('rem', lambda C=CAT, K=K: C / K)

# Biomass process model
b0_mu = pm.Uniform('b0_mu', b0x / 2., 1.25, value=b0x)
bcv = pm.Uniform('bcv', eps, 1., value=[0.1] * U)
btau = pm.Lambda('btau', lambda cv=bcv, mu=b0_mu: (cv * mu)**-2)

b[0, :] = pm.Lognormal('b0', mu=b0_mu, tau=btau, value=b_inits[0, :])
for j in range(U):
    for i in range(1, N):
        bmean = pm.Lambda("bmean",
                          lambda B=b[i - 1, j], R=r[j], C=rem[i - 1, j], eps=
                          eps: np.log(max(B + R * B * (1. - B) - C, eps)))
        b[i, j] = pm.Lognormal('b%i' % i,
                               mu=bmean,
                               tau=btau[j],
                               value=b_inits[i, j])

# Biomass observation model
#   require a fall / spring surveys correction
#   spring surveys from 1998 to 2003
#   want B to represent the total biomass available in fishing year y
#   when surveys are conducted in fall, Btot(t) = Bsurvey(t) + removals(t)
Example #11
    def make_pymc_model():
        # data
        madata = 1

        # parameters
        cs = pymc.Normal('CS', mu=0., tau=1., plot=False)
        m = pymc.Normal('M', mu=mmean, tau=1./mstd**2, plot=False)
        Sre = pymc.Weibull('Sre', alpha=wblc, beta=wblscale, plot=False)
        Na = pymc.Lognormal('Na', mu=logNamean, tau=1./logNastd**2, plot=False)
        a0 = pymc.Normal('A0', mu=a0mean, tau=1./a0std**2, plot=True)

        #transformed parameters
        @pymc.deterministic(name='C', plot=False)
        def C(cs=cs, m=m):
            um = (m-mmean)/mstd
            uLogC = -np.sqrt(rolnR**2)*um-np.sqrt(1-rolnR**2)*cs
            Cout = np.exp(logCmean+uLogC*logCstd)
            return Cout

        @pymc.deterministic(name='Kappa', plot=False)
        def kappa(C=C, Sre=Sre, m=m, Na=Na):
            kout = C*(Sre**m)*(G**m)*(np.pi**(m/2.))*Na
            return kout
        aarray = np.empty(life+1, dtype=object)
        aarray[0] = a0
        for i in np.arange(1,life+1):
            @pymc.deterministic(name='A{}'.format(i), plot=True)
            def ai(kappa=kappa, m=m, ap=aarray[i-1]):
                # aiout = ap+1e3*kappa*(ap*1e-3)**(m/2.)
                if m==2:
                    aiout = ap*np.exp(kappa*1.)
                else:
                    tmp = 1.0-m/2.
                    if ap<=0: ap=1e-12
                    diff = kappa*1.*tmp+ap**tmp
                    if diff>=0:
                        aiout = diff**(1./tmp)
                    else:
                        aiout = acrit
                    if aiout>acrit:
                        aiout = acrit
                return aiout
            aarray[i] = ai


        # observable variable
        mivalue = madata
        if mivalue is None:
            obsvalue = False
            mivalue = 0
        else:
            obsvalue = True
        @pymc.stochastic(name='M{}'.format(i), plot=False, dtype=float, observed=obsvalue)
        def Mi(value=mivalue, ai=aarray[3]):
            def logp(value, ai):
                pod = 1.-stats.norm.cdf((np.log(ai)-lmd)/beta)
                pinrange = stats.norm.cdf(2.0619, loc=ai, scale=sigmae)-\
                        stats.norm.cdf(1.8243, loc=ai, scale=sigmae)
                if value == 0:
                    p = (1.-pod)+pod*(1.-pinrange)
                    return np.log(p)
                else:
                    return np.log(pod*pinrange)
            def random(ai):
                pod = 1.-stats.norm.cdf((np.log(ai)-lmd)/beta)
                pinrange = stats.norm.cdf(2.0619, loc=ai, scale=sigmae)-\
                        stats.norm.cdf(1.8243, loc=ai, scale=sigmae)
                if np.random.rand()<=pod:
                    return stats.bernoulli.rvs(p=pinrange,size=1)
                else:
                    return 0

        return locals()
Example #12
inv.mmin, inv.mmax = [], []

for name, lower, upper, initial, sigma, dist in zip(m_name, m_min, m_max,
                                                    m_init, sigmam, pdist):
    sigma = sigma / 2
    if upper != lower:
        logger.debug(
            'Sample {} with a {} distribution, an initial value of {} and an uncertainty of {}'
            .format(name, dist, initial, sigma))
        if dist == 'Gaus':
            sigma = sigma / 2
            p = pymc.Normal(name, mu=initial, tau=1. / sigma**2, value=initial)
        elif dist == 'Logn':
            frac = (sigma * 2) / 5
            p = pymc.Lognormal(name,
                               mu=math.log(initial),
                               tau=1. / (0.125 * frac)**2)
        else:
            p = pymc.Uniform(name, lower, upper, value=initial)

        inv.Sampled.append(name)
        inv.Priors.append(p)
        inv.mu.append(initial)
        inv.tau.append(sigma)
        inv.mmin.append(lower)
        inv.mmax.append(upper)

    elif upper == lower:
        logger.debug('{} has a sigma null. Not sampled'.format(name))
        inv.Fixed.append(name)
Example #13
import pymc as pm
import numpy as np
import scipy.special as ss
"""
dd as dirichlet_distribution
dp as dirichilet_processs
"""

DD_PRIOR_MEAN = 10.
DD_PRIOR_STD = 1.0
DD_PRIOR_CORRELATION = 0.5

MAX_DIMENSION = 6

dp_prior = pm.Lognormal('dp_prior_mean', mu=0, tau=4)
dd_prior_mean = pm.Lognormal('dd_prior_mean', mu=3, tau=4)

dd_prior = {}
m_list = {}
cov_list = {}
for i in range(2, MAX_DIMENSION + 1):
    m_list[i] = np.array([dd_prior_mean] * i)
    cov_list[i] = np.array(
        [[DD_PRIOR_STD * DD_PRIOR_STD * DD_PRIOR_CORRELATION] * i] * i)
    for j in range(i):
        cov_list[i][j][j] = DD_PRIOR_STD * DD_PRIOR_STD


@pm.stochastic
def dirichlet_distribution_prior2(value=np.array([DD_PRIOR_MEAN] * 2)):
    # Assumed completion: the original example is cut off here, so this body
    # mirrors the dirichlet_prior stochastic defined further below.
    if np.any(value <= 0):
        return -np.inf
    return pm.distributions.mv_normal_cov_like(value,
                                               mu=m_list[2],
                                               cov=cov_list[2])


"""
Created on Thu Aug 31 22:46:49 2017

@author: Edward Coen
"""
import pymc as pm
import cv2
import numpy as np
import scipy.stats as st

ITER_NUM = 10
PRIOR_MEAN = 10
PRIOR_STD = 1
PRIOR_CORRELATION = 0.0
N_DIMENSION = 2

prior_mean = pm.Lognormal('prior_mean', mu=0, tau=4)
PRIOR_COV = np.array(
    [[PRIOR_STD * PRIOR_STD * PRIOR_CORRELATION] * N_DIMENSION] * N_DIMENSION)
for i in range(N_DIMENSION):
    PRIOR_COV[i][i] = PRIOR_STD * PRIOR_STD

proportion_array = np.array([0.9])
for i in range(ITER_NUM):

    @pm.stochastic
    def dirichlet_prior(value=np.array([PRIOR_MEAN] * N_DIMENSION)):
        if np.any(value <= 0):
            return -np.inf
        # cov argument assumed; the original example is truncated at this call
        return pm.distributions.mv_normal_cov_like(value,
                                                   mu=np.array([prior_mean] *
                                                               N_DIMENSION),
                                                   cov=PRIOR_COV)
Example #15
def make_model(Pstated,
               dPstated,
               Lstated,
               dLstated,
               top_complex_fluorescence=None,
               top_ligand_fluorescence=None,
               bottom_complex_fluorescence=None,
               bottom_ligand_fluorescence=None,
               DG_prior='uniform',
               concentration_priors='lognormal',
               use_primary_inner_filter_correction=True,
               use_secondary_inner_filter_correction=True,
               assay_volume=100e-6,
               well_area=0.1586,
               epsilon_ex=None,
               depsilon_ex=None,
               epsilon_em=None,
               depsilon_em=None,
               ligand_ex_absorbance=None,
               ligand_em_absorbance=None,
               link_top_and_bottom_sigma=True):
    """
    Build a PyMC model for an assay that consists of N wells of protein:ligand at various concentrations and an additional N wells of ligand in buffer, with the ligand at the same concentrations as the corresponding protein:ligand wells.

    Parameters
    ----------
    Pstated : numpy.array of N values
       Stated protein concentrations for all protein:ligand wells of assay. Units of molarity.
    dPstated : numpy.array of N values
       Absolute uncertainty in stated protein concentrations for all wells of assay. Units of molarity.
       Uncertainties currently cannot be zero.
    Lstated : numpy.array of N values
       Stated ligand concentrations for all protein:ligand and ligand wells of assay, which must be the same with and without protein. Units of molarity.
    dLstated : numpy.array of N values
       Absolute uncertainty in stated protein concentrations for all wells of assay. Units of molarity.
       Uncertainties currently cannot be zero
    top_complex_fluorescence : numpy.array of N values, optional, default=None
       Fluorescence intensity (top) for protein:ligand mixture.
    top_ligand_fluorescence : numpy.array of N values, optional, default=None
       Fluorescence intensity (top) for ligand control.
    bottom_complex_fluorescence: numpy.array of N values, optional, default=None
       Fluorescence intensity (bottom) for protein:ligand mixture.
    bottom_ligand_fluorescence : numpy.array of N values, optional, default=None
       Fluorescence intensity (bottom) for ligand control.
    DG_prior : str, optional, default='uniform'
       Prior to use for reduced free energy of binding (DG): 'uniform' (uniform over reasonable range), or 'chembl' (ChEMBL-inspired distribution); default: 'uniform'
    concentration_priors : str, optional, default='lognormal'
       Prior to use for protein and ligand concentrations. Available options are ['lognormal', 'gaussian'].
    use_primary_inner_filter_correction : bool, optional, default=True
       If true, will infer ligand extinction coefficient epsilon and apply primary inner filter correction to attenuate excitation light.
    use_secondary_inner_filter_correction : bool, optional, default=True
       If true, will infer ligand extinction coefficient epsilon and apply secondary inner filter correction to attenuate emitted light.
    assay_volume : float, optional, default=100e-6
       Assay volume. Units of L. Default 100 uL.
    well_area : float, optional, default=0.1586
       Well area. Units of cm^2. Default 0.1586 cm^2, for half-area plate.
    epsilon_ex, depsilon_ex : float, optional, default=None
       Orthogonal measurement of ligand extinction coefficient at excitation wavelength (and uncertainty). If None, will use a uniform prior.
    epsilon_em, depsilon_em : float, optional, default=None
       Orthogonal measurement of ligand extinction coefficient at emission wavelength (and uncertainty). If None, will use a uniform prior.
    ligand_ex_absorbance : np.array of N values, optional, default=None
       Ligand absorbance measurement for excitation wavelength.
    ligand_em_absorbance : np.array of N values, optional, default=None
       Ligand absorbance measurement for emission wavelength.
    link_top_and_bottom_sigma : bool, optional, default=True
       If True, will link top and bottom fluorescence uncertainty sigma.

    Returns
    -------
    pymc_model : dict
       A dict mapping variable names to objects that can be used as a PyMC model object.

    Examples
    --------
    Create a simple model

    >>> N = 12 # 12 wells per series of protein:ligand or ligand alone
    >>> Pstated = np.ones([N], np.float64) * 1e-6
    >>> Lstated = 20.0e-6 / np.array([10**(float(i)/2.0) for i in range(N)])
    >>> dPstated = 0.10 * Pstated
    >>> dLstated = 0.08 * Lstated
    >>> top_complex_fluorescence = np.array([ 689., 683., 664., 588., 207., 80., 28., 17., 10., 11., 10., 10.], np.float32)
    >>> top_ligand_fluorescence = np.array([ 174., 115., 57., 20., 7., 6., 6., 6., 6., 7., 6., 7.], np.float32)
    >>> from pymcmodels import make_model
    >>> pymc_model = make_model(Pstated, dPstated, Lstated, dLstated, top_complex_fluorescence=top_complex_fluorescence, top_ligand_fluorescence=top_ligand_fluorescence)

    """

    # Compute path length.
    path_length = assay_volume * 1000 / well_area  # cm, needed for inner filter effect corrections

    # Compute number of samples.
    N = len(Lstated)

    # Check input.
    # TODO: Check fluorescence and absorbance measurements for correct dimensions.
    if (len(Pstated) != N):
        raise Exception('len(Pstated) [%d] must equal len(Lstated) [%d].' %
                        (len(Pstated), len(Lstated)))
    if (len(dPstated) != N):
        raise Exception('len(dPstated) [%d] must equal len(Lstated) [%d].' %
                        (len(dPstated), len(Lstated)))
    if (len(dLstated) != N):
        raise Exception('len(dLstated) [%d] must equal len(Lstated) [%d].' %
                        (len(dLstated), len(Lstated)))

    # Note whether we have top or bottom fluorescence measurements.
    top_fluorescence = (top_complex_fluorescence is not None) or (
        top_ligand_fluorescence is not None
    )  # True if any top fluorescence measurements provided
    bottom_fluorescence = (bottom_complex_fluorescence is not None) or (
        bottom_ligand_fluorescence is not None
    )  # True if any bottom fluorescence measurements provided

    # Create an empty dict to hold the model.
    model = dict()

    # Prior on binding free energies.
    if DG_prior == 'uniform':
        DeltaG = pymc.Uniform(
            'DeltaG', lower=DG_min,
            upper=DG_max)  # binding free energy (kT), uniform over huge range
    elif DG_prior == 'chembl':
        DeltaG = pymc.Normal(
            'DeltaG', mu=0, tau=1. / (12.5**2)
        )  # binding free energy (kT), using a Gaussian prior inspired by ChEMBL
    else:
        raise Exception(
            "DG_prior = '%s' unknown. Must be one of 'uniform' or 'chembl'." %
            DG_prior)
    # Add to model.
    model['DeltaG'] = DeltaG

    # Create priors on true concentrations of protein and ligand.
    if concentration_priors == 'lognormal':
        Ptrue = pymc.Lognormal(
            'Ptrue',
            mu=np.log(Pstated**2 / np.sqrt(dPstated**2 + Pstated**2)),
            tau=np.sqrt(np.log(1.0 + (dPstated / Pstated)**2))**(
                -2))  # protein concentration (M)
        Ltrue = pymc.Lognormal(
            'Ltrue',
            mu=np.log(Lstated**2 / np.sqrt(dLstated**2 + Lstated**2)),
            tau=np.sqrt(np.log(1.0 + (dLstated / Lstated)**2))**(
                -2))  # ligand concentration (M)
        Ltrue_control = pymc.Lognormal(
            'Ltrue_control',
            mu=np.log(Lstated**2 / np.sqrt(dLstated**2 + Lstated**2)),
            tau=np.sqrt(np.log(1.0 + (dLstated / Lstated)**2))**(
                -2))  # ligand concentration (M)
    elif concentration_priors == 'gaussian':
        # Warning: These priors could lead to negative concentrations.
        Ptrue = pymc.Normal('Ptrue', mu=Pstated,
                            tau=dPstated**(-2))  # protein concentration (M)
        Ltrue = pymc.Normal('Ltrue', mu=Lstated,
                            tau=dLstated**(-2))  # ligand concentration (M)
        Ltrue_control = pymc.Normal(
            'Ltrue_control', mu=Lstated,
            tau=dLstated**(-2))  # ligand concentration (M)
    else:
        raise Exception(
            "concentration_priors = '%s' unknown. Must be one of ['lognormal', 'gaussian']."
            % concentration_priors)
    # Add to model.
    model['Ptrue'] = Ptrue
    model['Ltrue'] = Ltrue
    model['Ltrue_control'] = Ltrue_control

    # extinction coefficient
    if use_primary_inner_filter_correction:
        if epsilon_ex:
            model['epsilon_ex'] = pymc.Lognormal(
                'epsilon_ex',
                mu=np.log(epsilon_ex**2 /
                          np.sqrt(depsilon_ex**2 + epsilon_ex**2)),
                tau=np.sqrt(np.log(1.0 + (depsilon_ex / epsilon_ex)**2))**
                (-2))  # prior is centered on measured extinction coefficient
        else:
            model['epsilon_ex'] = pymc.Uniform(
                'epsilon_ex', lower=0.0, upper=1000e3, value=70000.0
            )  # extinction coefficient or molar absorptivity for ligand, units of 1/M/cm

    if use_secondary_inner_filter_correction:
        if epsilon_em:
            model['epsilon_em'] = pymc.Lognormal(
                'epsilon_em',
                mu=np.log(epsilon_em**2 /
                          np.sqrt(depsilon_em**2 + epsilon_em**2)),
                tau=np.sqrt(np.log(1.0 + (depsilon_em / epsilon_em)**2))**
                (-2))  # prior is centered on measured extinction coefficient
        else:
            model['epsilon_em'] = pymc.Uniform(
                'epsilon_em', lower=0.0, upper=1000e3, value=0.0
            )  # extinction coefficient or molar absorptivity for ligand, units of 1/M/cm

    # Min and max observed fluorescence.
    Fmax = 0.0
    Fmin = 1e6
    if top_complex_fluorescence is not None:
        Fmax = max(Fmax, top_complex_fluorescence.max())
        Fmin = min(Fmin, top_complex_fluorescence.min())
    if top_ligand_fluorescence is not None:
        Fmax = max(Fmax, top_ligand_fluorescence.max())
        Fmin = min(Fmin, top_ligand_fluorescence.min())
    if bottom_complex_fluorescence is not None:
        Fmax = max(Fmax, bottom_complex_fluorescence.max())
        Fmin = min(Fmin, bottom_complex_fluorescence.min())
    if bottom_ligand_fluorescence is not None:
        Fmax = max(Fmax, bottom_ligand_fluorescence.max())
        Fmin = min(Fmin, bottom_ligand_fluorescence.min())

    # Compute initial guesses for fluorescence quantum yield quantities.
    F_plate_guess = Fmin
    F_buffer_guess = Fmin / path_length
    F_L_guess = (Fmax - Fmin) / Lstated.max()
    F_P_guess = 0.0
    F_P_guess = Fmin / Pstated.min()
    F_PL_guess = (Fmax - Fmin) / min(Pstated.max(), Lstated.max())

    # Priors on fluorescence intensities of complexes (later divided by a factor of Pstated for scale).
    model['F_plate'] = pymc.Uniform('F_plate',
                                    lower=0.0,
                                    upper=Fmax,
                                    value=F_plate_guess)  # plate fluorescence
    model['F_buffer'] = pymc.Uniform(
        'F_buffer', lower=0.0, upper=Fmax / path_length,
        value=F_buffer_guess)  # buffer fluorescence
    model['F_PL'] = pymc.Uniform('F_PL',
                                 lower=0.0,
                                 upper=2 * Fmax /
                                 min(Pstated.max(), Lstated.max()),
                                 value=F_PL_guess)  # complex fluorescence
    model['F_P'] = pymc.Uniform('F_P',
                                lower=0.0,
                                upper=2 * (Fmax / Pstated).max(),
                                value=F_P_guess)  # protein fluorescence
    model['F_L'] = pymc.Uniform('F_L',
                                lower=0.0,
                                upper=2 * (Fmax / Lstated).max(),
                                value=F_L_guess)  # ligand fluorescence

    # Unknown experimental measurement error.
    if top_fluorescence:
        model['log_sigma_top'] = pymc.Uniform('log_sigma_top',
                                              lower=-10,
                                              upper=np.log(Fmax),
                                              value=np.log(5))
        model['sigma_top'] = pymc.Lambda(
            'sigma_top',
            lambda log_sigma=model['log_sigma_top']: np.exp(log_sigma))
        model['precision_top'] = pymc.Lambda(
            'precision_top',
            lambda log_sigma=model['log_sigma_top']: np.exp(-2 * log_sigma))

    if bottom_fluorescence:
        if top_fluorescence and bottom_fluorescence and link_top_and_bottom_sigma:
            # Use the same log_sigma for top and bottom fluorescence
            model['log_sigma_bottom'] = pymc.Lambda(
                'log_sigma_bottom',
                lambda log_sigma_top=model['log_sigma_top']: log_sigma_top)
        else:
            model['log_sigma_bottom'] = pymc.Uniform('log_sigma_bottom',
                                                     lower=-10,
                                                     upper=np.log(Fmax),
                                                     value=np.log(5))
        model['sigma_bottom'] = pymc.Lambda(
            'sigma_bottom',
            lambda log_sigma=model['log_sigma_bottom']: np.exp(log_sigma))
        model['precision_bottom'] = pymc.Lambda(
            'precision_bottom',
            lambda log_sigma=model['log_sigma_bottom']: np.exp(-2 * log_sigma))

    if top_fluorescence and bottom_fluorescence:
        # Gain that attenuates bottom fluorescence relative to top.
        # TODO: Replace this with plate absorbance?
        log_gain_guess = -np.log(
            (top_complex_fluorescence.max() - top_complex_fluorescence.min()) /
            (bottom_complex_fluorescence.max() -
             bottom_complex_fluorescence.min()))
        model['log_gain_bottom'] = pymc.Uniform(
            'log_gain_bottom', lower=-6.0, upper=6.0, value=log_gain_guess
        )  # plate material absorbance at emission wavelength
        model['gain_bottom'] = pymc.Lambda(
            'gain_bottom',
            lambda log_gain_bottom=model['log_gain_bottom']: np.exp(
                log_gain_bottom))
    elif (not top_fluorescence) and bottom_fluorescence:
        model['log_gain_bottom'] = 0.0  # no gain
        model['gain_bottom'] = pymc.Lambda(
            'gain_bottom',
            lambda log_gain_bottom=model['log_gain_bottom']: np.exp(
                log_gain_bottom))

    if top_fluorescence:
        model['log_sigma_abs'] = pymc.Uniform('log_sigma_abs',
                                              lower=-10,
                                              upper=0,
                                              value=np.log(0.01))
        model['sigma_abs'] = pymc.Lambda(
            'sigma_abs',
            lambda log_sigma=model['log_sigma_abs']: np.exp(log_sigma))
        model['precision_abs'] = pymc.Lambda(
            'precision_abs',
            lambda log_sigma=model['log_sigma_abs']: np.exp(-2 * log_sigma))

    # Fluorescence model.
    from assaytools.bindingmodels import TwoComponentBindingModel

    if 'epsilon_ex' in model:
        epsilon_ex = model['epsilon_ex']
    else:
        epsilon_ex = 0.0

    if 'epsilon_em' in model:
        epsilon_em = model['epsilon_em']
    else:
        epsilon_em = 0.0

    if top_complex_fluorescence is not None:

        @pymc.deterministic
        def top_complex_fluorescence_model(F_plate=model['F_plate'],
                                           F_buffer=model['F_buffer'],
                                           F_PL=model['F_PL'],
                                           F_P=model['F_P'],
                                           F_L=model['F_L'],
                                           Ptrue=Ptrue,
                                           Ltrue=Ltrue,
                                           DeltaG=DeltaG,
                                           epsilon_ex=epsilon_ex,
                                           epsilon_em=epsilon_em):
            [P_i, L_i,
             PL_i] = TwoComponentBindingModel.equilibrium_concentrations(
                 DeltaG, Ptrue[:], Ltrue[:])
            IF_i = inner_filter_effect_attenuation(epsilon_ex,
                                                   epsilon_em,
                                                   path_length,
                                                   L_i,
                                                   geometry='top')
            IF_i_plate = np.exp(
                -(epsilon_ex + epsilon_em) * path_length *
                L_i)  # inner filter effect applied only to plate
            Fmodel_i = IF_i[:] * (
                F_PL * PL_i + F_L * L_i + F_P * P_i +
                F_buffer * path_length) + IF_i_plate * F_plate
            return Fmodel_i

        # Add to model.
        model[
            'top_complex_fluorescence_model'] = top_complex_fluorescence_model
        model['top_complex_fluorescence'] = pymc.Normal(
            'top_complex_fluorescence',
            mu=model['top_complex_fluorescence_model'],
            tau=model['precision_top'],
            size=[N],
            observed=True,
            value=top_complex_fluorescence)  # observed data

    if top_ligand_fluorescence is not None:

        @pymc.deterministic
        def top_ligand_fluorescence_model(F_plate=model['F_plate'],
                                          F_buffer=model['F_buffer'],
                                          F_L=model['F_L'],
                                          Ltrue=Ltrue,
                                          epsilon_ex=epsilon_ex,
                                          epsilon_em=epsilon_em):
            IF_i = inner_filter_effect_attenuation(epsilon_ex,
                                                   epsilon_em,
                                                   path_length,
                                                   Ltrue,
                                                   geometry='top')
            IF_i_plate = np.exp(
                -(epsilon_ex + epsilon_em) * path_length *
                Ltrue)  # inner filter effect applied only to plate
            Fmodel_i = IF_i[:] * (
                F_L * Ltrue + F_buffer * path_length) + IF_i_plate * F_plate
            return Fmodel_i

        # Add to model.
        model['top_ligand_fluorescence_model'] = top_ligand_fluorescence_model
        model['top_ligand_fluorescence'] = pymc.Normal(
            'top_ligand_fluorescence',
            mu=model['top_ligand_fluorescence_model'],
            tau=model['precision_top'],
            size=[N],
            observed=True,
            value=top_ligand_fluorescence)  # observed data

    if bottom_complex_fluorescence is not None:

        @pymc.deterministic
        def bottom_complex_fluorescence_model(
                F_plate=model['F_plate'],
                F_buffer=model['F_buffer'],
                F_PL=model['F_PL'],
                F_P=model['F_P'],
                F_L=model['F_L'],
                Ptrue=Ptrue,
                Ltrue=Ltrue,
                DeltaG=DeltaG,
                epsilon_ex=epsilon_ex,
                epsilon_em=epsilon_em,
                log_gain_bottom=model['log_gain_bottom']):
            [P_i, L_i,
             PL_i] = TwoComponentBindingModel.equilibrium_concentrations(
                 DeltaG, Ptrue[:], Ltrue[:])
            IF_i = inner_filter_effect_attenuation(epsilon_ex,
                                                   epsilon_em,
                                                   path_length,
                                                   L_i,
                                                   geometry='bottom')
            IF_i_plate = np.exp(
                -epsilon_ex * path_length *
                L_i)  # inner filter effect applied only to plate
            Fmodel_i = IF_i[:] * (F_PL * PL_i + F_L * L_i + F_P * P_i +
                                  F_buffer * path_length) * np.exp(
                                      log_gain_bottom) + IF_i_plate * F_plate
            return Fmodel_i

        # Add to model.
        model[
            'bottom_complex_fluorescence_model'] = bottom_complex_fluorescence_model
        model['bottom_complex_fluorescence'] = pymc.Normal(
            'bottom_complex_fluorescence',
            mu=model['bottom_complex_fluorescence_model'],
            tau=model['precision_bottom'],
            size=[N],
            observed=True,
            value=bottom_complex_fluorescence)  # observed data

    if bottom_ligand_fluorescence is not None:

        @pymc.deterministic
        def bottom_ligand_fluorescence_model(
                F_plate=model['F_plate'],
                F_buffer=model['F_buffer'],
                F_PL=model['F_PL'],
                F_P=model['F_P'],
                F_L=model['F_L'],
                Ltrue=Ltrue,
                epsilon_ex=epsilon_ex,
                epsilon_em=epsilon_em,
                log_gain_bottom=model['log_gain_bottom']):
            IF_i = inner_filter_effect_attenuation(epsilon_ex,
                                                   epsilon_em,
                                                   path_length,
                                                   Ltrue,
                                                   geometry='bottom')
            IF_i_plate = np.exp(
                -epsilon_ex * path_length *
                Ltrue)  # inner filter effect applied only to plate
            Fmodel_i = IF_i[:] * (F_L * Ltrue +
                                  F_buffer * path_length) * np.exp(
                                      log_gain_bottom) + IF_i_plate * F_plate
            return Fmodel_i

        # Add to model.
        model[
            'bottom_ligand_fluorescence_model'] = bottom_ligand_fluorescence_model
        model['bottom_ligand_fluorescence'] = pymc.Normal(
            'bottom_ligand_fluorescence',
            mu=model['bottom_ligand_fluorescence_model'],
            tau=model['precision_bottom'],
            size=[N],
            observed=True,
            value=bottom_ligand_fluorescence)  # observed data

    if ligand_ex_absorbance is not None:
        model['plate_abs_ex'] = pymc.Uniform('plate_abs_ex',
                                             lower=0.0,
                                             upper=1.0,
                                             value=ligand_ex_absorbance.min())

        @pymc.deterministic
        def ligand_ex_absorbance_model(Ltrue=Ltrue,
                                       epsilon_ex=epsilon_ex,
                                       plate_abs_ex=model['plate_abs_ex']):
            Fmodel_i = (
                1.0 - np.exp(-epsilon_ex * path_length * Ltrue)) + plate_abs_ex
            return Fmodel_i

        # Add to model.
        model['ligand_ex_absorbance_model'] = ligand_ex_absorbance_model
        model['ligand_ex_absorbance'] = pymc.Normal(
            'ligand_ex_absorbance',
            mu=model['ligand_ex_absorbance_model'],
            tau=model['precision_abs'],
            size=[N],
            observed=True,
            value=ligand_ex_absorbance)  # observed data

    if ligand_em_absorbance is not None:
        model['plate_abs_em'] = pymc.Uniform('plate_abs_em',
                                             lower=0.0,
                                             upper=1.0,
                                             value=ligand_em_absorbance.min())

        @pymc.deterministic
        def ligand_em_absorbance_model(Ltrue=Ltrue,
                                       epsilon_em=model['epsilon_em'],
                                       plate_abs_em=model['plate_abs_em']):
            Fmodel_i = (
                1.0 - np.exp(-epsilon_em * path_length * Ltrue)) + plate_abs_em
            return Fmodel_i

        # Add to model.
        model['ligand_em_absorbance_model'] = ligand_em_absorbance_model
        model['ligand_em_absorbance'] = pymc.Normal(
            'ligand_em_absorbance',
            mu=model['ligand_em_absorbance_model'],
            tau=model['precision_abs'],
            size=[N],
            observed=True,
            value=ligand_em_absorbance)  # observed data

    # Promote this to a full-fledged PyMC model.
    pymc_model = pymc.Model(model)

    # Return the pymc model
    return pymc_model
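
A minimal sketch of sampling the returned model, assuming PyMC 2.x, with the input arrays built as in the docstring example above; the chain settings are arbitrary, and the Kd conversion assumes DeltaG is ln(Kd / 1 M), which the reduced-free-energy docstring suggests but does not state:

import numpy as np
import pymc

# Pstated, dPstated, Lstated, dLstated, top_complex_fluorescence and
# top_ligand_fluorescence constructed as in the docstring example above.
pymc_model = make_model(Pstated, dPstated, Lstated, dLstated,
                        top_complex_fluorescence=top_complex_fluorescence,
                        top_ligand_fluorescence=top_ligand_fluorescence)
mcmc = pymc.MCMC(pymc_model)
mcmc.sample(iter=20000, burn=5000)  # assumed chain settings
DeltaG = mcmc.trace('DeltaG')[:]    # reduced binding free energy (kT)
print(np.exp(DeltaG).mean())        # implied Kd in M, if DeltaG = ln(Kd / 1 M)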
Example #16
    labelMatrix = generateMatrix.getAchievementDetail(isSub=True, subjectName="数学")
    np.save("labelMatrix", labelMatrix)


# Get the number of rows and columns of the binary matrix (i.e. the number of questions and the number of students)

numQuestion, numPeople = labelMatrix.shape
print(numQuestion, numPeople)
# Initialize the values of theta, logA and b
theta_initial = np.zeros((NUM_THETAS, numPeople))
a_initial = np.ones((numQuestion, NUM_THETAS))
b_initial = np.zeros((numQuestion, NUM_THETAS))

# Build the proposal-distribution sampling for the parameters theta, logA and b;
# value is the initial value before sampling
theta = pm.Normal("theta", mu=0, tau=1.1 ** 2, value=theta_initial)
a = pm.Lognormal("a", mu=0, tau=0.3 ** 2, value=a_initial)
b = pm.Normal("b", mu=0, tau=0.3 ** 2, value=b_initial)


@pm.deterministic  # mark the prior as a deterministic value
def irtModel(theta=theta, a=a, b=b):
    bs = np.repeat(b, numPeople, 1)
    prob = 1.0 / (1.0 + np.exp(D * (bs - np.dot(a, theta))))

    return prob


# Get the predicted result; here irtModel is the prior, value holds the observed data, and labels is the posterior distribution
labels = pm.Bernoulli("labels", p=irtModel, value=labelMatrix, observed=True)

mcmc = pm.MCMC([a, b, theta, labels])
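
A short continuation sketch showing how the sampler built above might be run and summarized; the iteration counts are arbitrary assumptions:

mcmc.sample(iter=20000, burn=5000)

a_est = mcmc.trace('a')[:].mean(axis=0)          # posterior mean discrimination
b_est = mcmc.trace('b')[:].mean(axis=0)          # posterior mean difficulty
theta_est = mcmc.trace('theta')[:].mean(axis=0)  # posterior mean ability
print(a_est.shape, b_est.shape, theta_est.shape)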
Example #17
    def __init__(self,
                 experiments,
                 receptor,
                 V0,
                 concentration_uncertainty=0.10,
                 verbose=False):
        """
      ARGUMENTS

      experiments (list of Experiment) -
      receptor (string) - name of receptor species
      V0 (float) - calorimeter sample cell volume

      OPTIONAL ARGUMENTS
      
      concentration_uncertainty (float) - relative uncertainty in concentrations

      """

        self.verbose = verbose

        # Store temperature.
        # NOTE: Right now, there can only be one.
        self.temperature = experiments[0].temperature  # temperature (kelvin)
        self.beta = 1.0 / (kB * self.temperature
                           )  # inverse temperature 1/(kcal/mol)

        # Store copy of experiments.
        self.experiments = copy.deepcopy(experiments)
        if verbose: print "%d experiments" % len(self.experiments)

        # Store sample cell volume.
        self.V0 = V0

        # Store the name of the receptor.
        self.receptor = receptor
        if verbose:
            print "species '%s' will be treated as receptor" % self.receptor

        # Make a list of names of all molecular species.
        self.species = set()  # all molecular species
        for experiment in experiments:
            self.species.update(experiment.sample_cell_concentrations.keys())
            self.species.update(experiment.syringe_concentrations.keys())
        if verbose: print "species: ", self.species

        # Make a list of all ligands.
        self.ligands = copy.deepcopy(self.species)
        self.ligands.remove(receptor)
        if verbose: print "ligands: ", self.ligands

        # Create a list of all stochastics.
        self.stochastics = list()

        # Create a prior for thermodynamic parameters of binding for each ligand-receptor interaction.
        DeltaG_min = -40.  # (kcal/mol)
        DeltaG_max = +40.  # (kcal/mol)
        DeltaH_min = -100.  # (kcal/mol)
        DeltaH_max = +100.  # (kcal/mol)
        self.thermodynamic_parameters = dict()
        for ligand in self.ligands:
            name = "DeltaG of %s * %s" % (self.receptor, ligand)
            x = pymc.Uniform(name,
                             lower=DeltaG_min,
                             upper=DeltaG_max,
                             value=0.0)
            self.thermodynamic_parameters[name] = x
            self.stochastics.append(x)
            name = "DeltaH of %s * %s" % (self.receptor, ligand)
            x = pymc.Uniform(name,
                             lower=DeltaH_min,
                             upper=DeltaH_max,
                             value=0.0)
            self.thermodynamic_parameters[name] = x
            self.stochastics.append(x)
        if verbose:
            print "thermodynamic parameters:"
            print self.thermodynamic_parameters

        # DEBUG: Set initial thermodynamic parameters to literature values.
        self.thermodynamic_parameters[
            "DeltaG of HIV protease * acetyl pepstatin"].value = -9.0
        self.thermodynamic_parameters[
            "DeltaH of HIV protease * acetyl pepstatin"].value = +6.8
        self.thermodynamic_parameters[
            "DeltaG of HIV protease * KNI-10033"].value = -14.870
        self.thermodynamic_parameters[
            "DeltaH of HIV protease * KNI-10033"].value = -8.200
        self.thermodynamic_parameters[
            "DeltaG of HIV protease * KNI-10075"].value = -14.620
        self.thermodynamic_parameters[
            "DeltaH of HIV protease * KNI-10075"].value = -12.120

        # Determine min and max range for log_sigma (log of instrument heat measurement error)
        # TODO: This should depend on a number of factors, like integration time, heat signal, etc.?
        sigma_guess = 0.0
        for experiment in self.experiments:
            sigma_guess += experiment.observed_injection_heats[-4:].std()
        sigma_guess /= float(len(self.experiments))
        log_sigma_guess = log(sigma_guess)
        log_sigma_min = log_sigma_guess - 10
        log_sigma_max = log_sigma_guess + 5
        self.log_sigma = pymc.Uniform('log_sigma',
                                      lower=log_sigma_min,
                                      upper=log_sigma_max,
                                      value=log_sigma_guess)
        self.stochastics.append(self.log_sigma)
        tau = pymc.Lambda(
            'tau', lambda log_sigma=self.log_sigma: exp(-2.0 * log_sigma))
        self.stochastics.append(tau)

        # Define priors for unknowns for each experiment.
        for (index, experiment) in enumerate(self.experiments):
            # Number of observations
            experiment.ninjections = experiment.observed_injection_heats.size
            if verbose:
                print "Experiment %d has %d injections" % (
                    index, experiment.ninjections)

            # Heat of dilution / mixing
            # We allow the heat of dilution/mixing to range over the observed range of heats, plus a margin equal to the range of observed heats.
            max_heat = experiment.observed_injection_heats.max()
            min_heat = experiment.observed_injection_heats.min()
            heat_interval = max_heat - min_heat
            last_heat = experiment.observed_injection_heats[
                -1]  # last injection heat provides a good initial guess for heat of dilution/mixing
            experiment.DeltaH_0 = pymc.Uniform("DeltaH_0 for experiment %d" %
                                               index,
                                               lower=min_heat - heat_interval,
                                               upper=max_heat + heat_interval,
                                               value=last_heat)
            self.stochastics.append(experiment.DeltaH_0)

            # True concentrations
            experiment.true_sample_cell_concentrations = dict()
            for species, concentration in experiment.sample_cell_concentrations.iteritems(
            ):
                x = pymc.Lognormal(
                    "initial sample cell concentration of %s in experiment %d"
                    % (species, index),
                    mu=log(concentration),
                    tau=1.0 / log(1.0 + concentration_uncertainty**2),
                    value=concentration)
                experiment.true_sample_cell_concentrations[species] = x
                self.stochastics.append(x)

            experiment.true_syringe_concentrations = dict()
            for species, concentration in experiment.syringe_concentrations.iteritems(
            ):
                x = pymc.Lognormal(
                    "initial syringe concentration of %s in experiment %d" %
                    (species, index),
                    mu=log(concentration),
                    tau=1.0 / log(1.0 + concentration_uncertainty**2),
                    value=concentration)
                experiment.true_syringe_concentrations[species] = x
                self.stochastics.append(x)

            # Add species not explicitly listed with zero concentration.
            for species in self.species:
                if species not in experiment.true_sample_cell_concentrations:
                    experiment.true_sample_cell_concentrations[species] = 0.0
                if species not in experiment.true_syringe_concentrations:
                    experiment.true_syringe_concentrations[species] = 0.0

            # True injection heats
            experiment.true_injection_heats = pymc.Lambda(
                "true injection heats for experiment %d" % index,
                lambda experiment=experiment, sample_cell_concentrations=
                experiment.true_sample_cell_concentrations,
                syringe_concentrations=experiment.true_syringe_concentrations,
                DeltaH_0=experiment.DeltaH_0, thermodynamic_parameters=self.
                thermodynamic_parameters: self.expected_injection_heats(
                    experiment, sample_cell_concentrations,
                    syringe_concentrations, DeltaH_0, thermodynamic_parameters
                ))
            self.stochastics.append(experiment.true_injection_heats)

            # Observed injection heats
            experiment.observation = pymc.Normal(
                "observed injection heats for experiment %d" % index,
                mu=experiment.true_injection_heats,
                tau=tau,
                observed=True,
                value=experiment.observed_injection_heats)
            self.stochastics.append(experiment.observation)

        # Create sampler.
        print "Creating sampler..."
        mcmc = pymc.MCMC(self.stochastics, db='ram')
        #db = pymc.database.pickle.load('MCMC.pickle') # DEBUG
        #mcmc = pymc.MCMC(self.stochastics, db=db)
        for stochastic in self.stochastics:
            print stochastic
            try:
                mcmc.use_step_method(pymc.Metropolis, stochastic)
            except:
                pass
        mcmc.use_step_method(
            RescalingStep, {
                'Ls':
                self.experiments[0].
                true_syringe_concentrations['acetyl pepstatin'],
                'P0':
                self.experiments[0].
                true_sample_cell_concentrations['HIV protease'],
                'DeltaH':
                self.thermodynamic_parameters[
                    'DeltaH of HIV protease * acetyl pepstatin'],
                'DeltaG':
                self.thermodynamic_parameters[
                    'DeltaG of HIV protease * acetyl pepstatin']
            }, self.beta)

        mcmc.use_step_method(
            RescalingStep, {
                'Ls':
                self.experiments[1].true_syringe_concentrations['KNI-10033'],
                'P0':
                self.experiments[1].
                true_sample_cell_concentrations['HIV protease'],
                'DeltaH':
                self.
                thermodynamic_parameters['DeltaH of HIV protease * KNI-10033'],
                'DeltaG':
                self.
                thermodynamic_parameters['DeltaG of HIV protease * KNI-10033']
            }, self.beta)

        mcmc.use_step_method(
            RescalingStep, {
                'Ls':
                self.experiments[2].true_syringe_concentrations['KNI-10075'],
                'P0':
                self.experiments[2].
                true_sample_cell_concentrations['HIV protease'],
                'DeltaH':
                self.
                thermodynamic_parameters['DeltaH of HIV protease * KNI-10075'],
                'DeltaG':
                self.
                thermodynamic_parameters['DeltaG of HIV protease * KNI-10075']
            }, self.beta)

        self.mcmc = mcmc
Example #18
    def __init__(self, experiments, receptor, concentration_uncertainty=0.10):
        """
        ARGUMENTS

        experiments (list of Experiment) -
        instrument (Instrument) - instrument the experiments were carried out in (there can only be one)
        receptor (string) - name of receptor species
        OPTIONAL ARGUMENTS
        concentration_uncertainty (float) - relative uncertainty in concentrations

        """
        # Store temperature.
        # NOTE: Right now, there can only be one.
        self.temperature = experiments[0].temperature  # temperature (kelvin)
        self.beta = 1.0 / (ureg.molar_gas_constant * self.temperature
                           )  # inverse temperature 1/(kcal/mol)

        # Store copy of experiments.
        self.experiments = experiments
        logging.info("%d experiments" % len(self.experiments))

        # Store sample cell volume.
        self.V0 = self.experiments[0].cell_volume

        # Store the name of the receptor.
        self.receptor = receptor
        logging.info("species '%s' will be treated as receptor" %
                     self.receptor)

        # Make a list of names of all molecular species.
        self.species = set()  # all molecular species
        for experiment in experiments:
            self.species.update(experiment.cell_concentration.keys())
            self.species.update(experiment.syringe_concentration.keys())
        logging.info("species: %s" % self.species)

        # Make a list of all ligands.
        self.ligands = copy.deepcopy(self.species)
        self.ligands.remove(receptor)
        logging.info("ligands: %s" % self.ligands)

        # Create a list of all stochastics.
        self.stochastics = list()

        # Create a prior for thermodynamic parameters of binding for each ligand-receptor interaction.
        DeltaG_min = -40.  # (kcal/mol)
        DeltaG_max = +40.  # (kcal/mol)
        DeltaH_min = -100.  # (kcal/mol)
        DeltaH_max = +100.  # (kcal/mol)
        self.thermodynamic_parameters = dict()
        for ligand in self.ligands:
            name = "DeltaG of %s * %s" % (self.receptor, ligand)
            x = pymc.Uniform(name,
                             lower=DeltaG_min,
                             upper=DeltaG_max,
                             value=0.0)
            self.thermodynamic_parameters[name] = x
            self.stochastics.append(x)
            name = "DeltaH of %s * %s" % (self.receptor, ligand)
            x = pymc.Uniform(name,
                             lower=DeltaH_min,
                             upper=DeltaH_max,
                             value=0.0)
            self.thermodynamic_parameters[name] = x
            self.stochastics.append(x)
        logging.debug("thermodynamic parameters:")
        logging.debug(self.thermodynamic_parameters)

        # # TODO: add option to set initial thermodynamic parameters to literature values.
        # self.thermodynamic_parameters["DeltaG of protein * ligand"].value = -9.0
        # self.thermodynamic_parameters["DeltaH of HIV protease * acetyl pepstatin"].value = +6.8

        # Determine min and max range for log_sigma (log of instrument heat measurement error)
        # TODO: This should depend on a number of factors, like integration time, heat signal, etc.?
        sigma_guess = 0.0
        for experiment in self.experiments:
            sigma_guess += experiment.observed_injection_heats[:-4].std()
        sigma_guess /= float(len(self.experiments))
        log_sigma_guess = log(sigma_guess /
                              Quantity('microcalorie'))  #remove unit
        log_sigma_min = log_sigma_guess - 10
        log_sigma_max = log_sigma_guess + 5
        self.log_sigma = pymc.Uniform('log_sigma',
                                      lower=log_sigma_min,
                                      upper=log_sigma_max,
                                      value=log_sigma_guess)
        self.stochastics.append(self.log_sigma)
        tau = pymc.Lambda(
            'tau', lambda log_sigma=self.log_sigma: exp(-2.0 * log_sigma))
        self.stochastics.append(tau)
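        # (tau = exp(-2*log_sigma) = 1/sigma**2 is the Normal precision
        # corresponding to a measurement error sigma = exp(log_sigma).)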

        # Define priors for unknowns for each experiment.
        for (index, experiment) in enumerate(self.experiments):
            # Number of observations
            experiment.ninjections = experiment.observed_injection_heats.size
            logging.info("Experiment %d has %d injections" %
                         (index, experiment.ninjections))

            # Heat of dilution / mixing
            # The prior on the heat of dilution/mixing spans the observed range of heats, widened by one full heat interval on each side.
            max_heat = experiment.observed_injection_heats.max()
            min_heat = experiment.observed_injection_heats.min()
            heat_interval = max_heat - min_heat
            last_heat = experiment.observed_injection_heats[
                -1]  # last injection heat provides a good initial guess for heat of dilution/mixing
            experiment.DeltaH_0 = pymc.Uniform("DeltaH_0 for experiment %d" %
                                               index,
                                               lower=min_heat - heat_interval,
                                               upper=max_heat + heat_interval,
                                               value=last_heat)
            self.stochastics.append(experiment.DeltaH_0)

            # True concentrations
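            # (These Lognormal priors use mu = log(stated concentration) and
            # tau = 1/log(1 + concentration_uncertainty**2), so the prior
            # median equals the stated concentration and the prior coefficient
            # of variation equals concentration_uncertainty.)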
            experiment.true_cell_concentration = dict()
            for species, concentration in experiment.cell_concentration.items():
                x = pymc.Lognormal(
                    "initial sample cell concentration of %s in experiment %d"
                    % (species, index),
                    mu=log(concentration / Quantity('millimole per liter')),
                    tau=1.0 / log(1.0 + concentration_uncertainty**2),
                    value=concentration / Quantity('millimole per liter'))
                experiment.true_cell_concentration[species] = x
                self.stochastics.append(x)

            experiment.true_syringe_concentration = dict()
            for species, concentration in experiment.syringe_concentration.items():
                x = pymc.Lognormal(
                    "initial syringe concentration of %s in experiment %d" %
                    (species, index),
                    mu=log(concentration / Quantity('millimole per liter')),
                    tau=1.0 / log(1.0 + concentration_uncertainty**2),
                    value=concentration / Quantity('millimole per liter'))
                experiment.true_syringe_concentration[species] = x
                self.stochastics.append(x)

            # Add species not explicitly listed with zero concentration.
            for species in self.species:
                if species not in experiment.true_cell_concentration:
                    experiment.true_cell_concentration[species] = 0.0
                if species not in experiment.true_syringe_concentration:
                    experiment.true_syringe_concentration[species] = 0.0

            # True injection heats
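            # (The keyword defaults of the lambda below are how PyMC 2 infers
            # the parents of this Deterministic; the lambda body just forwards
            # them to expected_injection_heats.)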
            experiment.true_injection_heats = pymc.Lambda(
                "true injection heats for experiment %d" % index,
                lambda ligands=self.ligands,
                       receptor=self.receptor,
                       V0=self.V0,
                       N=experiment.ninjections,
                       volumes=experiment.injection_volumes,
                       beta=self.beta,
                       cell_concentration=experiment.true_cell_concentration,
                       syringe_concentration=experiment.true_syringe_concentration,
                       DeltaH_0=experiment.DeltaH_0,
                       thermodynamic_parameters=self.thermodynamic_parameters:
                self.expected_injection_heats(
                    ligands, receptor, V0, N, volumes, beta,
                    cell_concentration, syringe_concentration, DeltaH_0,
                    thermodynamic_parameters))
            self.stochastics.append(experiment.true_injection_heats)

            # Observed injection heats
            experiment.observation = pymc.Normal(
                "observed injection heats for experiment %d" % index,
                mu=experiment.true_injection_heats,
                tau=tau,
                observed=True,
                value=experiment.observed_injection_heats)
            self.stochastics.append(experiment.observation)

        # Create sampler.
        print("Creating sampler...")
        mcmc = pymc.MCMC(self.stochastics, db='ram')

        for stochastic in self.stochastics:
            # print stochastic
            try:
                mcmc.use_step_method(pymc.Metropolis, stochastic)
            except:
                pass

        for experiment in self.experiments:
            for ligand in self.ligands:
                if isinstance(experiment.true_syringe_concentration[ligand],
                              pymc.distributions.Lognormal):
                    mcmc.use_step_method(
                        RescalingStep, {
                            'Ls':
                            experiment.true_syringe_concentration[ligand],
                            'P0':
                            experiment.true_cell_concentration[receptor],
                            'DeltaH':
                            self.thermodynamic_parameters['DeltaH of %s * %s' %
                                                          (receptor, ligand)],
                            'DeltaG':
                            self.thermodynamic_parameters['DeltaG of %s * %s' %
                                                          (receptor, ligand)]
                        }, self.beta)

        self.mcmc = mcmc
Beispiel #19
0
    def __init__(self, experiment):

        # Determine number of observations.
        self.N = experiment.number_of_injections

        self.DeltaVn = Quantity(numpy.zeros(self.N), ureg.microliter)
        # Store injection volumes
        for inj, injection in enumerate(experiment.injections):
            self.DeltaVn[inj] = injection.volume

        # Store calorimeter properties.
        self.V0 = experiment.cell_volume

        # Extract properties from experiment
        self.experiment = experiment

        if not len(experiment.syringe_concentration) == 1:
            raise ValueError(
                'TwoComponent model only supports one component in the syringe, found %d'
                % len(experiment.syringe_concentration))

        # python 2/3 compatibility
        try:
            Ls_stated = experiment.syringe_concentration.itervalues().next()
        except AttributeError:
            Ls_stated = next(iter(experiment.syringe_concentration.values()))

        if not len(experiment.cell_concentration) == 1:
            raise ValueError(
                'TwoComponent model only supports one component in the cell, found %d'
                % len(experiment.cell_concentration))

        # python 2/3 compatibility
        try:
            P0_stated = experiment.cell_concentration.itervalues().next()
        except AttributeError:
            P0_stated = next(iter(experiment.cell_concentration.values()))

        # Store temperature.
        self.temperature = experiment.target_temperature  # (kelvin)
        self.beta = 1.0 / (ureg.molar_gas_constant * self.temperature
                           )  # inverse temperature 1/(kcal/mol)

        # Compute uncertainties in stated concentrations.
        dP0 = 0.10 * P0_stated  # uncertainty in protein stated concentration (M) - 10% error
        dLs = 0.10 * Ls_stated  # uncertainty in ligand stated concentration (M) - 10% error

        # Determine guesses for initial values
        q_n = Quantity(numpy.zeros(len(experiment.injections)),
                       'microcalorie / mole')
        for inj, injection in enumerate(experiment.injections):
            q_n[inj] = injection.evolved_heat / (
                numpy.sum(injection.titrant)
            )  # TODO reduce to one titrant (from .itc file)

        # TODO better initial guesses for everything
        log_sigma_guess = log(
            q_n[-4:].std() / (ureg.microcalorie /
                              ureg.mole))  # review: how can we do this better?
        DeltaG_guess = -8.3 * ureg.kilocalorie / ureg.mol  # todo add option to supply literature values
        DeltaH_guess = -12.0 * ureg.kilocalorie / ureg.mol

        #Assume the last injection has the best guess for H0
        DeltaH_0_guess = q_n[-1]  # ucal/injection

        # Determine min and max range for log_sigma
        log_sigma_min = log_sigma_guess - 10
        log_sigma_max = log_sigma_guess + 5

        # Determine range for priors for thermodynamic parameters.
        DeltaG_min = -40. * ureg.kilocalorie / ureg.mol  # (kcal/mol)
        DeltaG_max = +40. * ureg.kilocalorie / ureg.mol  # (kcal/mol)
        DeltaH_min = -100. * ureg.kilocalorie / ureg.mol  # (kcal/mol)
        DeltaH_max = +100. * ureg.kilocalorie / ureg.mol  # (kcal/mol)
        # review dH = dQ, so is the unit of H0 cal, or cal/mol? Where does the mol come into the equation.
        heat_interval = (q_n.max() - q_n.min())
        DeltaH_0_min = q_n.min() - heat_interval  # (cal/mol)
        DeltaH_0_max = q_n.max() + heat_interval  # (cal/mol)

        # Define priors for concentrations.
        # TODO convert everything to a consistent unit system
        # review check out all the units to make sure that they're appropriate
        self.P0 = pymc.Lognormal('P0',
                                 mu=log(P0_stated / ureg.micromolar),
                                 tau=1.0 / log(1.0 + (dP0 / P0_stated)**2),
                                 value=P0_stated / ureg.micromolar)
        self.Ls = pymc.Lognormal('Ls',
                                 mu=log(Ls_stated / ureg.micromolar),
                                 tau=1.0 / log(1.0 + (dLs / Ls_stated)**2),
                                 value=Ls_stated / ureg.micromolar)

        # Define priors for thermodynamic quantities.
        self.log_sigma = pymc.Uniform('log_sigma',
                                      lower=log_sigma_min,
                                      upper=log_sigma_max,
                                      value=log_sigma_guess)
        self.DeltaG = pymc.Uniform(
            'DeltaG',
            lower=DeltaG_min / (ureg.kilocalorie / ureg.mol),
            upper=DeltaG_max / (ureg.kilocalorie / ureg.mol),
            value=DeltaG_guess / (ureg.kilocalorie / ureg.mol))
        self.DeltaH = pymc.Uniform(
            'DeltaH',
            lower=DeltaH_min / (ureg.kilocalorie / ureg.mol),
            upper=DeltaH_max / (ureg.kilocalorie / ureg.mol),
            value=DeltaH_guess / (ureg.kilocalorie / ureg.mol))
        # review make sure we get the units right on this.
        self.DeltaH_0 = pymc.Uniform('DeltaH_0',
                                     lower=DeltaH_0_min / ureg.microcalorie,
                                     upper=DeltaH_0_max / ureg.microcalorie,
                                     value=DeltaH_0_guess / ureg.microcalorie)

        q_n_model = pymc.Lambda(
            'q_n_model',
            lambda P0=self.P0, Ls=self.Ls, DeltaG=self.DeltaG,
                   DeltaH=self.DeltaH, DeltaH_0=self.DeltaH_0:
            self.expected_injection_heats(self.V0, self.DeltaVn, P0, Ls,
                                          DeltaG, DeltaH, DeltaH_0,
                                          self.beta, self.N))
        tau = pymc.Lambda('tau',
                          lambda log_sigma=self.log_sigma: self.tau(log_sigma))

        # Review doublecheck equation
        q_ns = Quantity(numpy.zeros(experiment.number_of_injections),
                        'microcalorie / mole')
        for inj, injection in enumerate(experiment.injections):
            q_ns[inj] = injection.evolved_heat / injection.titrant

        # Define observed data.
        self.q_n_obs = pymc.Normal('q_n',
                                   mu=q_n_model,
                                   tau=tau,
                                   observed=True,
                                   value=q_ns /
                                   Quantity('microcalorie / mole'))

        # Create sampler.

        mcmc = pymc.MCMC(self, db='ram')
        mcmc.use_step_method(pymc.Metropolis, self.DeltaG)
        mcmc.use_step_method(pymc.Metropolis, self.DeltaH)
        mcmc.use_step_method(pymc.Metropolis, self.DeltaH_0)

        if P0_stated > Quantity('0.0 molar') and Ls_stated > Quantity(
                '0.0 molar'):
            mcmc.use_step_method(
                RescalingStep, {
                    'Ls': self.Ls,
                    'P0': self.P0,
                    'DeltaH': self.DeltaH,
                    'DeltaG': self.DeltaG
                }, self.beta)
        elif experiment.cell_concentration > Quantity('0.0 molar'):
            mcmc.use_step_method(pymc.Metropolis, self.P0)
        elif experiment.syringe_concentration > Quantity('0.0 molar'):
            mcmc.use_step_method(pymc.Metropolis, self.Ls)

        self.mcmc = mcmc
        return
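
# A small, assumed usage sketch (the sampling settings and the presence of
# 'DeltaG'/'DeltaH' traces in kcal/mol are illustrative assumptions, not part
# of the original code): once the sampler built above has run, derived
# quantities such as the entropic contribution -T*DeltaS = DeltaG - DeltaH
# can be computed directly from the traces, which carries the full posterior
# uncertainty through to the derived quantity.
def summarize_entropy(mcmc, niter=20000, nburn=2000, thin=10):
    """Sample `mcmc` and report the posterior of -T*DeltaS = DeltaG - DeltaH."""
    mcmc.sample(iter=niter, burn=nburn, thin=thin)
    DeltaG = mcmc.trace('DeltaG')[:]   # kcal/mol
    DeltaH = mcmc.trace('DeltaH')[:]   # kcal/mol
    minus_TdS = DeltaG - DeltaH        # since DeltaG = DeltaH - T*DeltaS
    print('-T*DeltaS = %.2f +- %.2f kcal/mol'
          % (minus_TdS.mean(), minus_TdS.std()))
    return minus_TdS
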
Beispiel #20
0
import pymc as pm
import pymc.gp as gp
from pymc.gp.cov_funs import matern
import numpy as np
import matplotlib.pyplot as pl

from numpy.random import normal

x = np.arange(-1., 1., .1)

# Prior parameters of C
diff_degree = pm.Uniform('diff_degree', 1., 3)
amp = pm.Lognormal('amp', mu=.4, tau=1.)
scale = pm.Lognormal('scale', mu=.5, tau=1.)


# The covariance dtrm C is valued as a Covariance object.
@pm.deterministic
def C(eval_fun=gp.matern.euclidean,
      diff_degree=diff_degree,
      amp=amp,
      scale=scale):
    return gp.NearlyFullRankCovariance(eval_fun,
                                       diff_degree=diff_degree,
                                       amp=amp,
                                       scale=scale)


# Prior parameters of M
a = pm.Normal('a', mu=1., tau=1.)
b = pm.Normal('b', mu=.5, tau=1.)
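
# An assumed usage note (not part of the original excerpt): C is valued as a
# pymc.gp Covariance object, so its current value can be evaluated on the
# mesh x to inspect the prior covariance implied by the current values of
# diff_degree, amp and scale.
Sigma = C.value(x, x)  # prior covariance matrix evaluated on the mesh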
Beispiel #21
0
        return -np.inf     
    return (-0.5 - num_partition) * np.log(np.array(value).sum())

@pm.stochastic
def prior_for_dirichlet_distribution_partition5(num_partition = 5, value = [1.] * 5):
    if np.any(value <= 0):
        return -np.inf     
    return (-0.5 - num_partition) * np.log(np.array(value).sum())

@pm.stochastic
def prior_for_dirichlet_distribution_partition6(num_partition = 6, value = [1.] * 6):
    if np.any(value <= 0):
        return -np.inf     
    return (-0.5 - num_partition) * np.log(np.array(value).sum())

dirichlet_process_prior = pm.Lognormal('dirichlet_process_prior', mu = 0, tau = 4)

@pm.stochastic(dtype = int)
def cut_1(value = np.array([[1, 5, 0, 0, 0, 0, 0, 2, 1]] * DATA_SIZE)):  
    p_all = 0
    for i in range(DATA_SIZE):
        parent_node_index = value[i][7]
        cut_partition_num = value[i][8]
        rest_partion_num = 6 - parent_node_index - cut_partition_num
        rest_index_start = parent_node_index + cut_partition_num
        next_parent_node_index = range(parent_node_index, rest_index_start)[value[i][9]]
        
        
        if (value[i][7] ==  np.min(np.where(ORIGIN > 1))) \
            and (value[i][next_parent_node_index] == np.min(np.where(value[i] > 1))) \
            and (list(value[i][next_parent_node_index + 1 : next_parent_node_index + 1 + rest_partion_num]) == list(ORIGIN[parent_node_index + 1:])
Beispiel #22
0
"""
Created on Mon Sep 04 18:17:59 2017

@author: Edward Coen
"""

import pymc as pm
import numpy as np
import scipy.special as ss

PRIOR_STD = 1
PRIOR_CORRELATION = 0.5

MAX_DIMENSION = 6

dirichilet_process_prior = pm.Lognormal('dirichilet_process_prior_mean',
                                        mu=0,
                                        tau=1)
dirichilet_distribution_prior_mean = pm.Lognormal(
    'dirichilet_distribution_prior_mean', mu=3, tau=1.0 / 4)

a = pm.Beta('aa',
            alpha=dirichilet_process_prior,
            beta=dirichilet_distribution_prior_mean)
mcmc2 = pm.MCMC(
    [dirichilet_process_prior, dirichilet_distribution_prior_mean, a])
mcmc2.sample(1000, 0, 1)
pm.Matplot.plot(mcmc2)
dirichilet_distribution_prior_std = pm.Uniform(
    'dirichilet_distribution_prior_std', lower=0.99, upper=1.001)
dirichilet_distribution_prior_correlation = pm.Uniform(
    'dirichilet_distribution_prior_correlation', lower=0.499, upper=0.501)
Beispiel #23
0
    def __init__(self, Ls_stated, P0_stated, q_n_observed, DeltaVn,
                 temperature, V0):

        # Determine number of observations.
        self.N = q_n_observed.size

        # Store injection volumes
        if not numpy.iterable(DeltaVn):
            self.DeltaVn = numpy.ones([self.N], numpy.float64) * DeltaVn
        else:
            self.DeltaVn = numpy.array(DeltaVn)

        # Store calorimeter properties.
        self.V0 = V0

        # Store temperature.
        self.temperature = temperature  # temperature (kelvin)
        self.beta = 1.0 / (kB * temperature
                           )  # inverse temperature 1/(kcal/mol)

        # Compute uncertainties in stated concentrations.
        dP0 = 0.10 * P0_stated  # uncertainty in protein stated concentration (M) - 10% error
        dLs = 0.10 * Ls_stated  # uncertainty in ligand stated concentration (M) - 10% error

        # Determine guesses for initial values
        log_sigma_guess = log(q_n_observed[-4:].std())  # cal/injection
        DeltaG_guess = -8.3  # kcal/mol
        DeltaH_guess = -12.0  # kcal/mol
        DeltaH_0_guess = q_n_observed[-1]  # cal/injection

        # Determine min and max range for log_sigma
        log_sigma_min = log_sigma_guess - 10
        log_sigma_max = log_sigma_guess + 5

        # Determine range for priors for thermodynamic parameters.
        DeltaG_min = -40.  # (kcal/mol)
        DeltaG_max = +40.  # (kcal/mol)
        DeltaH_min = -100.  # (kcal/mol)
        DeltaH_max = +100.  # (kcal/mol)
        heat_interval = q_n_observed.max() - q_n_observed.min()
        DeltaH_0_min = q_n_observed.min() - heat_interval  # (cal/mol)
        DeltaH_0_max = q_n_observed.max() + heat_interval  # (cal/mol)

        # Define priors for concentrations.
        #self.P0 = pymc.Normal('P0', mu=P0_stated, tau=1.0/dP0**2, value=P0_stated)
        #self.Ls = pymc.Normal('Ls', mu=Ls_stated, tau=1.0/dLs**2, value=Ls_stated)
        self.P0 = pymc.Lognormal('P0',
                                 mu=log(P0_stated),
                                 tau=1.0 / log(1.0 + (dP0 / P0_stated)**2),
                                 value=P0_stated)
        self.Ls = pymc.Lognormal('Ls',
                                 mu=log(Ls_stated),
                                 tau=1.0 / log(1.0 + (dLs / Ls_stated)**2),
                                 value=Ls_stated)

        # Define priors for thermodynamic quantities.
        self.log_sigma = pymc.Uniform('log_sigma',
                                      lower=log_sigma_min,
                                      upper=log_sigma_max,
                                      value=log_sigma_guess)
        self.DeltaG = pymc.Uniform('DeltaG',
                                   lower=DeltaG_min,
                                   upper=DeltaG_max,
                                   value=DeltaG_guess)
        self.DeltaH = pymc.Uniform('DeltaH',
                                   lower=DeltaH_min,
                                   upper=DeltaH_max,
                                   value=DeltaH_guess)
        self.DeltaH_0 = pymc.Uniform('DeltaH_0',
                                     lower=DeltaH_0_min,
                                     upper=DeltaH_0_max,
                                     value=DeltaH_0_guess)

        # Deterministic functions.
        q_n_model = pymc.Lambda(
            'q_n_model',
            lambda P0=self.P0, Ls=self.Ls, DeltaG=self.DeltaG,
                   DeltaH=self.DeltaH, DeltaH_0=self.DeltaH_0,
                   q_n_obs=self.DeltaH_0:
            self.expected_injection_heats(P0, Ls, DeltaG, DeltaH, DeltaH_0,
                                          q_n_obs))
        tau = pymc.Lambda('tau',
                          lambda log_sigma=self.log_sigma: self.tau(log_sigma))

        # Define observed data.
        self.q_n_obs = pymc.Normal('q_n',
                                   mu=q_n_model,
                                   tau=tau,
                                   observed=True,
                                   value=q_n_observed)

        # Create sampler.
        mcmc = pymc.MCMC(self, db='ram')
        mcmc.use_step_method(pymc.Metropolis, self.DeltaG)
        mcmc.use_step_method(pymc.Metropolis, self.DeltaH)
        mcmc.use_step_method(pymc.Metropolis, self.DeltaH_0)
        mcmc.use_step_method(pymc.Metropolis, self.P0)
        mcmc.use_step_method(pymc.Metropolis, self.Ls)
        mcmc.use_step_method(
            RescalingStep, {
                'Ls': self.Ls,
                'P0': self.P0,
                'DeltaH': self.DeltaH,
                'DeltaG': self.DeltaG
            }, self.beta)
        self.mcmc = mcmc
        return
Beispiel #24
0
fermiindex = np.argmax(ergy >= 0)

# load in data
dos_d = np.loadtxt('dos_d.txt')
y1_data = np.loadtxt('dos_ads.txt')
y2_data = np.load('E.npy')

# priors
Initeffadse = -5.0  #-5.0
Initbeta = 2.1  #2.0
Initdelta = 1.0  #1.0
InitAlpha = 0.036  #.5
InitEsp = -3.25  #-2.0

effadse = pm.Normal('effadse', -5.0, 1, value=Initeffadse)
beta = pm.Lognormal('beta', 2, 1, value=Initbeta)
delta = pm.Lognormal('delta', 1, 0.25, value=Initdelta)
alpha = pm.Uniform('alpha', 0, 1.0, value=InitAlpha)
Esp = pm.Normal('Esp', -3.25, 1, value=InitEsp)

var_1 = pm.InverseGamma('var_1', 2.0, 0.05, value=0.05)
var_2 = pm.InverseGamma('var_2', 2.0, 0.1, value=0.1)
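# (With shape parameter alpha = 2, pm.InverseGamma(alpha, beta) has mean
# beta/(alpha - 1) = beta, so these noise-variance priors are centered on
# 0.05 and 0.1, matching their initial values.)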

a = len(ergy)


@pm.stochastic(observed=True)
def custom_stochastic(effadse=effadse,
                      beta=beta,
                      delta=delta,
                      alpha=alpha,