Example #1
 def likefunc(self, n, s, dels, delb):
     """log-likelihood function for individual parameter points in the model.
     Contains the two nuisance parameters dels and delb, which
     parameterise the systematic errors. Marginalise these out to be
     Bayesian, or profile them to be pseudo-frequentist (they still
     have priors).
     The parameter 's' (signal mean) should then be the only free 
     parameter left. 
     Args:
     n - observed number of events
     s - expected number of events due to signal
     dels - systematic error parameter for signal
     delb - systematic error parameter for background

     Instance attributes used:
     ssys - estimated Gaussian uncertainty on the expected number of signal events (effectively a prior)
     b - expected number of events due to background
     bsystot - total estimated Gaussian uncertainty on the expected number of background events, combining the systematic (bsys) and "statistical" (bstat) pieces (effectively a prior)
     sK - signal efficiency scaling factor
     """
     #bsystot = np.sqrt(self.bsys**2 + self.bstat**2)                             # assume priors are independent
     siglike = logpoissonlike(n,self.sK*s*(1+dels*self.ssys)+self.b*(1+delb*self.bsystot))  # poisson signal + background log likelihood
     #Need to change the scaling of the prior to match the simulated data.
     #Makes no difference to inferences.
     Pdels = pymc.normal_like(dels,0,1) #+ 0.5*np.log(2*np.pi)            #standard normal gaussian log prior on dels
     Pdelb = pymc.normal_like(delb,0,1) #+ 0.5*np.log(2*np.pi)            #standard normal gaussian log prior on delb
     
     if siglike + Pdels + Pdelb < -1e200:
         print(dels, delb)
         print(siglike, Pdels, Pdelb, self.sK*s*(1+dels*self.ssys)+self.b*(1+delb*self.bsystot))
         raise ValueError("log-likelihood underflow")
     
     return siglike + Pdels + Pdelb
Example #2
def plot_ratio_analysis(
        data_samples=(100, ), dataset_samples=(100, ), datasets=100):
    x, y = np.meshgrid(data_samples, dataset_samples)
    z = np.empty(x.shape, dtype=float)

    for i, data_sample in enumerate(data_samples):
        for j, dataset_sample in enumerate(dataset_samples):
            data = np.random.randn(x[j, i])
            errors = []
            sl_sum = 0
            pt_sum = 0
            for rep in range(1, 200):
                # Choose two random mu points
                mu1 = (np.random.rand() - .5) * 3
                mu2 = (np.random.rand() - .5) * 3

                # Evaluate true likelihood
                pt1 = pm.normal_like(data, mu=mu1, tau=1)
                pt2 = pm.normal_like(data, mu=mu2, tau=1)

                ptr = pt1 / pt2
                pt_sum += pt1
                pt_sum += pt2

                #print ptr

                # Evaluate synth likelihood
                ps1 = synth_likelihood(data,
                                       mu1,
                                       1,
                                       dataset_samples=y[j, i],
                                       samples=datasets)
                ps2 = synth_likelihood(data,
                                       mu2,
                                       1,
                                       dataset_samples=y[j, i],
                                       samples=datasets)

                sl_sum += ps1
                sl_sum += ps2

                pts = ps1 / ps2
                #print pts

                errors.append((pts - ptr)**2)
            print(pt_sum)
            print(sl_sum)
            z[j, i] = np.mean(errors)
            print(x[j, i], y[j, i], z[j, i])

    print(x)
    print(y)
    print(z)
    cont = plt.contourf(x, y, z)

    plt.colorbar(cont)
    plt.xlabel('Number of samples per dataset')
    plt.ylabel('Size of input data.')
Example #3
def plot_erroranalysis(
        data_samples=(10, ), dataset_samples=(10, ), datasets=200):
    x = np.asarray(dataset_samples)
    y = np.empty(x.shape, dtype=float)

    for data_sample in data_samples:
        data = np.random.randn(data_sample)

        for i, dataset_sample in enumerate(dataset_samples):
            errors = []
            sl_sum = 0
            pt_sum = 0
            for rep in range(1, 400):
                # Choose two random mu points
                mu1 = 0
                mu2 = (np.random.rand() - .5)

                # Evaluate true likelihood
                pt1 = pm.normal_like(data, mu=mu1, tau=1**-2)
                pt2 = pm.normal_like(data, mu=mu2, tau=1**-2)

                ptr = pt1 / pt2
                pt_sum += pt1
                pt_sum += pt2

                #print ptr

                # Evaluate synth likelihood
                ps1 = synth_likelihood(data,
                                       mu1,
                                       1,
                                       dataset_samples=x[i],
                                       samples=datasets)
                ps2 = synth_likelihood(data,
                                       mu2,
                                       1,
                                       dataset_samples=x[i],
                                       samples=datasets)

                sl_sum += ps1
                sl_sum += ps2

                pts = ps1 / ps2
                #print pts

                errors.append((pts - ptr)**2)
            print(pt_sum)
            print(sl_sum)
            y[i] = np.mean(errors)

        plt.plot(x, y, label='%i' % data_sample)

    plt.xlabel('Number of samples per dataset')
    plt.ylabel('MSE')
    plt.legend()
Example #4
def X(value=X_true, K=K, A=A, mu=mu_x_init, tau=tau_x_init):
    """Autoregression"""

    # Initial data
    logp = normal_like(value[:K], mu, tau)

    # Difference equation
    for i in range(K, T):
        logp += normal_like(value[i], sum(A[:K]*value[i-K:i]), 1.)

    return logp
Example #5
def test_states_missing():
    """ Test that the state sequence step function properly handles missing
    observations.
    """
    np.random.seed(2352523)

    model_true = simple_state_seq_model()

    trans_mat_true = model_true['trans_mat_obs']
    mu_true = model_true['mu'].value
    states_true = model_true['states_rv'].value
    y_true = model_true['y_rv'].value

    N_obs_half = model_true['N_obs'] // 2
    y_mask = np.arange(model_true['N_obs']) > N_obs_half
    y_obs = np.ma.masked_array(y_true, mask=y_mask)

    model_test = simple_state_seq_model(y_obs=y_obs)

    mcmc_step = pymc.MCMC(model_test)
    mcmc_step.draw_from_prior()

    mcmc_step.use_step_method(HMMStatesStep, model_test['states_rv'])

    (states_step, ) = mcmc_step.step_method_dict[model_test['states_rv']]
    assert isinstance(states_step, HMMStatesStep)

    #
    # First, let's check that the logp function is working.
    # In this case, that also means missing values were forward-filled.
    #
    ind = np.arange(len(y_obs))
    ind[y_obs.mask] = 0

    y_logps_0_true = np.array([
        pymc.normal_like(y, model_test['mu_vals'][0], 100)
        for t, y in enumerate(y_obs)
    ])
    y_logps_0_true[y_obs.mask] = 0.
    y_logps_1_true = np.array([
        pymc.normal_like(y, model_test['mu_vals'][1], 100)
        for t, y in enumerate(y_obs)
    ])
    y_logps_1_true[y_obs.mask] = 0.

    states_step.compute_y_logp()
    y_logps_est = states_step.y_logp_vals

    assert_allclose(y_logps_est[0], y_logps_0_true)
    assert_allclose(y_logps_est[1], y_logps_1_true)

    mcmc_step.sample(2 * model_true['N_obs'])

    assert_hpd(mcmc_step.states_rv, states_true, alpha=0.1)
Example #6
def X(value=X_true, K=K, A=A, mu=mu_x_init, tau=tau_x_init):
    """Autoregression"""

    # Initial data
    logp = normal_like(value[:K], mu, tau)

    # Difference equation
    for i in range(K, T):
        logp += normal_like(value[i], sum(A[:K]*value[i-K:i]), 1.)

    return logp
Example #7
 def X_obs(pi=pi, sigma=sigma, value=X):
     logp = mc.normal_like(pl.array(value).ravel(),
                           (pl.ones([N, J*T])*pl.array(pi).ravel()).ravel(),
                           (pl.ones([N, J*T])*pl.array(sigma).ravel()).ravel()**-2)
     return logp

     # NOTE: the alternative formulation below is unreachable as written;
     # it sits after the return statement above.
     logp = pl.zeros(N)
     for n in range(N):
         logp[n] = mc.normal_like(pl.array(value[n]).ravel(),
                                  pl.array(pi+beta).ravel(),
                                  pl.array(sigma).ravel()**-2)
     return mc.flib.logsum(logp - pl.log(N))
Example #8
def test_states_trans_steps():
    """Test sampling of mixture states and transition matrices
    exclusively and simultaneously, i.e. no regression terms/means
    or variances to estimate, only the state sequence and transition
    probability matrix.
    """

    np.random.seed(2352523)

    model_true = simple_state_trans_model()

    trans_mat_true = model_true['trans_mat_rv'].value
    mu_true = model_true['mu'].value
    states_true = model_true['states_rv'].value
    y_obs = model_true['y_rv'].value

    model_test = simple_state_trans_model(y_obs=y_obs)

    mcmc_step = pymc.MCMC(model_test)
    mcmc_step.draw_from_prior()

    mcmc_step.use_step_method(HMMStatesStep, mcmc_step.states_rv)
    (states_step, ) = mcmc_step.step_method_dict[mcmc_step.states_rv]
    assert isinstance(states_step, HMMStatesStep)

    mcmc_step.use_step_method(TransProbMatStep, mcmc_step.trans_mat_rv)
    (trans_mat_step, ) = mcmc_step.step_method_dict[mcmc_step.trans_mat_rv]
    assert isinstance(trans_mat_step, TransProbMatStep)

    #
    # First, let's check that the logp function is working.
    #
    y_logps_0_true = np.array([
        pymc.normal_like(y_obs[t], model_test['mu_vals'][0], 100)
        for t in range(model_test['N_obs'])
    ])
    y_logps_1_true = np.array([
        pymc.normal_like(y_obs[t], model_test['mu_vals'][1], 100)
        for t in range(model_test['N_obs'])
    ])

    states_step.compute_y_logp()
    y_logps_est = states_step.y_logp_vals

    assert_allclose(y_logps_est[0], y_logps_0_true)
    assert_allclose(y_logps_est[1], y_logps_1_true)

    mcmc_step.sample(2000)

    assert_hpd(mcmc_step.states_rv, states_true, alpha=0.1)
    assert_hpd(mcmc_step.trans_mat_rv, trans_mat_true, alpha=0.1)
Example #9
    def X_obs(pi=pi, sigma=sigma, value=X):
        logp = mc.normal_like(
            pl.array(value).ravel(),
            (pl.ones([N, J * T]) * pl.array(pi).ravel()).ravel(),
            (pl.ones([N, J * T]) * pl.array(sigma).ravel()).ravel()**-2)
        return logp

        # NOTE: the alternative formulation below is unreachable as written;
        # it sits after the return statement above.
        logp = pl.zeros(N)
        for n in range(N):
            logp[n] = mc.normal_like(
                pl.array(value[n]).ravel(),
                pl.array(pi + beta).ravel(),
                pl.array(sigma).ravel()**-2)
        return mc.flib.logsum(logp - pl.log(N))
Example #10
def plot_ratio_analysis(data_samples=(100,), dataset_samples=(100,), datasets=100):
    x, y = np.meshgrid(data_samples, dataset_samples)
    z = np.empty(x.shape, dtype=float)

    for i, data_sample in enumerate(data_samples):
        for j, dataset_sample in enumerate(dataset_samples):
            data = np.random.randn(x[j, i])
            errors = []
            sl_sum = 0
            pt_sum = 0
            for rep in range(1, 200):
                # Choose two random mu points
                mu1 = (np.random.rand()-.5) * 3
                mu2 = (np.random.rand()-.5) * 3

                # Evaluate true likelihood
                pt1 = pm.normal_like(data, mu=mu1, tau=1)
                pt2 = pm.normal_like(data, mu=mu2, tau=1)

                ptr = pt1 / pt2
                pt_sum += pt1
                pt_sum += pt2

                #print ptr

                # Evaluate synth likelihood
                ps1 = synth_likelihood(data, mu1, 1, dataset_samples=y[j, i], samples=datasets)
                ps2 = synth_likelihood(data, mu2, 1, dataset_samples=y[j, i], samples=datasets)

                sl_sum += ps1
                sl_sum += ps2

                pts = ps1 / ps2
                #print pts

                errors.append((pts - ptr)**2)
            print(pt_sum)
            print(sl_sum)
            z[j, i] = np.mean(errors)
            print(x[j, i], y[j, i], z[j, i])

    print(x)
    print(y)
    print(z)
    cont = plt.contourf(x, y, z)

    plt.colorbar(cont)
    plt.xlabel('Number of samples per dataset')
    plt.ylabel('Size of input data.')
Example #11
def logdoublenormal(x, mean, sigmaP, sigmaM):
    # mean is the measured value
    # x is the computed theory value
    # sigmaP and sigmaM are the distances from the mean to the upper and
    # lower 1-sigma (68%) confidence limits.
    if x is None:
        return -1e300
    if x >= mean:
        tau = 1./sigmaP**2
    else:
        tau = 1./sigmaM**2
    # remove the normalisation factor so both halves of the likelihood
    # share the same normalisation
    return pymc.normal_like(x, mean, tau) - pymc.normal_like(mean, mean, tau)
Example #12
 def obs(f=rate_stoch,
         age_indices=age_indices,
         age_weights=age_weights,
         value=d_val,
         tau=1./(d_se)**2):
     f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
     return mc.normal_like(value, f_i, tau)
Example #13
 def x(N=N, mu=moo, tau=tau, n=n, value=np.log(data)):
     k = N - n
     dev = (value[0] - mu) * np.sqrt(tau)
     out = (gammaln(N+1) - gammaln(k) + (k-1)*np.log(pm.utils.normcdf(dev))
            + pm.normal_like(value, mu, tau))
     if np.isnan(out):
         raise ValueError
     return out
Example #14
 def obs(value=data.y,
         i_obs=i_obs,
         mu=mu,
         sigma_explained=sigma_explained,
         sigma_e=sigma_e):
     # precision = 1 / (explained variance + residual variance)
     return mc.normal_like(value[i_obs], mu[i_obs],
                           1. / (sigma_explained[i_obs]**2. + sigma_e**2.))
Example #15
 def obs(f=rate_stoch,
         age_indices=age_indices,
         age_weights=age_weights,
         value=d_val,
         tau=1. / (d_se)**2):
     f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
     return mc.normal_like(value, f_i, tau)
Example #16
    def covariate_constraint(mu=vars['mu_age'], alpha=vars['alpha'], beta=vars['beta'],
                             U_all=U_all,
                             X_sex_max=X_sex_max,
                             X_sex_min=X_sex_min,
                             lower=np.log(model.parameters[name]['level_bounds']['lower']),
                             upper=np.log(model.parameters[name]['level_bounds']['upper'])):
        log_mu_max = np.log(mu.max())
        log_mu_min = np.log(mu.min())

        alpha = np.array([float(x) for x in alpha])
        if len(alpha) > 0:
            for U_i in U_all:
                log_mu_max += max(0, alpha[U_i].max())
                log_mu_min += min(0, alpha[U_i].min())

        # this estimate is too crude, and is causing problems
        #if len(beta) > 0:
        #    log_mu_max += np.sum(np.maximum(X_max*beta, X_min*beta))
        #    log_mu_min += np.sum(np.minimum(X_max*beta, X_min*beta))

        # but leaving out the sex effect results in strange problems, too
        log_mu_max += X_sex_max*float(beta[sex_index])
        log_mu_min += X_sex_min*float(beta[sex_index])

        lower_violation = min(0., log_mu_min - lower)
        upper_violation = max(0., log_mu_max - upper)
        return mc.normal_like([lower_violation, upper_violation], 0., 1.e-6**-2)
Example #17
def get_likelihood_M0(map_M0, x, pwr, sigma, tau, obstype):
    A0 = get_variables_M0(map_M0)
    A0 = curve_fit_M0(x, pwr, A0, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M0(x, A0), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M0(x, A0), tau)
Example #18
def get_likelihood_M2(map_M2, x, pwr, sigma, tau, obstype):
    A2 = get_variables_M2(map_M2)
    A2 = curve_fit_M2(x, pwr, A2, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M2(x, A2), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M2(x, A2), tau)
Example #19
def get_likelihood_M1(map_M1, x, pwr, sigma, tau, obstype):
    A1 = get_variables_M1(map_M1)
    A1 = curve_fit_M1(x, pwr, A1, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M1(x, A1), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M1(x, A1), tau)
Example #20
        def model(params=params,
                  vars=vars,
                  paramnames=paramnames,
                  filters=filters,
                  value=1.0):
            # Set the parameters in the model
            for i, param in enumerate(paramnames):
                if debug:
                    print("setting ", param, " to ", params[i])
                self.model.parameters[param] = params[i]

            logp = 0
            numpts = 0
            for i, f in enumerate(filters):
                mod, err, mask = self.model(f, self.sn.data[f].MJD)
                m = mask * self.sn.data[f].mask
                if not np.any(m):
                    continue
                numpts += np.sum(m)
                tau = np.power(vars[i] + np.power(self.sn.data[f].e_mag, 2),
                               -1)
                logp += pymc.normal_like(self.sn.data[f].mag[m], mod[m],
                                         tau[m])
            #if numpts < len(paramnames):
            #   return -np.inf
            return logp
Example #21
 def mixture(value=1., gamma=gamma, pi=[0.2, 0.8], mu=[-2., 3.],
             sigma=[0.01, 0.01]):
     """
     The log probability of a mixture of normal densities.
     :param value:       The point of evaluation.
     :type value :       float
     :param gamma:       The parameter characterizing the SMC one-parameter
                         family.
     :type gamma :       float
     :param pi   :       The weights of the components.
     :type pi    :       1D :class:`numpy.ndarray`
     :param mu   :       The mean of each component.
     :type mu    :       1D :class:`numpy.ndarray`
     :param sigma:       The standard deviation of each component.
     :type sigma :       1D :class:`numpy.ndarray`
     """
     # Make sure everything is a numpy array
     pi = np.array(pi)
     mu = np.array(mu)
     sigma = np.array(sigma)
     # The number of components in the mixture
     n = pi.shape[0]
     # pymc.normal_like requires the precision, not the variance:
     tau = 1. / sigma ** 2
     # The following looks a little bit awkward because of the need for
     # numerical stability:
     p = np.log(pi)
     p += np.array([pymc.normal_like(value, mu[i], tau[i])
                    for i in range(n)])
     p = math.fsum(np.exp(p))
     # p should never be negative, but it can underflow to zero...
     if p <= 0.:
         return -np.inf
     return gamma * math.log(p)
Example #22
 def deriv_sign_rate(f=rate,
                     age_indices=age_indices,
                     tau=1.e14,
                     deriv=deriv,
                     sign=sign):
     df = pl.diff(f[age_indices], deriv)
     return mc.normal_like(pl.absolute(df) * (sign * df < 0), 0., tau)
Example #23
    def mixture(value=1., gamma=gamma, pi=[0.2, 0.8], mu=[-2., 3.],
                sigma=[0.01, 0.01]):
        """
        The log probability of a mixture of normal densities.

        :param value:       The point of evaluation.
        :type value :       float
        :param gamma:       The parameter characterizing the SMC one-parameter
                            family.
        :type gamma :       float
        :param pi   :       The weights of the components.
        :type pi    :       1D :class:`numpy.ndarray`
        :param mu   :       The mean of each component.
        :type mu    :       1D :class:`numpy.ndarray`
        :param sigma:       The standard deviation of each component.
        :type sigma :       1D :class:`numpy.ndarray`
        """
        # Make sure everything is a numpy array
        pi = np.array(pi)
        mu = np.array(mu)
        sigma = np.array(sigma)
        # The number of components in the mixture
        n = pi.shape[0]
        # pymc.normal_like requires the precision, not the variance:
        tau = 1. / sigma ** 2
        # The following looks a little bit awkward because of the need for
        # numerical stability:
        p = np.log(pi)
        p += np.array([pymc.normal_like(value, mu[i], tau[i])
                       for i in range(n)])
        p = math.fsum(np.exp(p))
        # p should never be negative, but it can underflow to zero...
        if p <= 0.:
            return -np.inf
        return gamma * math.log(p)
Example #24
 def obs(f=vars['rate_stoch'],
         age_indices=age_indices,
         age_weights=age_weights,
         value=np.log(dm.value_per_1(d)),
         tau=se**-2, data=d):
     f_i = rate_for_range(f, age_indices, age_weights)
     return mc.normal_like(value, np.log(f_i), tau)
Example #25
def multi_normal_like(values, vec_mu, tau):
    """logp for multi normal"""
    logp = 0
    for i in range(len(vec_mu)):
        logp += pm.normal_like(values[i, :], vec_mu[i], tau)

    return logp
Example #26
def multi_normal_like(values, vec_mu, tau):
    """logp for multi normal"""
    logp = 0
    for i in range(len(vec_mu)):
        logp += pm.normal_like(values[i, :], vec_mu[i], tau)

    return logp
Example #27
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            gamma = gamma.clip(pl.log(pl.exp(gamma).mean()/10.), pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
Example #28
    def covariate_constraint(
        mu=vars['mu_age'],
        alpha=vars['alpha'],
        beta=vars['beta'],
        U_all=U_all,
        X_sex_max=X_sex_max,
        X_sex_min=X_sex_min,
        lower=np.log(model.parameters[name]['level_bounds']['lower']),
        upper=np.log(model.parameters[name]['level_bounds']['upper'])):
        log_mu_max = np.log(mu.max())
        log_mu_min = np.log(mu.min())

        alpha = np.array([float(x) for x in alpha])
        if len(alpha) > 0:
            for U_i in U_all:
                log_mu_max += max(0, alpha[U_i].max())
                log_mu_min += min(0, alpha[U_i].min())

        # this estimate is too crude, and is causing problems
        #if len(beta) > 0:
        #    log_mu_max += np.sum(np.maximum(X_max*beta, X_min*beta))
        #    log_mu_min += np.sum(np.minimum(X_max*beta, X_min*beta))

        # but leaving out the sex effect results in strange problems, too
        log_mu_max += X_sex_max * float(beta[sex_index])
        log_mu_min += X_sex_min * float(beta[sex_index])

        lower_violation = min(0., log_mu_min - lower)
        upper_violation = max(0., log_mu_max - upper)
        return mc.normal_like([lower_violation, upper_violation], 0.,
                              1.e-6**-2)
Example #29
def plot_erroranalysis(data_samples=(10,), dataset_samples=(10,), datasets=200):
    x = np.asarray(dataset_samples)
    y = np.empty(x.shape, dtype=float)

    for data_sample in data_samples:
        data = np.random.randn(data_sample)

        for i, dataset_sample in enumerate(dataset_samples):
            errors = []
            sl_sum = 0
            pt_sum = 0
            for rep in range(1, 400):
                # Choose two random mu points
                mu1 = 0
                mu2 = (np.random.rand()-.5)

                # Evaluate true likelihood
                pt1 = pm.normal_like(data, mu=mu1, tau=1**-2)
                pt2 = pm.normal_like(data, mu=mu2, tau=1**-2)

                ptr = pt1 / pt2
                pt_sum += pt1
                pt_sum += pt2

                #print ptr

                # Evaluate synth likelihood
                ps1 = synth_likelihood(data, mu1, 1, dataset_samples=x[i], samples=datasets)
                ps2 = synth_likelihood(data, mu2, 1, dataset_samples=x[i], samples=datasets)

                sl_sum += ps1
                sl_sum += ps2

                pts = ps1 / ps2
                #print pts

                errors.append((pts - ptr)**2)
            print(pt_sum)
            print(sl_sum)
            y[i] = np.mean(errors)

        plt.plot(x, y, label='%i' % data_sample)

    plt.xlabel('Number of samples per dataset')
    plt.ylabel('MSE')
    plt.legend()
Example #30
 def obs(f=vars['rate_stoch'],
         age_indices=age_indices,
         age_weights=age_weights,
         value=pl.log(dm.value_per_1(d)),
         tau=se**-2,
         data=d):
     f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
     return mc.normal_like(value, pl.log(f_i), tau)
Example #31
def get_likelihood_M0(map_M0, x, pwr, sigma, obstype):
    tau = 1.0 / (sigma ** 2)
    A0 = get_variables_M0(map_M0)[0:3]
    A0 = curve_fit_M0(x, pwr, A0, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M0(x, A0), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M0(x, A0), tau)
Example #32
    def r_like(b1=beta1_summ, n=obs_summ['n']):
        """Likelihood for correlation coefficients of summarized data"""
        # Convert slope to r
        rho = b1 * stdev_phe / stdev_iq

        # Fisher z-transformation (arctanh) to allow for normality assumption
        eps = np.arctanh(rho) - np.arctanh(obs_summ['correlation'])
        # Difference should be mean-zero, with precision n - 3
        return normal_like(eps, mu=np.zeros(len(n)), tau=n - 3)
Example #33
    def r_like(b1=beta1_summ, n=obs_summ['n']):
        """Likelihood for correlation coefficients of summarized data"""
        # Convert slope to r
        rho = b1*stdev_phe/stdev_iq

        # Fisher z-transformation (arctanh) to allow for normality assumption
        eps = np.arctanh(rho) - np.arctanh(obs_summ['correlation'])
        # Difference should be mean-zero, with precision n - 3
        return normal_like(eps, mu=np.zeros(len(n)), tau=n-3)
Example #34
def test_states_single_step():
    """Test custom sampling of mixture states (in isolation).
    """

    np.random.seed(2352523)

    model_true = simple_state_seq_model()

    trans_mat_true = model_true['trans_mat_obs']
    mu_true = model_true['mu'].value
    states_true = model_true['states_rv'].value
    y_obs = model_true['y_rv'].value

    model_test = simple_state_seq_model(y_obs=y_obs)

    mcmc_step = pymc.MCMC(model_test)
    mcmc_step.draw_from_prior()

    mcmc_step.use_step_method(HMMStatesStep, model_test['states_rv'])

    (states_step, ) = mcmc_step.step_method_dict[model_test['states_rv']]
    assert isinstance(states_step, HMMStatesStep)

    #
    # First, let's check that the logp function is working.
    #
    y_logps_0_true = np.array([
        pymc.normal_like(y_obs[t], model_test['mu_vals'][0], 100)
        for t in range(model_test['N_obs'])
    ])
    y_logps_1_true = np.array([
        pymc.normal_like(y_obs[t], model_test['mu_vals'][1], 100)
        for t in range(model_test['N_obs'])
    ])

    states_step.compute_y_logp()
    y_logps_est = states_step.y_logp_vals

    assert_allclose(y_logps_est[0], y_logps_0_true)
    assert_allclose(y_logps_est[1], y_logps_1_true)

    mcmc_step.sample(2000)

    assert_hpd(mcmc_step.states_rv, states_true, alpha=0.01)
Example #35
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            gamma = gamma.clip(
                pl.log(pl.exp(gamma).mean() / 10.),
                pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(
                pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
Example #36
 def yhat(x=self.x,
          y=self.y,
          ysigma=self.ysigma,
          m=self.m,
          sigma=self.intrinsic_sigma):
     yhat = m * x
     return np.sum([
         pymc.normal_like(yhat[i], y[i], 1. / (ysigma[i]**2 + sigma**2))
         for i in range(len(y))
     ])
Example #37
        def obs(value=logit_val, logit_se=logit_se,
                X=covariates(d),
                alpha=alpha, beta=beta, gamma=gamma, sigma=sigma,
                age_indices=age_indices,
                age_weights=age_weights):

            # calculate study-specific rate function
            mu = predict_logit_rate(X, alpha, beta, gamma)
            mu_i = rate_for_range(mu, age_indices, age_weights)
            
            tau_i = 1. / (sigma**2 + logit_se**2)
            logp = mc.normal_like(x=value, mu=mu_i, tau=tau_i)
            return logp
Example #38
      def model(params=params, vars=vars, paramnames=paramnames,
                filters=filters, value=1.0):
         # Set the parameters in the model
         for i, param in enumerate(paramnames):
            if debug:
               print("setting ", param, " to ", params[i])
            self.model.parameters[param] = params[i]

         logp = 0
         numpts = 0
         for i, f in enumerate(filters):
            mod, err, mask = self.model(f, self.sn.data[f].MJD)
            m = mask * self.sn.data[f].mask
            if not np.any(m):
               continue
            numpts += np.sum(m)
            tau = np.power(vars[i] + np.power(self.sn.data[f].e_mag, 2), -1)
            logp += pymc.normal_like(self.sn.data[f].mag[m], mod[m], tau[m])
         #if numpts < len(paramnames):
         #   return -np.inf
         return logp
Example #39
 def parent_similarity(mu_child=mu_child, mu_parent=mu_parent,
                       tau=tau):
     log_mu_child = pl.log(mu_child.clip(offset, pl.inf))
     log_mu_parent = pl.log(mu_parent.clip(offset, pl.inf))
     return mc.normal_like(log_mu_child, log_mu_parent, tau)
Example #40
 def gamma_potential(gamma=gamma, mu_gamma=mu_gamma, tau_gamma=1./sigma_gamma[param_mesh]**2, param_mesh=param_mesh):
     return mc.normal_like(gamma[param_mesh], mu_gamma[param_mesh], tau_gamma)
Example #41
 def gamma_potential(gamma=gamma, mu_gamma=mu_gamma, tau_gamma=1./sigma_gamma[param_mesh]**2, param_mesh=param_mesh):
     return mc.normal_like(gamma[param_mesh], mu_gamma[param_mesh], tau_gamma)
Example #42
def X_obs(value=X_obs_vals, mu=X, tau=1.):
    """Data"""
    return normal_like(value, mu[::obs_interval], tau)
Example #43
 def delta_pot(delta=delta, mu=mu_delta, tau=sigma_delta**-2):
     return mc.normal_like(delta, mu, tau)
Example #44
 def mu_potential(mu1=rate_vars['unbounded_rate'], mu2=rate_vars['rate_stoch']):
     return mc.normal_like(mu1, mu2, .0001**-2)
Example #45
 def mu_potential(mu1=rate_vars['unbounded_rate'],
                  mu2=rate_vars['rate_stoch']):
     return mc.normal_like(mu1, mu2, .0001**-2)
Example #46
 def alpha_potential(alpha=alpha[i], mu=old_alpha_i.parents['mu'], tau=old_alpha_i.parents['tau']):
     return mc.normal_like(alpha, mu, tau)
Example #47
 def my_trunc_norm(value=value, mu=mu, tau=tau, a=a, b=b):
     if a <= value <= b:
         return mc.normal_like(value, mu, tau)
     else:
         return -np.inf
Example #48
 def alpha_potential(alpha=alpha[i], mu=old_alpha_i.parents['mu'], tau=old_alpha_i.parents['tau']):
     return mc.normal_like(alpha, mu, tau)
Example #49
 def my_trunc_norm(value=value, mu=mu, tau=tau, a=a, b=b):
     if a <= value <= b:
         return mc.normal_like(value, mu, tau)
     else:
         return -np.inf
Example #50
 def emp_prior_potential(f=rate_stoch, mu=emp_prior["mu"], tau=1.0 / np.array(emp_prior["se"]) ** 2):
     return mc.normal_like(f, mu, tau)
Example #51
 def smooth_across_regions(rate_list=rate_stochs):
     logp = 0.
     for ii in range(len(rate_list)):
         for jj in range(ii+1, len(rate_list)):
             logp += mc.normal_like(
                 np.diff(np.log(rate_list[ii])) - np.diff(np.log(rate_list[jj])),
                 0., 1./(.1)**2)
     return logp
Example #52
 def logp(value, theta1, y, rho):
     # conditional of a standard bivariate normal:
     # value | theta1 ~ N(y[1] + rho*(theta1 - y[0]), 1 - rho**2)
     mean = y[1] + rho * (theta1 - y[0])
     var = 1. - rho ** 2
     return normal_like(value, mean, 1. / var)
Example #53
 def p_obs(value=p, pi=pi, sigma=sigma, s=s, p_zeta=p_zeta):
     return mc.normal_like(
         pl.log(value[~i_inf] + p_zeta),
         pl.log(pi[~i_inf] + p_zeta),
         1.0 / (sigma ** 2.0 + (s / (value + p_zeta))[~i_inf] ** 2.0),
     )
Example #54
def A(value=A_init, mu=-1.*ones(K_max, dtype=float),
      tau=ones(K_max, dtype=float)):
    """A ~ normal(mu, tau)"""
    return normal_like(value, mu, tau)
Example #55
 def deriv_sign_rate(f=rate,
                     age_indices=age_indices,
                     tau=1.e14,
                     deriv=deriv, sign=sign):
     df = pl.diff(f[age_indices], deriv)
     return mc.normal_like(pl.absolute(df) * (sign * df < 0), 0., tau)
Example #56
def y_i(value=y, mu=y_hat, tau=tau_y):
    return pymc.normal_like(value, mu, tau)
Example #57
 def output(value=y, model_output=model_output, sigma=sigma, gamma=gamma):
     # gamma tempers the likelihood; evaluate `value`, not the global y
     return gamma * pm.normal_like(value, model_output, 1. / (sigma ** 2.))
Example #58
 def parent_similarity(mu_child=mu_child, mu_parent=mu_parent, tau=tau):
     log_mu_child = np.log(mu_child.clip(offset, np.inf))
     log_mu_parent = np.log(mu_parent.clip(offset, np.inf))
     return mc.normal_like(log_mu_child, log_mu_parent, tau)