Code example #1
def elbo_grad(z_sample, mu, sigma_sq, y, X, P, prior_sigma):
    score_mu = (z_sample - mu)/(sigma_sq)
    score_logsigma_sq = (-1/(2*sigma_sq) + np.power((z_sample - mu),2)/(2*np.power(sigma_sq,2))) * sigma_sq
    log_p = np.sum(y * np.log(sigmoid(np.dot(X,z_sample))) + (1-y) * np.log(1-sigmoid(np.dot(X,z_sample))))\
        + np.sum(norm.logpdf(z_sample, np.zeros(P), prior_sigma*np.ones(P)))
    log_q = np.sum(norm.logpdf(z_sample, mu, np.sqrt(sigma_sq)))
    return np.concatenate([score_mu,score_logsigma_sq])*(log_p - log_q)
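The function above is a single-sample score-function (REINFORCE) estimator of the ELBO gradient for Bayesian logistic regression, stacked as the gradient with respect to mu followed by the gradient with respect to log sigma_sq. A minimal sketch of how such an estimator might be used in a stochastic-gradient loop follows; the sigmoid helper, the toy data, and the step size are illustrative assumptions and not part of the original snippet.

import numpy as np
from scipy.stats import norm

def sigmoid(a):
    # hypothetical helper assumed by elbo_grad above
    return 1.0 / (1.0 + np.exp(-a))

# toy logistic-regression data (hypothetical)
rng = np.random.RandomState(0)
N, P = 100, 3
X = rng.randn(N, P)
y = (rng.rand(N) < sigmoid(X.dot(np.array([1.0, -2.0, 0.5])))).astype(float)
prior_sigma = 10.0

mu, log_sigma_sq = np.zeros(P), np.zeros(P)
step = 1e-3
for _ in range(200):
    sigma_sq = np.exp(log_sigma_sq)
    z_sample = mu + np.sqrt(sigma_sq) * rng.randn(P)
    grad = elbo_grad(z_sample, mu, sigma_sq, y, X, P, prior_sigma)  # defined above
    mu = mu + step * grad[:P]
    log_sigma_sq = log_sigma_sq + step * grad[P:]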
Code example #2
def test_gauss():
    # sample from N(0,1)

    pdf = lambda x: norm.logpdf(x)
    condpdf = lambda a, b: norm.logpdf(b, loc=a)
    condsamp = lambda a: np.random.normal(loc=a)

    def sampler(x0, niters):
        x = x0
        for _ in range(niters):
            x = sample(x, pdf, condpdf, condsamp)
            yield x

    bins = np.linspace(-3, 3, 1000)
    smoothing = 1e-5

    actual_samples = np.random.normal(size=1000)
    actual_hist = hist(actual_samples, bins) + smoothing
    actual_hist /= actual_hist.sum()

    slice_samples = np.array(list(sampler(1.0, 10000))[::10])
    slice_hist = hist(slice_samples, bins) + smoothing
    slice_hist /= slice_hist.sum()

    assert KL_approx(actual_hist, slice_hist, bins[1] - bins[0]) <= 0.1
Code example #3
File: dostuff.py Project: adrn/BruceWillis
def ln_posterior(p, coordinate, obs_vlos, err_vlos):
    pm_ra,pm_dec,vlos = p

    vgal = gc.vhel_to_gal(coordinate,
                          pm=(pm_ra*u.mas/u.yr,pm_dec*u.mas/u.yr),
                          rv=vlos*u.km/u.s,
                          vcirc=vcirc, vlsr=vlsr, galactocentric_frame=gc_frame)
    vtot = np.sqrt(np.sum(vgal**2)).to(u.km/u.s).value
    return norm.logpdf(vtot, loc=0., scale=sigma_halo) + norm.logpdf(vlos, loc=obs_vlos, scale=err_vlos)
Code example #4
def get_normal_example(sample_count):
    loc = 1.0
    scale = 2.0
    samples0 = norm.rvs(loc, scale, sample_count)
    samples1 = norm.rvs(loc, scale, sample_count)
    scores0 = norm.logpdf(samples0, loc, scale)
    scores1 = norm.logpdf(samples1, loc, scale)
    samples = numpy.array(list(zip(samples0, samples1)))
    scores = scores0 + scores1
    return {'name': 'normal', 'samples': samples, 'scores': scores}
Code example #5
def test_aic_posterior_dependence():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    p2 = norm.logpdf(d, scale=2)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    c.add_chain(d, posterior=p2, num_free_params=1, num_eff_data_points=1000)
    aics = c.comparison.aic()
    assert len(aics) == 2
    assert aics[0] == 0
    expected = 2 * np.log(2)
    assert np.isclose(aics[1], expected, atol=1e-3)
Code example #6
def test_dic_posterior_dependence():
    d = norm.rvs(size=1000000)
    p = norm.logpdf(d)
    p2 = norm.logpdf(d, scale=2)
    c = ChainConsumer()
    c.add_chain(d, posterior=p)
    c.add_chain(d, posterior=p2)
    bics = c.comparison.dic()
    assert len(bics) == 2
    assert bics[1] == 0
    dic1 = 2 * np.mean(-2 * p) + 2 * norm.logpdf(0)
    dic2 = 2 * np.mean(-2 * p2) + 2 * norm.logpdf(0, scale=2)
    assert np.isclose(bics[0], dic1 - dic2, atol=1e-3)
Code example #7
File: adjacency.py Project: slinderman/graphistician
    def log_prior(self):
        """
        Compute the log prior probability of F, mu0, and lmbda
        """
        lp  = 0

        # Log prior of F under spherical Gaussian prior
        from scipy.stats import norm
        lp += norm.logpdf(self.L, 0, np.sqrt(self.sigma)).sum()

        # Log prior of mu_0 and mu_self
        lp += norm.logpdf(self.mu_0, 0, 1)
        lp += norm.logpdf(self.mu_self, 0, 1)
        return lp
Code example #8
File: run.py Project: dessn/sn-bhm
def calculate_bias(chain_dictionary, supernovae, cosmologies, return_mbs=False):
    supernovae = supernovae[supernovae[:, 6] > 0.0]
    supernovae = supernovae[supernovae[:, 0] < 10.3]
    masses = np.ones(supernovae.shape[0])
    redshifts = supernovae[:, 0]
    apparents = supernovae[:, 1]
    stretches = supernovae[:, 2]
    colours = supernovae[:, 3]
    smear = supernovae[:, 4]
    apparents += smear
    # return np.ones(chain_dictionary["weight"].shape)
    existing_prob = norm.logpdf(colours, 0, 0.1) + norm.logpdf(stretches, 0, 1) + norm.logpdf(smear, 0, 0.1)

    weight = []
    for i in range(chain_dictionary["mean_MB"].size):
        om = np.round(chain_dictionary["Om"][i], decimals=3)
        key = "%0.3f" % om
        mus = cosmologies[key](redshifts)

        dscale = chain_dictionary["dscale"][i]
        dratio = chain_dictionary["dratio"][i]
        redshift_pre_comp = 0.9 + np.power(10, 0.95 * redshifts)
        mass_correction = dscale * (1.9 * (1 - dratio) / redshift_pre_comp + dratio)
        mass_correction = 0
        mabs = apparents - mus + chain_dictionary["alpha"][i] * stretches - chain_dictionary["beta"][i] * colours + mass_correction * masses

        mbx1cs = np.vstack((mabs, stretches, colours)).T
        chain_MB = chain_dictionary["mean_MB"][i]
        chain_x1 = chain_dictionary["mean_x1"][i]
        chain_c = chain_dictionary["mean_c"][i]
        chain_sigmas = np.array([chain_dictionary["sigma_MB"][i], chain_dictionary["sigma_x1"][i], chain_dictionary["sigma_c"][i]])
        chain_sigmas_mat = np.dot(chain_sigmas[:, None], chain_sigmas[None, :])
        chain_correlations = np.dot(chain_dictionary["intrinsic_correlation"][i], chain_dictionary["intrinsic_correlation"][i].T)
        chain_pop_cov = chain_correlations * chain_sigmas_mat
        chain_mean = np.array([chain_MB, chain_x1, chain_c])

        chain_prob = multivariate_normal.logpdf(mbx1cs, chain_mean, chain_pop_cov)
        reweight = logsumexp(chain_prob - existing_prob)
        if reweight < 1:
            for key in chain_dictionary.keys():
                print(key, chain_dictionary[key][i])
        weight.append(reweight)

    weights = np.array(weight)
    if return_mbs:
        mean_mb = chain_dictionary["mean_MB"] - chain_dictionary["alpha"] * chain_dictionary["mean_x1"] + \
                  chain_dictionary["beta"] * chain_dictionary["mean_c"]
        return weights, mean_mb
    return weights
Code example #9
File: gbn.py Project: jcasademont/datacenter
    def proba(self, name, data, given):
        if self.mean_ is None or self.covariance_ is None:
            self.compute_mean_cov_matrix()

        ll = 0
        Q = self.precision_
        idx = self.indices[name]
        evidence_indices = np.array([self.indices[n] for n in given if n in self.variables_names], dtype=int)

        indices = list(filter(lambda x: x not in evidence_indices,
                                np.arange(Q.shape[0])))

        new_indices = np.array(np.append(indices, evidence_indices), dtype=int)

        pos = np.where(np.array(indices) == idx)[0][0]
        _Q = (Q[new_indices, :])[:, new_indices]

        lim_a = np.size(indices)
        Qaa = _Q[:lim_a, :lim_a]
        Qab = _Q[:lim_a, lim_a:]

        iQaa = inv(Qaa)

        mean_a = self.mean_[indices]
        mean_b = self.mean_[evidence_indices]

        std = self.nodes[name][1]

        mean = mean_a - (np.dot(iQaa,
            np.dot(Qab, (data[:, evidence_indices] - mean_b).T))).T
        ll = np.sum(norm.logpdf(data[:, idx], mean[:, pos], std))

        return ll
Code example #10
File: mlfmmixsa.py Project: danieljtait/pydygp
    def _get_responsibilities(self, pi, g, beta, mu_ivp, alpha):
        """ Gets the posterior responsibilities for each comp. of the mixture.
        """
        probs = [[]]*len(self.N_data)
        for i, ifx in enumerate(self._ifix):

            zM = self._forward(g, beta, mu_ivp[i], ifx)

            for q, yq in enumerate(self.Y_train_):
                logprob = norm.logpdf(
                    yq, zM[self.data_inds[q], :, q], scale=1/np.sqrt(alpha))

                # sum over the dimension component
                logprob = logprob.sum(-1)

                if len(probs[q]) == 0:
                    probs[q] = logprob

                else:
                    probs[q] = np.column_stack((probs[q], logprob))
        probs = [lp - pi for lp in probs]
        # subtract the maxmium for exponential normalize
        probs = [p - np.atleast_1d(p.max(axis=-1))[:, None]
                 for p in probs]
        probs = [np.exp(p) / np.exp(p).sum(-1)[:, None] for p in probs]

        return probs
Code example #11
File: metrics.py Project: NICTA/revrand
def mll(y_true, y_pred, y_var):
    """
    Mean log loss under a Gaussian distribution.

    Parameters
    ----------
    y_true: ndarray
        vector of true targets
    y_pred: ndarray
        vector of predicted targets
    y_var: float or ndarray
        predicted variances

    Returns
    -------
    float:
        The mean negative log loss (negative log likelihood)

    Example
    -------
    >>> y_true = np.random.randn(100)
    >>> mean_prob = - norm.logpdf(1e-2, loc=0)  # -ve log prob close to mean
    >>> mll(y_true, y_true, 1) <= mean_prob  # Should be good predictor
    True
    >>> mll(y_true, np.random.randn(100), 1) >= mean_prob  # naive predictor
    True
    """

    return - norm.logpdf(y_true, loc=y_pred, scale=np.sqrt(y_var)).mean()
Code example #12
File: non_stationary.py Project: jmetzen/skgp
    def log_prior(self, theta):
        """ Returns the (log) prior probability of parameters theta.

        TODO

        NOTE: The returned quantity is an improper prior as its integral over
              the parameter space is not equal to 1.

        Parameters
        ----------
        theta : array_like
            An array giving the autocorrelation parameter(s).

        Returns
        -------
        log_p : float
            The (log) prior probability of parameters theta. An improper
            probability.
        """
        theta_gp, theta_nn = self._parse_theta(theta)

        if self.prior_nn_scale == np.inf:
            prior_nn = 0.0
        else:
            prior_nn = norm.logpdf(theta_nn, scale=self.prior_nn_scale).sum()

        if self.prior_gp_scale == np.inf:
            prior_gp = 0.0
        else:
            prior_gp = expon.logpdf(theta_gp, scale=self.prior_gp_scale).sum()

        return prior_nn + prior_gp
Code example #13
File: mcmc_lm.py Project: imeru/mcmc_ep
def likelihood(param):
    ROOF = param[0]
    WALL = param[1]
    WIN = param[2]
    SHGC = param[3]
    EPD = param[4]
    LPD = param[5]
    HSP = param[6]
    CSP = param[7]
    OCC = param[8]
    INF = param[9]
    Boiler = param[10]
    COP = param[11]
    
    """
    #---------------------------------
    #to do: get the EUI from energyplus
    prediction = -0.2177929 - 0.2458677 * ROOF - 0.3842544 * WALL - 0.0049753 * WIN  \
        + 0.0176093 * SHGC + 0.014322 * EPD + 0.0125891 * LPD + 0.0518334 * HSP \
        +0.0026691 * CSP + 0.0003318 * OCC + 0.5664563* INF - 0.6163375 * Boiler \
        - 0.0425599 * COP
    #---------------------------------
    """
    prediction = param[12]  # To read prediction value from run_metropolis_MCMC function
    singlelikelihoods = norm.logpdf(x=y, loc=prediction, scale=sd)
    #sumll = sum(singlelikelihoods)
    return singlelikelihoods
Code example #14
    def uncollapsed_likelihood(self, X, parameters):
        """
        Calculates the score of the data X under this component model with mean 
        mu and precision rho. 
        Inputs:
            X: A column of data (numpy)
            parameters: a dict with the following keys
                mu: the Gaussian mean
                rho: the precision of the Gaussian
        """
        check_data_type_column_data(X)
        check_model_params_dict(parameters)

        mu = parameters['mu']
        rho = parameters['rho']
    
        N = float(len(X))
        
        hypers = self.get_hypers()
        s = hypers['s']
        r = hypers['r']
        nu = hypers['nu']
        m = hypers['mu']
        
        sum_err = numpy.sum((mu-X)**2.0)
            
        log_likelihood = self.log_likelihood(X, {'mu':mu, 'rho':rho})   
        log_prior_mu = norm.logpdf(m, (r/rho)**.5)
        log_prior_rho = -(nu/2.0)*LOG_2+(nu/2.0)*math.log(s)+ \
            (nu/2.0-1.0)*math.log(rho)-.5*s*rho-math.lgamma(nu/2.0)
            
        log_p = log_likelihood + log_prior_mu + log_prior_rho
        
        return log_p
Code example #15
File: laplaceCg.py Project: zuai/preconditioned_GPs
	def __init__(self, K, Y, init=None, threshold=1e-9):
		
		N = np.shape(K)[0]
		f = np.zeros((N,1))
		converged = False
		k = 0
		innerC = 0

		for i in range(N):
			pdfDiff = norm.logpdf(f) - norm.logcdf(Y*f)
			W = np.exp(2*pdfDiff) + Y*f*np.exp(pdfDiff)
			Wsqrt = np.sqrt(W)
			Wdiag= np.diag(Wsqrt.flatten())

			B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
			grad = Y*np.exp(pdfDiff)
			b = W*f + grad
			interim = np.dot(Wdiag, np.dot(K, b))

			cgRes = Cg(B, interim, threshold=threshold)
			s1 = cgRes.result
			innerC = innerC + cgRes.iterations
			a = b - Wsqrt*s1

			if(converged):
				break
			f_prev = f
			f = np.dot(K, a)
			diff = f - f_prev
			if (np.dot(diff.T,diff).flatten() < threshold*N or innerC>15000):
				converged = True
			k = k+1

		self.result = f
		self.iterations = k + innerC
Code example #16
def _logp_w(I, V, W, Y, mu_W, sigma_W):
    
    # Indices and lengths
    idx_done = I == 2
    idx_traded_away = I == 1
    idx_not_traded = I == 0
    _, l = W.shape
    
    # Done case: Y < min(V, W) => min(W) > Y
    W_lower = (Y[idx_done] - mu_W) / sigma_W
    logp_done = truncnorm_logpdf(
            W[idx_done], W_lower, np.inf, loc=mu_W, scale=sigma_W)
    
    # Traded away case: min(W) < min(Y, V) = m (Working)
    m = np.minimum(Y[idx_traded_away], V[idx_traded_away])
    F_0_m = 1 - (1 - norm.cdf(m, mu_W, sigma_W))
    logp_traded_away = np.sum(np.log(1. / F_0_m * l)
            + norm.logpdf(W[idx_traded_away, 0], loc=mu_W, scale=sigma_W)
            + np.log((1 - norm.cdf(W[idx_traded_away, 0], loc=mu_W, scale=sigma_W))**(l-1)))
    

    # Not traded case: min(W) > V (and Y > V)
    W_lower = (V[idx_not_traded] - mu_W) / sigma_W
    logp_not_traded = truncnorm_logpdf(
            W[idx_not_traded], W_lower, np.inf, loc=mu_W, scale=sigma_W)
    
    return logp_done + logp_traded_away + logp_not_traded
Code example #17
File: mlfmmixsa.py Project: danieljtait/pydygp
    def loglikelihood(self, g, beta, mu_ivp, alpha, pi, priors):
        
        logprobs = []
        for i, ifx in enumerate(self._ifix):
            # get the logprobability for each mixture component
            ll = 0.
            
            zM = self._forward(g, beta, mu_ivp[i], ifx)
            for q, yq in enumerate(self.Y_train_):
                ll += norm.logpdf(
                    yq, zM[..., q], scale=1/np.sqrt(alpha)).sum()

            logprobs.append(ll + np.log(pi[i]))
        logprobs = np.array(logprobs)

        lpmax = max(logprobs)

        loglik = lpmax + np.log(np.exp(logprobs - lpmax).sum())

        Cg = self.latentforces[0].kernel(self.ttc[:, None])
        Cg[np.diag_indices_from(Cg)] += 1e-5
        Lg = np.linalg.cholesky(Cg)
        logprior = -0.5 * g.dot(cho_solve((Lg, True), g)) - \
                   np.log(np.diag(Lg)).sum() - \
                   Lg.shape[0] / 2 * np.log(2 * np.pi)


        for vn, x in zip(['beta'], beta):
            try:
                prior_logpdf = priors[vn]
                logprior += prior_logpdf(x)
            except KeyError:
                pass

        return loglik + logprior
Code example #18
def log_liklihood(logL0, a,b, B_l, sigma,M, z, logRich):
    p = 0
    #mass function
    p+= np.sum(log_n_approx(M,z))#not normalized, if that's a problem i can approximate it.
    # likelihood of richness
    p+=np.sum(norm.logpdf(logRich, loc =logLam(logL0, a, b, B_l, M, z), scale = sigma))
    return p
Code example #19
File: norm.py Project: ronrest/pyrpy
def dnorm(x, mean=0, sd=1, log=False):
    """
    ============================================================================
                                                                         dnorm()
    ============================================================================
    Density Function for the normal distribution.
    Returns the probability density value at the value x.

    USAGE:
    cnorm(mean=0, sd=1, type="equal", conf=0.95)
    dnorm(x, mean=0, sd=1, log=False)
    pnorm(q, mean=0, sd=1, lowertail=True, log=False)
    qnorm(p, mean=0, sd=1, lowertail=True, log=False)
    rnorm(n=1, mean=0, sd=1)

    :param x (float, array of floats): The value(s) of x
    :param mean (float): mean of the distribution
    :param sd (float):   standard deviation
    :param log (bool):   if True, return the log of the density
    :return:        returns an array of density values
    ============================================================================
    """
    if log:
        return norm.logpdf(x, loc=mean, scale=sd)
    else:
        return norm.pdf(x, loc=mean, scale=sd)
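A brief usage example, added here for illustration and not part of the original file:

from scipy.stats import norm   # assumed at the top of norm.py, since dnorm() relies on it

print(dnorm(0))                  # density of N(0, 1) at 0: about 0.3989
print(dnorm(0, mean=5, sd=10))   # density of N(5, 10) at 0: about 0.0352
print(dnorm(0, log=True))        # log-density of N(0, 1) at 0: about -0.9189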
Code example #20
File: mlfmsamoe2.py Project: danieljtait/pydygp
    def log_likelihood(self, g, beta,
                       mu_ivp, alpha_prec, pi,
                       ifix):

        # Has to be a nicer way of doing this using slices
        inds = [np.concatenate([self.data_inds[q] + k*self.dim.N
                                for k in range(self.dim.K)])
                for q in range(len(self.y_train_))]
        
        logps = []
        for m, ifx in enumerate(ifix):

            Km = self._K(g, beta, ifx)
            means = np.kron(mu_ivp[:, m, :], np.ones(self.dim.N)).T

            L = np.linalg.matrix_power(Km, self.order)
            L = L.dot(means)

            # sum over the experiments
            logp_m = 0.

            for q, y in enumerate(self.y_train_):
                inds_q = inds[q]
                Lim_q = L[inds_q, q]
                eta = Lim_q - y

                logp_m += norm.logpdf(eta,
                                      scale=1/np.sqrt(alpha_prec)).sum()
            logps.append(logp_m + np.log(pi[m]))

        logps = np.array(logps)
        a = logps.max()

        res = a + np.log(np.exp(logps - a).sum())
        return res
Code example #21
def test_KL_divergence_for_normal_distributions(show_plot=True):

	mu_0 = 0
	sigma_0 = 1

	interval = norm.interval(.99,mu_0,sigma_0)

	support = numpy.linspace(interval[0], interval[1], num=2000)

	mus = numpy.linspace(0, 3, num=30)

	p_0 = norm.logpdf(support, mu_0, sigma_0)

	KL_inf = []
	KL_ana = []

	for mu in mus:
		p_1 = norm.logpdf(support, mu, sigma_0)

		kld = qtu.KL_divergence_arrays(support, p_0, p_1, False)

		KL_inf.append(float(kld))
		KL_ana.append(actual_KL(mu_0, sigma_0, mu, sigma_0))


	KL_inf = numpy.array(KL_inf)
	KL_ana = numpy.array(KL_ana)
	KL_diff = KL_ana-KL_inf


	if show_plot:
		pylab.subplot(1,2,1)
		pylab.plot(KL_inf, label='est')
		pylab.plot(KL_ana, label='analytical')
		pylab.title('estimated KL')
		pylab.legend()

		pylab.subplot(1,2,2)
		pylab.plot(KL_diff)
		pylab.title('KL error')

		pylab.show()


	_, p = pearsonr(KL_inf, KL_ana)

	return p
Code example #22
def loglikelihood(X, Z, W):
  ZW = Z.dot(W.T)
  LL = 0
  for i in range(N):
    ll = mvn.logpdf(X[i], mean=ZW[i], cov=sigmaI)
    LL += ll
  LL += norm.logpdf(W.flatten(), scale=1/lam).sum()
  return LL
Code example #23
def test_aic_0():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    aics = c.comparison.aic()
    assert len(aics) == 1
    assert aics[0] == 0
Code example #24
def test_bic_fail_no_data_points():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1)
    bics = c.comparison.bic()
    assert len(bics) == 1
    assert bics[0] is None
Code example #25
def log_liklihood(logL0, a,b, B_l, sigma, z, logRich):

    logPRich = np.array([norm.logpdf(lr, loc =logLam(logL0, a, b, B_l, z, ms), scale = sigma)\
                         for lr, ms in izip(logRich, massSamples)])

    logL_k = logsumexp(logPRich+logPMass-logPSample, axis = 1) - np.log(nSamples)#mean of weights

    return np.sum(logL_k)
Code example #26
File: cairs_test_suit.py Project: krejcmat/pycairs
def log_p_MWL(S, I):
    mu = I / log_p_MWLLen
    sigma = 0.005

    # # log of normal density, p(S|R)
    # The probability density function for norm is:
    # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
    return norm.logpdf(np.float(S), loc=np.float(mu), scale=np.float(sigma))
Code example #27
def test_dic_0():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p)
    dics = c.comparison.dic()
    assert len(dics) == 1
    assert dics[0] == 0
Code example #28
File: basic_mcmc.py Project: ThomasDing2016/bdapy
def metropolis_bivariate(y1, y2, start_theta1, start_theta2, num_samples, 
                         include_start=True, include_loglik=False):
    if include_start:
        theta1_samples = [start_theta1]
        theta2_samples = [start_theta2]
    else:
        theta1_samples = []
        theta2_samples = []
        
    loglik_samples = []
    current_theta1 = start_theta1
    current_theta2 = start_theta2
    # somehow the book is using unnormalized log probability. don't know why.
    current_log_prob = norm.logpdf((current_theta1,current_theta2),loc=(0,0),scale=(1,1)).sum() \
            - norm.logpdf((0,0),loc=(0,0),scale=(1,1)).sum()
    
    for i in range(num_samples):
        proposal_theta1, proposal_theta2 = np.random.normal(loc=(current_theta1, current_theta2),
                                                            scale=(0.2,0.2))
        proposal_log_prob = norm.logpdf((proposal_theta1,proposal_theta2),
                                         loc=(0,0),scale=(1,1)).sum() \
                            - norm.logpdf((0,0),
                                         loc=(0,0),scale=(1,1)).sum()
        if proposal_log_prob > current_log_prob:
            flag_accept = True
        else:
            acceptance_prob = np.exp(proposal_log_prob - current_log_prob)
            if np.random.random() < acceptance_prob:
                flag_accept = True
            else:
                flag_accept = False
        
        if flag_accept:
            current_theta1 = proposal_theta1
            current_theta2 = proposal_theta2
            current_log_prob = proposal_log_prob
            
        theta1_samples.append(current_theta1)
        theta2_samples.append(current_theta2)
        loglik_samples.append(current_log_prob)
    
    if include_loglik:
        return theta1_samples, theta2_samples, loglik_samples
    else:
        return theta1_samples, theta2_samples
Code example #29
    def test_one_dim(self):
        """ Test log pdf for one dimensional data """
        x = np.array([1])
        mean = np.array([2])
        Q  = np.array([[ 1 / 25 ]])

        gmrf = GMRF()
        self.assertAlmostEqual(gmrf._logpdf(x, mean, Q),
                               norm.logpdf(1, 2, 5))
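The comparison works because a one-by-one precision matrix Q = [[1/25]] corresponds to a variance of 25, i.e. a standard deviation of 5. A standalone check of that equivalence using scipy only (independent of the GMRF class):

from scipy.stats import norm, multivariate_normal

print(norm.logpdf(1, loc=2, scale=5))                               # about -2.5484
print(multivariate_normal.logpdf([1.0], mean=[2.0], cov=[[25.0]]))  # same value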
Code example #30
File: pysolve.py Project: calonlay-hj/ppaml-cp4
def _compute_ev_det_loglike(physics, event, det):

  # load the station tuple and compute basic event-station attributes like
  # distance, travel time, azimuth, and azimuth difference

  stanum = det.stanum
  station = STATIONS[stanum]
  
  dist = compute_distance((station.lon, station.lat),
                          (event.lon, event.lat))

  ttime = compute_travel_time(dist)
  
  sta_to_ev_az = compute_azimuth((station.lon, station.lat),
                                 (event.lon, event.lat))

  # the azimuth difference of observed to theoretical
  degdiff = compute_degdiff(sta_to_ev_az, det.azimuth)
  
  loglike = 0

  # detection probability
  
  detprob = logistic(physics.mu_d0[stanum]
                     + physics.mu_d1[stanum] * event.mag
                     + physics.mu_d2[stanum] * dist)

  loglike += log(detprob)

  # detection time

  loglike += laplace.logpdf(det.time,
                            event.time + ttime + physics.mu_t[stanum],
                            physics.theta_t[stanum])


  # detection azimuth
  
  loglike += laplace.logpdf(degdiff, physics.mu_z[stanum],
                            physics.theta_z[stanum])

  # detection slowness

  loglike += laplace.logpdf(det.slowness,
                            compute_slowness(dist) + physics.mu_s[stanum],
                            physics.theta_s[stanum])

  # detection amplitude

  loglike += norm.logpdf(log(det.amp),
                         physics.mu_a0[stanum]
                         + physics.mu_a1[stanum] * event.mag
                         + physics.mu_a2[stanum] * dist,
                         physics.sigma_a[stanum])
  
  return loglike
Code example #31
File: blm.py Project: lmc2179/blm
 def _loglikelihood(self, v):
     coef, sigma = v[:-1], v[-1]
     y_hat = np.dot(coef, self.X_.T)
     return np.sum(norm.logpdf(self.y_, y_hat, sigma))
Code example #32
.. figure::     ../../examples/resources/table_comparison.png
    :align:     center
    :width:     60%

"""

###############################################################################
# The code to produce this, and the raw LaTeX, is given below:

from scipy.stats import norm
from chainconsumer import ChainConsumer

n = 10000
d1 = norm.rvs(size=n)
p1 = norm.logpdf(d1)
p2 = norm.logpdf(d1, scale=1.1)

c = ChainConsumer()
c.add_chain(d1,
            posterior=p1,
            name="Model A",
            num_eff_data_points=n,
            num_free_params=4)
c.add_chain(d1,
            posterior=p2,
            name="Model B",
            num_eff_data_points=n,
            num_free_params=5)
c.add_chain(d1,
            posterior=p2,
Code example #33
File: carbon.py Project: dsteinberg/carbon
def negative_log_proba(y_true, y_pred, s_pred):
    nlp = -norm.logpdf(y_true, loc=y_pred, scale=s_pred)
    mean_nlp = np.mean(nlp)
    return mean_nlp
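A small sanity check, not from the original project: when the predictions equal the targets and the predictive standard deviation is 1, the mean negative log-probability reduces to -norm.logpdf(0), about 0.9189.

import numpy as np
from scipy.stats import norm

y = np.zeros(5)
print(negative_log_proba(y, y, np.ones(5)))  # about 0.9189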
Code example #34
File: lpf.py Project: hpparvi/PyTransit
 def T14(pv):
     pv = atleast_2d(pv)
     a = as_from_rhop(pv[:, 2], pv[:, 1])
     t14 = duration_eccentric(pv[:, 1], sqrt(pv[:, 4]), a,
                              arccos(pv[:, 3] / a), 0, 0, 1)
     return norm.logpdf(t14, mean, std)
Code example #35
File: bohamiann.py Project: mcgrady20150318/pybnn
    def train_and_evaluate(self,
                           x_train: np.ndarray,
                           y_train: np.ndarray,
                           x_valid: np.ndarray,
                           y_valid: np.ndarray,
                           num_steps: int = 13000,
                           validate_every_n_steps=1000,
                           keep_every: int = 100,
                           num_burn_in_steps: int = 3000,
                           lr: float = 1e-2,
                           epsilon: float = 1e-10,
                           batch_size: int = 20,
                           mdecay: float = 0.05,
                           verbose=False):
        """
        Trains and validates the neural network

        :param x_train: input training datapoints.
        :param y_train: input training targets.
        :param x_valid: validation data points
        :param y_valid: validation targets
        :param num_steps:  Number of sampling steps to perform after burn-in is finished.
            In total, `num_steps // keep_every` network weights will be sampled.
        :param validate_every_n_steps:
        :param keep_every: Number of sampling steps (after burn-in) to perform before keeping a sample.
            In total, `num_steps // keep_every` network weights will be sampled.
        :param num_burn_in_steps: Number of burn-in steps to perform.
            This value is passed to the given `optimizer` if it supports special
            burn-in specific behavior.
            Networks sampled during burn-in are discarded.
        :param lr: learning rate
        :param batch_size: batch size
        :param epsilon: epsilon for numerical stability
        :param mdecay: momentum decay
        :param verbose: verbose output

        """
        assert batch_size >= 1, "Invalid batch size. Batches must contain at least a single sample."

        if x_train.shape[0] < batch_size:
            logging.warning(
                "Not enough datapoints to form a batch. Use all datapoints in each batch"
            )
            batch_size = x_train.shape[0]

        # burn-in
        self.train(x_train,
                   y_train,
                   num_burn_in_steps=num_burn_in_steps,
                   num_steps=num_burn_in_steps,
                   lr=lr,
                   epsilon=epsilon,
                   mdecay=mdecay,
                   verbose=verbose)

        learning_curve_mse = []
        learning_curve_ll = []
        n_steps = []
        for i in range(num_steps // validate_every_n_steps):
            self.train(x_train,
                       y_train,
                       num_burn_in_steps=0,
                       num_steps=validate_every_n_steps,
                       lr=lr,
                       epsilon=epsilon,
                       mdecay=mdecay,
                       verbose=verbose,
                       keep_every=keep_every,
                       continue_training=True,
                       batch_size=batch_size)

            mu, var = self.predict(x_valid)

            ll = np.mean(norm.logpdf(y_valid, loc=mu, scale=np.sqrt(var)))
            mse = np.mean((y_valid - mu)**2)
            step = num_burn_in_steps + (i + 1) * validate_every_n_steps

            learning_curve_ll.append(ll)
            learning_curve_mse.append(mse)
            n_steps.append(step)

            if verbose:
                print("Validate : NLL = {:11.4e} MSE = {:.4e}".format(
                    -ll, mse))

        return n_steps, learning_curve_ll, learning_curve_mse
Code example #36
def likelihood(a, b, sd):
    pred = a * x + b  # predictions
    single_likelihood = norm.logpdf(y, loc=pred,
                                    scale=sd)  # loc=mean, scale=standard deviation
    return sum(single_likelihood)
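The likelihood above reads the observations x and y from the enclosing module. A toy check with hypothetical data, assuming likelihood() is defined as above:

import numpy as np
from scipy.stats import norm

np.random.seed(0)
x = np.linspace(0, 1, 50)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)

print(likelihood(2.0, 1.0, 0.1))  # large log-likelihood near the true parameters
print(likelihood(0.0, 0.0, 0.1))  # much smaller for a poor fit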
Code example #37
File: lpf.py Project: hpparvi/PyTransit
 def as_prior(pv):
     a = as_from_rhop(pv[2], pv[1])
     return norm.logpdf(a, mean, std)
Code example #38
def sample_and_plot(sess, kld, kl_from_pq, kl_from_cob, p_samples, q_samples, m_samples, log_ratio_p_q, log_ratio_p_m, mu_1, mu_2, scale_p, scale_q, mu_3, scale_m):
    kl_ratio_store=[]
    log_ratio_store=[]
    log_r_p_from_m_direct_store=[]


    feed_dict = {}
    kl_ratio, kl_true, kl_cob, p_s, q_s, m_s, lpq, lpq_from_cob_dre_direct = sess.run([kld, kl_from_pq, kl_from_cob, p_samples, q_samples, m_samples,
                                                                                log_ratio_p_q,  log_ratio_p_m],
                                                                              feed_dict=feed_dict)
    
    
    
    '''Save ratio estimates'''
    data_dir = "../data/sym/"+str(scale_p)+"-"+str(scale_q)+str(scale_m)+"/"
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    
    f = open(data_dir+"KLD"+".txt", "a")
    f.write("GT for mu_3 = "+str(mu_3)+": "+str(kl_ratio)+"\nGT-est: "+str(kl_true)+"\nCoB: "+str(kl_cob)+"\n----------\n")
    f.close()
    log_ratio_store.append(lpq)
    log_r_p_from_m_direct_store.append(lpq_from_cob_dre_direct)
    
    pickle.dump(log_r_p_from_m_direct_store, open(data_dir+"log_r_p_from_m_direct_store"+str(mu_3)+".p", "wb"))
    pickle.dump(m_s, open(data_dir+"xs"+str(mu_3)+".p", "wb"))
    pickle.dump(log_ratio_store, open(data_dir+"log_ratio_store"+str(mu_3)+".p", "wb"))
    
    xs = m_s

    fig, [ax1,ax2,ax3, ax4] = plt.subplots(1, 4,figsize=(13,4))
    ax1.hist(p_s, density=True, histtype='stepfilled', alpha=0.8, label='P')
    ax1.hist(q_s, density=True, histtype='stepfilled', alpha=0.8, label='Q')
    ax1.hist(m_s, density=True, histtype='stepfilled', alpha=0.8, label='M')
    ax1.legend(loc='best', frameon=False)
    ax1.set_xlim([-5,5])
    
    ax2.scatter(xs,log_ratio_store[0],label='True p/q',alpha=0.9,s=10.,c='b')
    ax2.scatter(xs,log_r_p_from_m_direct_store[-1][:,0]-log_r_p_from_m_direct_store[-1][:,1],label='CoB p/q',alpha=0.9,s=10.,c='r')
    ax2.scatter(xs,-log_ratio_store[0],label='True q/p',alpha=0.9,s=10.,c='b')
    ax2.scatter(xs,log_r_p_from_m_direct_store[-1][:,1]-log_r_p_from_m_direct_store[-1][:,0],label='CoB q/p',alpha=0.9,s=10.,c='r')

    ax2.set_xlabel("Samples")
    ax2.set_ylabel("Log Ratio")
    ax2.legend(loc='best')
    ax2.set_xlim([-6,10])
    ax2.set_ylim([-1000,1000])
    
    pm = [np.squeeze(norm.logpdf(x,mu_1,scale_p)-cauchy.logpdf(x,mu_3,scale_m)) for x in xs]
    qm = [np.squeeze(norm.logpdf(x,mu_2,scale_q)-cauchy.logpdf(x,mu_3,scale_m)) for x in xs]
    ax4.scatter(xs,pm,label='True p/m',alpha=0.9,s=10.,c='b')
    ax4.scatter(xs,log_r_p_from_m_direct_store[-1][:,0]-log_r_p_from_m_direct_store[-1][:,2],label='CoB p/m',alpha=0.9,s=10.,c='r')
    ax4.scatter(xs,qm,label='True q/m',alpha=0.9,s=10.,c='y')
    ax4.scatter(xs,log_r_p_from_m_direct_store[-1][:,1]-log_r_p_from_m_direct_store[-1][:,2],label='CoB q/m',alpha=0.9,s=10.,c='g')

    ax4.set_xlabel("Samples")
    ax4.set_ylabel("Log Ratio")
    ax4.legend(loc='best')
    ax4.set_xlim([-6,10])
    ax4.set_ylim([-1000,1000])
    
    
    rat = log_r_p_from_m_direct_store[-1][:,0]-log_r_p_from_m_direct_store[-1][:,1]
    d = [np.squeeze(norm.logpdf(x,mu_2,scale_q)) for x in xs]
    b = [np.squeeze(norm.logpdf(x,mu_1,scale_p)) for x in xs]
    ax3.scatter(xs,b,label='True P',alpha=0.9,s=5.)
    ax3.scatter(xs,rat+d,label='P',alpha=0.9,s=5.)

    ax3.set_xlabel("Samples")
    ax3.set_ylabel("Log P(x)")
    ax3.legend(loc='best')
    ax3.set_xlim([-6,10])
    ax3.set_ylim([-600,400])
    
    plt.savefig(data_dir+str(mu_3)+".jpg")
    
    
    
Code example #39
def main(old_samples_tsv,
         old_segmentations_tsv,
         new_samples_tsv,
         benchmark=True):
    """
    Infer and print the new values for mu and sigma (for each of S, L, A, P, C, T) to STDOUT.

    Args:
    * old_samples_tsv: path to TSV file containing polya-samples data from an older kmer model.
    * old_segmentations_tsv: path to TSV file containing polya-segmentation data from an older kmer model.
    * new_samples_tsv: path to TSV file containing polya-samples data from the newer kmer model.

    Returns: N/A, prints outputs to STDOUT.
    """
    ### read all samples into numpy arrays:
    print("Loading data from TSV...")
    old_data = old_tsv_to_numpy(old_samples_tsv)
    segmentations = make_segmentation_dict(old_segmentations_tsv)
    new_data = new_tsv_to_numpy(new_samples_tsv, segmentations)
    print("... Datasets loaded.")

    ### infer best possible new mu,sigma for each of S, L, A, P, T:
    print("Fitting gaussians to new scaled samples (this may take a while)...")
    new_mu_S, new_sigma_S = fit_gaussian(new_data['S_samples'])
    new_mu_L, new_sigma_L = fit_gaussian(new_data['L_samples'])
    (new_pi0_A, new_mu0_A,
     new_sig0_A), (new_pi1_A, new_mu1_A,
                   new_sig1_A) = fit_gmm(new_data['A_samples'], ncomponents=2)
    new_mu_P, new_sigma_P = fit_gaussian(new_data['P_samples'])
    (new_pi0_T, new_mu0_T,
     new_sig0_T), (new_pi1_T, new_mu1_T,
                   new_sig1_T) = fit_gmm(new_data['T_samples'], ncomponents=2)

    ### print to stdout:
    print("New params for START: mu = {0}, var = {1}, stdv = {2}".format(
        new_mu_S, new_sigma_S, np.sqrt(new_sigma_S)))
    print("New params for LEADER: mu = {0}, var = {1}, stdv = {2}".format(
        new_mu_L, new_sigma_L, np.sqrt(new_sigma_L)))
    print("New params for ADAPTER0: pi = {0}, mu = {1}, var = {2}, stdv = {3}".
          format(new_pi0_A, new_mu0_A, new_sig0_A, np.sqrt(new_sig0_A)))
    print("New params for ADAPTER1: pi = {0}, mu = {1}, var = {2}, stdv = {3}".
          format(new_pi1_A, new_mu1_A, new_sig1_A, np.sqrt(new_sig1_A)))
    print("New params for POLYA: mu = {0}, var = {1}, stdv = {2}".format(
        new_mu_P, new_sigma_P, np.sqrt(new_sigma_P)))
    print("New params for TRANSCR0: pi = {0}, mu = {1}, var = {2}, stdv = {3}".
          format(new_pi0_T, new_mu0_T, new_sig0_T, np.sqrt(new_sig0_T)))
    print("New params for TRANSCR1: pi = {0}, mu = {1}, var = {2}, stdv = {3}".
          format(new_pi1_T, new_mu1_T, new_sig1_T, np.sqrt(new_sig1_T)))

    ### optionally, benchmark:
    if not benchmark:
        return

    print("===== Emission Log-Likelihood Benchmarks =====")
    old_S_llh = np.mean(old_data['S_loglkhd'])
    new_S_llh = np.mean(
        norm.logpdf(new_data['S_samples'],
                    loc=new_mu_S,
                    scale=np.sqrt(new_sigma_S)))
    print("> Average START log-probs:")
    print(
        "> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(
            old_S_llh, new_S_llh))

    old_L_llh = np.mean(old_data['L_loglkhd'])
    new_L_llh = np.mean(
        norm.logpdf(new_data['L_samples'],
                    loc=new_mu_L,
                    scale=np.sqrt(new_sigma_L)))
    print("> Average LEADER log-probs:")
    print(
        "> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(
            old_L_llh, new_L_llh))

    old_A_llh = np.mean(old_data['A_loglkhd'])
    new_A_llh0 = new_pi0_A * norm.pdf(
        new_data['A_samples'], loc=new_mu0_A, scale=np.sqrt(new_sig0_A))
    new_A_llh1 = new_pi1_A * norm.pdf(
        new_data['A_samples'], loc=new_mu1_A, scale=np.sqrt(new_sig1_A))
    new_A_llh = np.mean(np.log(new_A_llh0 + new_A_llh1))
    print("> Average ADAPTER log-probs:")
    print(
        "> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(
            old_A_llh, new_A_llh))

    old_P_llh = np.mean(old_data['P_loglkhd'])
    new_P_llh = np.mean(
        norm.logpdf(new_data['P_samples'],
                    loc=new_mu_P,
                    scale=np.sqrt(new_sigma_P)))
    print("> Average POLYA log-probs:")
    print(
        "> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(
            old_P_llh, new_P_llh))

    old_T_llh = np.mean(old_data['T_loglkhd'])
    new_T_llh0 = new_pi0_T * norm.pdf(
        new_data['T_samples'], loc=new_mu0_T, scale=np.sqrt(new_sig0_T))
    new_T_llh1 = new_pi1_T * norm.pdf(
        new_data['T_samples'], loc=new_mu1_T, scale=np.sqrt(new_sig1_T))
    new_T_llh = np.mean(np.log(new_T_llh0 + new_T_llh1))
    print("> Average TRANSCRIPT log-probs:")
    print(
        "> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(
            old_T_llh, new_T_llh))
Code example #40
 def __call__(self, new_params, old_params, old_grad_params):
     return np.sum(norm.logpdf(new_params,
                               loc=(old_params + 0.5 * self.dt ** 2 * old_grad_params),
                               scale=self.dt))
Code example #41
File: inference.py Project: drbenvincent/badapted
 def q_log_pdf(θold, θ):
     # care is needed here when subtracting dataframes, as the indices are unordered
     logpdf = norm.logpdf((θold.values - θ.values) / scale_walk,
                          loc=0,
                          scale=1)
     return np.sum(logpdf, axis=1)
Code example #42
        dPSIL = hour2day(PSIL_hat, idx)[~discard_vod]
        VOD_hat, popt = fitVOD_RMSE(dPSIL, dLAI, VOD_ma, return_popt=True)
        dS1 = hour2day(S1_hat, idx)[~discard_vod][::2]

        if np.isfinite(np.nansum(dS1)) and np.nansum(dS1) > 0:
            counts, bin_edges = np.histogram(dS1, bins=bins, density=True)
            cdf2 = np.cumsum(counts) / sum(counts)
            dS1_matched = np.array([
                bin_edges[np.abs(cdf1 - cdf2[int(itm * 100)]).argmin()]
                for itm in dS1
            ])
        else:
            dS1_matched = np.zeros(dS1.shape) + np.nan

        loglik_vod = np.nanmean(
            norm.logpdf(VOD_ma, VOD_hat, theta[idx_sigma_vod]))
        loglik_et = np.nanmean(norm.logpdf(ET, ET_hat, theta[idx_sigma_et]))
        loglik_sm = np.nanmean(
            norm.logpdf(SOILM, dS1_matched, theta[idx_sigma_sm]))

        TS = [
            np.concatenate([TS[ii], itm])
            for ii, itm in enumerate((VOD_hat, ET_hat, PSIL_hat, dS1_matched))
        ]
        PARA = [
            np.concatenate([PARA[ii], itm]) for ii, itm in enumerate((
                popt, theta, np.array([loglik_vod, loglik_et, loglik_sm])))
        ]

    TS = [np.reshape(itm, [nsample, -1]) for itm in TS]  # VOD,ET,PSIL,S1
    PARA = [np.reshape(itm, [nsample, -1]) for itm in PARA]
Code example #43
File: frequency.py Project: maciejjan/morle
 def edge_cost(self, edge: GraphEdge) -> float:
     rule_id = self.rule_set.get_id(edge.rule)
     return -norm.logpdf(edge.source.logfreq - edge.target.logfreq,
                         self.means[rule_id, ], self.sdevs[rule_id, ])
Code example #44
File: normal_trunc.py Project: wilsondy/cgpm
 def calc_predictive_logp(x, mu, sigma, l, h):
     return norm.logpdf(x, loc=mu, scale=sigma)
Code example #45
def gaussian_new(obs, x, params):
    x_vec = np.sqrt((params.beta**2) * np.exp(x))
    return norm.logpdf(obs, loc=0, scale=x_vec)
Code example #46
# The Gaussian (normal) distribution is a very common continuous probability distribution
# scipy provides a fast way to calculate its density

from scipy.stats import norm
import numpy as np

# probability density at zero for the standard normal
norm.pdf(0)

# if mean = 5 and standard deviation = 10
norm.pdf(0, loc=5, scale=10)

# to calculate the probability densities of many different values
# 1st) create an array
r = np.random.randn(10)

# 2nd) calculate the pdf of all values stored in array at the same time
norm.pdf(r)

# we can also calculate the log of the pdf
norm.logpdf(r)

# cumulative distribution function (CDF)
# this is the integral of the PDF from -inf to x
norm.cdf(r)

# and also log CDF
norm.logcdf(r)
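# added note: logpdf is simply the natural log of the pdf; it is unrelated to the
# log-normal distribution (that one lives in scipy.stats.lognorm)
np.allclose(norm.logpdf(r), np.log(norm.pdf(r)))  # True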
Code example #47
#! python3

from scipy.stats import norm
import numpy as np

# Gaussian Probability Density Function
# default mean = 0 and variance = 1
print(norm.pdf(0))

# Gaussian with mean == loc = 5 and standard deviation == scale = 10 == variance = 100
print(norm.pdf(0, loc=5, scale=10))

r = np.random.randn(10)
print(norm.pdf(r))

# Log probability density of each value (element-wise)
print(norm.logpdf(r))

# Cumulative Distribution function == integral of pdf from -infinity to x
print(norm.cdf(r))
print(norm.logcdf(r))
Code example #48
def model(parm, h, self):
    """
    Calculate the log probability of a model `h`
    [instance of :class:`hmf._framework.Framework`] with parameters ``parm``.

    At the moment, this is a little hacky, because the parameters have to
    be the first argument (for both Minimize and MCMC), so we use a
    function and pass self last.

    Parameters
    ----------
    parm : list of floats
        The position of the model. Takes arbitrary parameters.

    h : instance of :class:`~_framework.Framework`
        An instance of any subclass of :class:`~_framework.Framework` with the
        desired options set. Variables of the estimation are updated within the
        routine.

    Returns
    -------
    ll : float
        The log likelihood of the model at the given position.
    """
    if self.verbose > 1:
        print(("Params: ", list(zip(self.attrs, parm))))

    ll = 0
    p = copy.copy(parm)
    for prior in self.priors:
        if type(prior.name) == list:
            index = [self.attrs.index(name) for name in prior.name]
        else:
            index = self.attrs.index(prior.name)
        ll += prior.ll(parm[index])
    if np.isinf(ll):
        return ret_arg(ll, self.blobs)

    # If it is a log distribution, un-log it for use.
    if isinstance(prior, Log):
        p[index] = 10**parm[index]

    # Rebuild the hod dict from given vals
    # Any attr starting with <name>: is put into a dictionary.
    param_dict = {}
    for attr, val in zip(self.attrs, p):
        if ":" in attr:
            if attr.split(":")[0] not in param_dict:
                param_dict[attr.split(":")[0]] = {}

            param_dict[attr.split(":")[0]][attr.split(":")[1]] = val
        else:
            param_dict[attr] = val

    # Update the actual model
    try:  # This try: except: should capture poor parameter choices quickly.
        h.update(**param_dict)
    except ValueError as e:
        if self.relax:
            print(("WARNING: PARAMETERS FAILED ON UPDATE, RETURNING INF: ",
                   list(zip(self.attrs, parm))))
            print(e)
            print((traceback.format_exc()))
            return ret_arg(-np.inf, self.blobs)
        else:
            print((traceback.format_exc()))
            raise e

    # Get the quantity to compare (if exceptions are raised, treat properly)
    try:
        q = getattr(h, self.quantity)
    except Exception as e:
        if self.relax:
            print((
                "WARNING: PARAMETERS FAILED WHEN CALCULATING QUANTITY, RETURNING INF: ",
                list(zip(self.attrs, parm))))
            print(e)
            print((traceback.format_exc()))
            return ret_arg(-np.inf, self.blobs)
        else:
            print((traceback.format_exc()))
            raise e

    # The logprob of the model
    if self.cov:
        ll += _lognormpdf(q, self.data, self.sigma)
    else:
        ll += np.sum(norm.logpdf(self.data, loc=q, scale=self.sigma))

    # Add the likelihood of the constraints
    for k, v in list(self.constraints.items()):
        ll += norm.logpdf(getattr(h, k), loc=v[0], scale=v[1])
        if self.verbose > 2:
            print(("CONSTRAINT: ", k, getattr(h, k)))

    if self.verbose:
        print(("Likelihood: ", ll))
    if self.verbose > 1:
        print(("Update Dictionary: ", param_dict))
    if self.verbose > 2:
        print(("Final Quantity: ", q))

    # Get blobs to return as well.
    if self.blobs is not None:
        out = []
        for b in self.blobs:
            if ":" not in b:
                out.append(getattr(h, b))
            elif ":" in b:
                out.append(getattr(h, b.split(":")[0])[b.split(":")[1]])
        return ll, out
    else:
        return ll
Code example #49
 def log_likelihood(self, x):
     log_l = np.zeros(x.size)
     for n in self.names:
         log_l += norm.logpdf(x[n])
     return log_l
Code example #50
    def forward_filter(self):

        T         = self.T         # Number of timesteps
        obs_dim   = self.obs_dim   # Dimension of observed data
        state_dim = self.state_dim # Dimension of state vector
        
        if self.obs_discount:
            self.gamma_n = np.zeros(T)
            self.s       = np.zeros(T)
            self.s[0]    = self.s0
           
        else:
            V = self.V # Dimensions of [obs_dim,obs_dim]
            
        self.r = np.zeros(T)       # For unknown obs. variance
        self.e = np.zeros([T,obs_dim])   # Forecast error
        self.f = np.zeros([T,obs_dim])   # Forecast mean
        self.m = np.zeros([T,state_dim])   # State vector/matrix posterior mean
        self.a = np.zeros([T,state_dim])   # State vector/matrix prior mean
        self.Q = np.zeros([T,obs_dim,obs_dim]) # Forecast covariance
        self.A = np.zeros([T,state_dim,obs_dim]) # Adaptive coefficient vector
        self.R = np.zeros([T,state_dim,state_dim]) # State vector prior variance
        self.C = np.zeros([T,state_dim,state_dim]) # State vector posterior variance
        self.B = np.zeros([T,state_dim,state_dim]) # Retrospective ???
        
        # If we want to change the tracked quantities all at once later,
        # it would be handy to be able to reference all of them at the 
        # same time.
        self.dynamic_names = ['F','Y','r' , 'e', 'f' ,'m' ,'a', 'Q', 'A', 'R','C','B']
        
        if self.obs_discount:
            self.dynamic_names = self.dynamic_names + ['gamma_n','s']
        if self.dynamic_G:
            self.dynamic_names = self.dynamic_names + ['G']

        # Forward filtering
        # For each time step, we ingest a new observation and update our priors
        # to posteriors.
        for t in range(T):
            self.t = t
            self.filter_step(t)
                        
        # The last thing we want to do is tabulate the current
        # step's contribution to the overall log-likelihood.
        if self.calculate_ll:
            if self.obs_discount:
                # We need the shape parameters for the preceding time step in the current
                # timestep's calculation of the log likelihood. This just offsets the 
                # vector of shape parameters.
                shifted_gamma = np.roll(np.squeeze(self.gamma_n),1)
                shifted_gamma[0]  = 1.0
                self.log_likelihood = student_t.logpdf(np.squeeze(self.e),
                                                          shifted_gamma,
                                                          scale=np.squeeze(np.sqrt(self.Q)))
            else:
                self.log_likelihood = norm.logpdf(np.squeeze(self.e), 
                                                     scale=np.squeeze(np.sqrt(self.Q)))

            # This is the marginal model likelihood.
            self.ll_sum = np.sum(self.log_likelihood)
        
        if self.nancheck:
            try:
                for array in [self.A,self.C,self.Q,self.m,self.log_likelihood]:
                    assert np.any(np.isnan(array)) == False
                          
            except AssertionError:
                print('NaN values encountered in forward filtering.')
        
        self.populate_scores()
        
        self.is_filtered = True
Code example #51
 def logpdf(self, vals: np.ndarray) -> float:
     return norm.logpdf(vals, loc=self.mean, scale=self.variance**.5)
Code example #52
X = butter_lowpass_filter(X, cutoff=5, fs=32, order=6)

Y = butter_lowpass_filter(Y, cutoff=5, fs=32, order=6)

Z = butter_lowpass_filter(Z, cutoff=5, fs=32, order=6)

#plotdata(original,filtered=Z,fs=32,t=T)
D = np.column_stack((X, Y, Z))
X_norm = normalize(X, -2, 2)

Y_norm = normalize(Y, -2, 2)

Z_norm = normalize(Z, -2, 2)

delta = norm.logpdf(Z_norm,
                    loc=np.mean(Z_norm, dtype=np.float64, axis=0),
                    scale=np.std(Z_norm, dtype=np.float64, axis=0))
delta3 = multivariate_normal.logpdf(D,
                                    mean=np.mean(D, dtype=np.float64, axis=0),
                                    cov=np.std(D, dtype=np.float64, axis=0))

XYZ_norm = np.column_stack((X_norm, Y_norm, Z_norm))

#xnp.savetxt('filtereddata/XYZ_norm1.txt',XYZ_norm,fmt='%1.15g')

# for index in range(0,len(Z)):
# 	if(delta[index]<-5):
# 		print(index)
# 		t = np.linspace(0, 2, 64, endpoint=False)
# 		plt.plot(t, Z[index-32:index+32], 'g-', linewidth=2, label='filtered data')
# 		plt.xlabel('Time [sec]')
Code example #53
 def ll(self, param):
     return norm.logpdf(param, loc=self.mean, scale=self.sd)
Code example #54
        def fwd_pass(w, x, y, z, var_Z):
            w = np.copy(w)
            tpts = w.shape[0]
            ntaxa = w.shape[1]
            A = self.A
            A_init = self.A_init

            # normalized forward probabilities
            alpha = np.zeros(w.shape)

            np.seterr(divide="ignore")  # zeros are appropriately handled here
            p = np.concatenate((z[0] + var_Z, [0]))
            p = np.tile(p, ntaxa).reshape(ntaxa, ntaxa)
            w0 = np.tile(np.log(w[0]), ntaxa).reshape(ntaxa, ntaxa)
            np.fill_diagonal(w0, 0)
            p += w0
            p = np.exp(p - logsumexp(p, axis=1, keepdims=True))
            n = y[0].sum()
            y0 = np.tile(y[0], ntaxa).reshape(ntaxa, ntaxa)
            alpha_w1 = np.log(A_init) + multinomial(y0, n, p)
            alpha_w1[:ntaxa - 1] += norm.logpdf(loc=x[0],
                                                scale=np.sqrt(var_Z),
                                                x=z[0])

            np.fill_diagonal(p, 0)
            p /= p.sum(axis=1, keepdims=True)
            alpha_w0 = np.zeros(ntaxa)
            alpha_w0[y[0] > 0] = -np.inf
            alpha_w0[y[0] == 0] = np.log(1 - A_init)[y[0] == 0] + multinomial(
                y0, y0.sum(), p)[y[0] == 0]
            alpha[0] = np.exp(
                alpha_w1 -
                logsumexp(np.vstack([alpha_w0, alpha_w1]).T, axis=1))
            assert np.all(alpha[0] >= 0) and np.all(alpha[0] <= 1)

            for t in range(1, tpts):
                alpha_w1 = np.zeros(ntaxa)
                alpha_w0 = np.zeros(ntaxa)
                at0 = alpha[t - 1]

                p = np.concatenate((z[t] + var_Z, [0]))
                p = np.tile(p, ntaxa).reshape(ntaxa, ntaxa)
                w0 = np.tile(np.log(w[t]), ntaxa).reshape(ntaxa, ntaxa)
                np.fill_diagonal(w0, 0)
                p += w0
                p = np.exp(p - logsumexp(p, axis=1, keepdims=True))
                n = y[t].sum()
                y0 = np.tile(y[t], ntaxa).reshape(ntaxa, ntaxa)
                alpha_w1 = np.log(A[:, 1, 1] * at0 + A[:, 0, 1] *
                                  (1 - at0)) + multinomial(y0, n, p)
                alpha_w1[:ntaxa - 1] += norm.logpdf(loc=x[t],
                                                    scale=np.sqrt(var_Z),
                                                    x=z[t])

                np.fill_diagonal(p, 0)
                p /= p.sum(axis=1, keepdims=True)
                alpha_w0 = np.zeros(ntaxa)
                alpha_w0[y[t] > 0] = -np.inf
                alpha_w0[y[t] ==
                         0] = np.log(A[:, 1, 0] * at0 + A[:, 0, 0] *
                                     (1 - at0))[y[t] == 0] + multinomial(
                                         y0, n, p)[y[t] == 0]

                np.set_printoptions(threshold=np.inf)
                assert np.all(
                    np.logical_or(np.isfinite(alpha_w0), np.isfinite(alpha_w1))
                    >= 1), str(p[0]) + "\n" + str(y[t])
                alpha[t] = np.exp(
                    alpha_w1 -
                    logsumexp(np.vstack([alpha_w0, alpha_w1]).T, axis=1))
                assert np.all(alpha[t] >= 0) and np.all(
                    alpha[t] <= 1), alpha[t]

            return alpha
Code example #55
File: datasets.py Project: jiamings/kl_wgan_sim
 def logpdf_multiple(self, x):
     v = self.thresh(x[:, 0])
     return norm.logpdf(x[:, 0], 0, self.sigma) + norm.logpdf(
         x[:, 1:], 0,
         np.sqrt(v)[:, None]).sum(1)
Code example #56
File: log_ei.py Project: DailyActie/AI_DL_NAS-RoBO
    def compute(self, X, derivative=False, **kwargs):
        """
        Computes the Log EI value and its derivatives.

        Parameters
        ----------
        X: np.ndarray(1, D), The input point where the acquisition_functions function
            should be evaluate. The dimensionality of X is (N, D), with N as
            the number of points to evaluate at and D is the number of
            dimensions of one X.

        derivative: Boolean
            If is set to true also the derivative of the acquisition_functions
            function at X is returned
            Not implemented yet!

        Returns
        -------
        np.ndarray(1,1)
            Log Expected Improvement of X
        np.ndarray(1,D)
            Derivative of Log Expected Improvement at X
            (only if derivative=True)
        """
        if derivative:
            logger.error("LogEI does not support derivative \
                calculation until now")
            return

        m, v = self.model.predict(X)

        _, eta = self.model.get_incumbent()

        f_min = eta - self.par

        s = np.sqrt(v)

        z = (f_min - m) / s

        log_ei = np.zeros([m.size])
        for i in range(0, m.size):
            mu, sigma = m[i], s[i]

            #    par_s = self.par * sigma

            # Degenerate case 1: first term vanishes
            if np.any(abs(f_min - mu) == 0):
                if sigma > 0:
                    log_ei[i] = np.log(sigma) + norm.logpdf(z[i])
                else:
                    log_ei[i] = -np.inf
            # Degenerate case 2: second term vanishes and first term
            # has a special form.
            elif sigma == 0:
                if np.any(mu < f_min):
                    log_ei[i] = np.log(f_min - mu)
                else:
                    log_ei[i] = -np.inf
            # Normal case
            else:
                b = np.log(sigma) + norm.logpdf(z[i])
                # log(y+z) is tricky, we distinguish two cases:
                if np.any(f_min > mu):
                    # When y>0, z>0, we define a=ln(y), b=ln(z).
                    # Then y+z = exp[ max(a,b) + ln(1 + exp(-|b-a|)) ],
                    # and thus log(y+z) = max(a,b) + ln(1 + exp(-|b-a|))
                    a = np.log(f_min - mu) + norm.logcdf(z[i])

                    log_ei[i] = max(a, b) + np.log(1 + np.exp(-abs(b - a)))
                else:
                    # When y<0, z>0, we define a=ln(-y), b=ln(z),
                    # and it has to be true that b >= a in
                    # order to satisfy y+z>=0.
                    # Then y+z = exp[ b + ln(exp(b-a) -1) ],
                    # and thus log(y+z) = a + ln(exp(b-a) -1)
                    a = np.log(mu - f_min) + norm.logcdf(z[i])
                    if a >= b:
                        # a>b can only happen due to numerical inaccuracies
                        # or approximation errors
                        log_ei[i] = -np.inf
                    else:
                        log_ei[i] = b + np.log(1 - np.exp(a - b))

        return log_ei
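The branching at the end evaluates log EI = log((f_min - mu) * Phi(z) + sigma * phi(z)) without ever exponentiating the two terms, working with a = log(f_min - mu) + log Phi(z) and b = log sigma + log phi(z). A scalar sketch of the same trick, for reference only (this is not the RoBO interface):

import numpy as np
from scipy.stats import norm

def log_ei_scalar(mu, sigma, f_min):
    z = (f_min - mu) / sigma
    b = np.log(sigma) + norm.logpdf(z)           # log( sigma * phi(z) )
    if f_min > mu:
        a = np.log(f_min - mu) + norm.logcdf(z)  # log( (f_min - mu) * Phi(z) )
        # log(exp(a) + exp(b)) without overflow
        return max(a, b) + np.log1p(np.exp(-abs(b - a)))
    else:
        a = np.log(mu - f_min) + norm.logcdf(z)  # log( (mu - f_min) * Phi(z) )
        # exp(b) - exp(a) must be non-negative; log(exp(b) - exp(a))
        return b + np.log1p(-np.exp(a - b)) if a < b else -np.inf

# sanity check against the direct formula at a well-behaved point
mu, sigma, f_min = 0.0, 1.0, 0.5
direct = np.log((f_min - mu) * norm.cdf((f_min - mu) / sigma)
                + sigma * norm.pdf((f_min - mu) / sigma))
print(np.isclose(log_ei_scalar(mu, sigma, f_min), direct))  # True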
コード例 #57
0
        def pairwise_pass(w, x, y, z, var_Z, alpha, gamma):
            w = np.copy(w)
            # w_{t-1}, w_t pairwise probabilities
            w0_w1 = np.zeros((w.shape[1], w.shape[0], 2, 2))
            A_init = self.A_init
            A = self.A
            p0 = gamma[0]
            ntaxa = w.shape[1]
            tpts = w.shape[0]

            # Logs of invalid values (zeros, negatives) are handled
            # automatically, so turn off the warnings here; the assertions
            # below should catch any unexpected errors.
            np.seterr(divide="ignore", invalid="ignore")

            for t in range(1, tpts):
                at0 = alpha[t - 1]
                at1 = alpha[t]
                gt1 = gamma[t]

                p = np.concatenate((z[t] + var_Z, [0]))
                p = np.tile(p, ntaxa).reshape(ntaxa, ntaxa)
                w0 = np.tile(np.log(w[t]), ntaxa).reshape(ntaxa, ntaxa)
                np.fill_diagonal(w0, 0)
                p += w0
                p = np.exp(p - logsumexp(p, axis=1, keepdims=True))
                n = y[t].sum()
                y0 = np.tile(y[t], ntaxa).reshape(ntaxa, ntaxa)
                obs_1 = multinomial(y0, n, p)
                obs_1[:ntaxa - 1] += norm.logpdf(loc=x[t],
                                                 scale=np.sqrt(var_Z),
                                                 x=z[t])

                np.fill_diagonal(p, 0)
                p /= p.sum(axis=1, keepdims=True)
                obs_0 = np.zeros(ntaxa)
                obs_0[y[t] > 0] = -np.inf
                obs_0[y[t] == 0] = multinomial(y0, n, p)[y[t] == 0]

                ids = np.logical_and(y[t] > 0, y[t - 1] > 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0, 1] = 0
                    w0_w1[ids, t, 0, 0] = 0
                    w0_w1[ids, t, 1, 1] = 1
                    w0_w1[ids, t, 1, 0] = 0

                ids = np.logical_and(y[t] > 0, y[t - 1] == 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0,
                          1] = np.log(1 - at0[ids]) + obs_1[ids] + np.log(
                              gt1[ids]) + np.log(A[ids, 0, 1]) - np.log(
                                  at1[ids])
                    w0_w1[ids, t, 0, 0] = -np.inf
                    w0_w1[ids, t, 1, 1] = np.log(
                        at0[ids]) + obs_1[ids] + np.log(gt1[ids]) + np.log(
                            A[ids, 1, 1]) - np.log(at1[ids])
                    w0_w1[ids, t, 1, 0] = -np.inf
                    log_denom = logsumexp(w0_w1[ids, t],
                                          axis=(1, 2),
                                          keepdims=True)
                    w0_w1[ids, t] = np.exp(w0_w1[ids, t] - log_denom)
                    assert np.all(
                        np.abs(w0_w1[ids, t].sum(axis=(1, 2)) -
                               1) < 1e-2), w0_w1[ids, t]

                ids = np.logical_and(y[t] == 0, y[t - 1] > 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0, 1] = -np.inf
                    w0_w1[ids, t, 0, 0] = -np.inf
                    w0_w1[ids, t, 1, 1] = np.log(
                        at0[ids]) + obs_1[ids] + np.log(gt1[ids]) + np.log(
                            A[ids, 1, 1]) - np.log(at1[ids])
                    w0_w1[ids, t, 1, 0] = np.log(
                        at0[ids]) + obs_0[ids] + np.log(1 - gt1[ids]) + np.log(
                            A[ids, 1, 0]) - np.log(1 - at1[ids])
                    w0_w1[np.logical_and(ids, gt1 == 0), t, 1, 1] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 1), t, 1, 0] = -np.inf
                    log_denom = logsumexp(w0_w1[ids, t],
                                          axis=(1, 2),
                                          keepdims=True)
                    w0_w1[ids, t] = np.exp(w0_w1[ids, t] - log_denom)
                    assert np.all(
                        np.abs(w0_w1[ids, t].sum(axis=(1, 2)) - 1) < 1e-2
                    ), str(w0_w1[ids, t]) + "\n" + str(log_denom)

                ids = np.logical_and(y[t] == 0, y[t - 1] == 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0,
                          1] = np.log(1 - at0[ids]) + obs_1[ids] + np.log(
                              gt1[ids]) + np.log(A[ids, 0, 1]) - np.log(
                                  at1[ids])
                    w0_w1[ids, t, 0,
                          0] = np.log(1 - at0[ids]) + obs_0[ids] + np.log(
                              1 - gt1[ids]) + np.log(
                                  A[ids, 0, 0]) - np.log(1 - at1[ids])
                    w0_w1[ids, t, 1, 1] = np.log(
                        at0[ids]) + obs_1[ids] + np.log(gt1[ids]) + np.log(
                            A[ids, 1, 1]) - np.log(at1[ids])
                    w0_w1[ids, t, 1, 0] = np.log(
                        at0[ids]) + obs_0[ids] + np.log(1 - gt1[ids]) + np.log(
                            A[ids, 1, 0]) - np.log(1 - at1[ids])

                    w0_w1[np.logical_and(ids, gt1 == 0), t, 0, 1] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 0), t, 1, 1] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 1), t, 0, 0] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 1), t, 1, 0] = -np.inf

                    log_denom = logsumexp(w0_w1[ids, t],
                                          axis=(1, 2),
                                          keepdims=True)
                    w0_w1[ids, t] = np.exp(w0_w1[ids, t] - log_denom)
                    assert np.all(
                        np.abs(w0_w1[ids, t].sum(axis=(1, 2)) - 1) < 1e-2
                    ), str(w0_w1[ids, t]) + "\n" + str(log_denom)

                assert np.all(w0_w1[:, t, 0, 1] >= 0) and np.all(
                    w0_w1[:, t, 0, 1] <= 1)
                assert np.all(w0_w1[:, t, 1, 1] >= 0) and np.all(
                    w0_w1[:, t, 1, 1] <= 1)

            # restore the default floating-point warnings
            np.seterr(divide="warn", invalid="warn")
            return w0_w1
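The four ids branches above all fill in the same two-state pairwise update, xi_t(i, j) proportional to alpha_{t-1}(i) * A(i, j) * p(y_t | w_t = j) * gamma_t(j) / alpha_t(j), evaluated in log space and normalized over each 2x2 block. A per-taxon sketch of that update (the names below are illustrative, not from the project):

import numpy as np
from scipy.special import logsumexp

def pairwise_marginal(log_alpha_prev, log_alpha_cur, log_gamma_cur, log_A, log_obs):
    """All arguments are length-2 log vectors, except log_A, a 2x2 log matrix."""
    log_xi = (log_alpha_prev[:, None] + log_A
              + log_obs[None, :] + log_gamma_cur[None, :]
              - log_alpha_cur[None, :])
    # normalize the 2x2 block so the pairwise probabilities sum to one
    return np.exp(log_xi - logsumexp(log_xi))

# toy numbers: sticky transition matrix, state 1 slightly favored by the data
log_A = np.log(np.array([[0.9, 0.1], [0.2, 0.8]]))
xi = pairwise_marginal(np.log([0.4, 0.6]), np.log([0.3, 0.7]),
                       np.log([0.3, 0.7]), log_A, np.log([0.2, 0.8]))
print(xi, xi.sum())  # a 2x2 table of pairwise probabilities summing to 1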
コード例 #58
0
    def log_prior(self, x):
        log_p = np.log(self.in_bounds(x))
        log_p += norm.logpdf(x['y'])
        return log_p
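np.log(True) is 0 and np.log(False) is -inf, so the bounds check leaves in-bounds points with the standard-normal log density of x['y'] and sends out-of-bounds points to -inf. A stand-alone sketch with an assumed box constraint playing the role of self.in_bounds:

import numpy as np
from scipy.stats import norm

def log_prior(x, lo=-1.0, hi=1.0):
    in_bounds = (lo <= x['x']) & (x['x'] <= hi)   # boolean mask
    with np.errstate(divide="ignore"):            # silence the log(0) warning
        log_p = np.log(in_bounds)                 # 0 if inside, -inf if outside
    return log_p + norm.logpdf(x['y'])

x = {'x': np.array([0.5, 2.0]), 'y': np.array([0.0, 0.0])}
print(log_prior(x))  # first point finite, second point -inf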
コード例 #59
0
Date: August, 2019
[email protected]
=========================================================================
"""
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt

#%%

print(norm.pdf(0))

mean = 5
std = 10

print(norm.pdf(0, mean, std))

array = np.random.randn(1000)

plt.figure(1)
plt.scatter(array, norm.pdf(array))

plt.figure(2)
plt.scatter(array, norm.logpdf(array))

plt.figure(3)
plt.scatter(array, norm.cdf(array))

plt.figure(4)
plt.scatter(array, norm.logcdf(array))
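The log-density values plotted above follow the closed form log N(x; mu, sigma) = -0.5 * log(2 * pi * sigma^2) - (x - mu)^2 / (2 * sigma^2). A quick check against scipy, separate from the script above:

from scipy.stats import norm
import numpy as np

x, mu, sigma = 0.0, 5.0, 10.0
by_hand = -0.5 * np.log(2 * np.pi * sigma**2) - (x - mu)**2 / (2 * sigma**2)
print(np.isclose(by_hand, norm.logpdf(x, mu, sigma)))  # True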
コード例 #60
0
def pred_ll(y_test, mu, sigma):
    return norm.logpdf(y_test, loc=mu, scale=sigma).mean()
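A small usage sketch with toy numbers (assuming import numpy as np and from scipy.stats import norm alongside the pred_ll above): it returns the average Gaussian log density of the held-out targets under the predictive means and standard deviations.

import numpy as np

y_test = np.array([0.1, -0.3, 0.7])   # held-out targets
mu     = np.array([0.0, -0.2, 0.5])   # predictive means
sigma  = np.array([0.5, 0.5, 0.5])    # predictive standard deviations
print(pred_ll(y_test, mu, sigma))     # mean predictive log-likelihood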