Example #1
def st_d_logp(x, mu, nu, sigma2):
    # Student-t log-density with location mu, degrees of freedom nu and
    # scale sigma2; `pi` is presumably numpy's pi from the surrounding module.
    x_p = (x - mu) / T.sqrt(sigma2)
    prob = T.log(
        T.gamma((nu + 1.0) / 2.0) /
        (T.gamma(nu / 2.0) * T.sqrt(pi * nu * sigma2)) *
        T.power(1.0 + x_p**2 / nu, -(nu + 1) / 2.0))
    return prob
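For context, this evaluates the textbook Student-t log-density with location mu, degrees of freedom nu and scale sigma2; the expanded form below is not part of the original snippet:

    \log p(x \mid \mu, \nu, \sigma^2) = \log\Gamma\!\left(\tfrac{\nu+1}{2}\right) - \log\Gamma\!\left(\tfrac{\nu}{2}\right) - \tfrac{1}{2}\log(\pi \nu \sigma^2) - \tfrac{\nu+1}{2}\log\!\left(1 + \tfrac{(x-\mu)^2}{\nu \sigma^2}\right)

Summing the log terms directly (e.g. with T.gammaln) is numerically safer than taking the log of the product, since T.gamma overflows for large nu.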
Example #2
 def likelihood(xs):
     return T.sum(
         T.log(beta) -
         T.log(2.0 * std *
               T.sqrt(T.gamma(1. / beta) / T.gamma(3. / beta))) -
         T.gammaln(1.0 / beta) + -T.power(
             T.abs_(xs - mu) / std *
             T.sqrt(T.gamma(1. / beta) / T.gamma(3. / beta)), beta))
Example #3
 def likelihood(xs):
     return tt.sum(
         tt.log(beta) -
         tt.log(2.0 * std *
                tt.sqrt(tt.gamma(1. / beta) / tt.gamma(3. / beta))) -
         tt.gammaln(1.0 / beta) + -tt.power(
             tt.abs_(xs - mu) / std *
             tt.sqrt(tt.gamma(1. / beta) / tt.gamma(3. / beta)), beta))
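Examples #2 and #3 are the same snippet under two different import aliases (T and tt). For reference, the textbook generalized normal (exponential power) log-density they are modelled on, with shape beta and scale alpha, where alpha = std * sqrt(Gamma(1/beta) / Gamma(3/beta)) if std is the standard deviation, is:

    \log p(x \mid \mu, \alpha, \beta) = \log\beta - \log(2\alpha) - \log\Gamma(1/\beta) - \left(\frac{|x-\mu|}{\alpha}\right)^{\beta}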
Example #4
def kl_div_ng_ng(p_alpha, p_beta, p_nu, p_mu, q_alpha, q_beta, q_nu, q_mu):
    kl_dist = 1.0 / 2.0 * p_alpha / p_beta * (q_mu - p_mu)**2.0 * q_nu
    kl_dist = kl_dist + 1.0 / 2.0 * q_nu / p_nu
    kl_dist = kl_dist - 1.0 / 2.0 * T.log(q_nu / p_nu)
    kl_dist = kl_dist - 1.0 / 2.0 + q_alpha * T.log(p_beta / q_beta) - T.log(
        T.gamma(p_alpha) / T.gamma(q_alpha))
    kl_dist = kl_dist + (p_alpha - q_alpha) * special_functions.psi(
        p_alpha) - (p_beta - q_beta) * p_alpha / p_beta
    return kl_dist
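For reference, the closed-form KL divergence between two Normal-Gamma distributions that this function follows, with parameters (mu, nu, alpha, beta) matching the argument names, is:

    \mathrm{KL}(p\,\|\,q) = \tfrac{1}{2}\tfrac{\alpha_p}{\beta_p}\nu_q(\mu_q-\mu_p)^2 + \tfrac{1}{2}\tfrac{\nu_q}{\nu_p} - \tfrac{1}{2}\log\tfrac{\nu_q}{\nu_p} - \tfrac{1}{2} + \alpha_q\log\tfrac{\beta_p}{\beta_q} - \log\tfrac{\Gamma(\alpha_p)}{\Gamma(\alpha_q)} + (\alpha_p-\alpha_q)\,\psi(\alpha_p) - (\beta_p-\beta_q)\tfrac{\alpha_p}{\beta_p}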
Example #5
    def __init__(self, input, n_in, n_out, W=None, b=None):
        LogisticRegression.__init__(self, input, n_in, n_out, W=None, b=None)

        lin_out = T.dot(input, self.W) + self.b
        n = n_out - 1
        k = np.arange(n_out)
        binom = (T.log(T.gamma(n + 1)) - T.log(T.gamma(k + 1)) -
                 T.log(T.gamma(n - k + 1)))
        pre_softmax = -(n - k) * lin_out - n * T.nnet.softplus(-lin_out)

        self.p_y_given_x = T.nnet.softmax(binom + pre_softmax)
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
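To unpack this layer: with a = Wx + b and success probability p = sigmoid(a), the binomial log-pmf over k = 0, ..., n is log C(n, k) + k log p + (n - k) log(1 - p). The binom term is log C(n, k) via k! = Gamma(k + 1), and pre_softmax is an equivalent rewrite of the remainder, since softplus(-a) = softplus(a) - a gives

    -(n - k)\,a - n\,\mathrm{softplus}(-a) = k\,a - n\,\mathrm{softplus}(a) = k\log\sigma(a) + (n - k)\log\bigl(1 - \sigma(a)\bigr)

so the softmax simply renormalises the binomial probabilities over the n + 1 classes.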
Example #6
def kl_div_dir_dir(p_a0, q_a0):
    a0 = T.sum(p_a0, axis=1)
    b0 = T.sum(q_a0, axis=1)
    #assume a0 = b0+1, since there was only one observation added
    kl_dist = T.log(T.gamma(a0) / T.gamma(b0)) + T.sum(
        T.log(T.gamma(q_a0) / T.gamma(p_a0)), axis=1)
    # kl_dist = T.log(b0)-T.log(T.sum((p_a0-q_a0)*q_a0))
    kl_dist = kl_dist + T.sum(
        (p_a0 - q_a0) *
        (special_functions.psi(p_a0) - special_functions.psi(a0).reshape(
            (a0.shape[0], 1))),
        axis=1)
    #    kl_dist = kl_dist + T.sum((p_a0-q_a0)*(special_functions.psi(p_a0)-special_functions.psi(a0)),axis=1)
    return kl_dist
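For reference, with a0 = sum_i p_i and b0 = sum_i q_i (the row sums computed above), the closed-form KL divergence between Dirichlet distributions that this matches is:

    \mathrm{KL}\bigl(\mathrm{Dir}(p)\,\|\,\mathrm{Dir}(q)\bigr) = \log\frac{\Gamma(a_0)}{\Gamma(b_0)} + \sum_i \log\frac{\Gamma(q_i)}{\Gamma(p_i)} + \sum_i (p_i - q_i)\bigl(\psi(p_i) - \psi(a_0)\bigr)

Evaluating the ratios as T.gamma(a) / T.gamma(b) overflows for large concentration parameters; T.gammaln(a) - T.gammaln(b) is the usual numerically safer equivalent.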
Example #7
    def binomial_elemwise(self, y, t):
        # specify broadcasting dimensions (multiple inputs to multiple
        # density estimations)
        if self.convolutional:
            est_shuf = y.dimshuffle(0, 1, 2, 3)
            v_shuf = t.dimshuffle(0, 1, 2, 3)
        else:
            est_shuf = y.dimshuffle(0, 1)
            v_shuf = t.dimshuffle(0, 1)

        # Calculate probabilities of current v's to be sampled given estimations
        # Real-valued observation -> Binomial distribution
        # Binomial coefficient (factorial(x) == gamma(x+1))
        bin_coeff = 1 / (T.gamma(1 + v_shuf) * T.gamma(2 - v_shuf))
        pw_probs = bin_coeff * T.pow(est_shuf, v_shuf) * T.pow(
            1. - est_shuf, 1. - v_shuf)
        return pw_probs
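A note on the coefficient: for binary v in {0, 1}, 1 / (Gamma(1 + v) * Gamma(2 - v)) is exactly the binomial coefficient C(1, v) = 1 / (v! (1 - v)!) = 1, so with p = est_shuf the elementwise probabilities reduce to the Bernoulli pmf

    p^{\,v}\,(1-p)^{\,1-v}

while for real-valued v in (0, 1) the same expression gives a smooth interpolation between the two endpoints.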
Example #8
    def logp(self, value):
        """
		Calculate log-probability of EFF distribution at specified value.
		Parameters
		----------
		value : numeric
			Value(s) for which log-probability is calculated. If the log probabilities for multiple
			values are desired the values must be provided in a numpy array or theano tensor
		Returns
		-------
		TensorVariable
		"""
        gamma = self.gamma
        x = (self.location - value) / self.scale

        cte = tt.sqrt(
            np.pi) * self.scale * tt.gamma(gamma - 0.5) / tt.gamma(gamma)

        log_d = -gamma * tt.log(1. + x**2) - tt.log(cte)
        return log_d
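The constant cte is the exact normaliser: with x = (location - value) / scale,

    \int_{-\infty}^{\infty} (1 + x^2)^{-\gamma}\, d(\mathrm{value}) = \sqrt{\pi}\;\mathrm{scale}\;\frac{\Gamma(\gamma - \tfrac{1}{2})}{\Gamma(\gamma)}, \qquad \gamma > \tfrac{1}{2}

so log_d = -gamma * log(1 + x^2) - log(cte) is a properly normalised log-density.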
Example #9
 def _Ps(self, n, l):
     return T.exp(-l) * (l ** n) / T.gamma(n + 1)
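This is the Poisson pmf written with n! = Gamma(n + 1):

    P(n \mid \lambda) = \frac{e^{-\lambda}\,\lambda^{n}}{\Gamma(n+1)}

For large n or l the log form, n * T.log(l) - l - T.gammaln(n + 1), avoids overflow.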
Example #10
File: utils.py Project: capybaralet/GAE
def theano_beta_fn(a, b):
    return T.gamma(a) * T.gamma(b) / T.gamma(a+b)
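This is the Beta function B(a, b) = Gamma(a) Gamma(b) / Gamma(a + b). Since T.gamma overflows quickly, a log-space variant is often preferable; a minimal sketch of one (the name theano_log_beta_fn is illustrative, not from the original project):

import theano.tensor as T

def theano_log_beta_fn(a, b):
    # log B(a, b) = log Gamma(a) + log Gamma(b) - log Gamma(a + b);
    # gammaln stays finite where T.gamma would overflow
    return T.gammaln(a) + T.gammaln(b) - T.gammaln(a + b)

When the Beta value itself is needed, T.exp(theano_log_beta_fn(a, b)) recovers it.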
Example #11
data = np.asarray((exampleData[0:q], exampleDataNLost[0:q]))

with pm.Model() as BdWwithcfromNorm:
    alpha = pm.Uniform('alpha', 0.0001, 1000.0, testval=1.0)
    beta = pm.Uniform('beta', 0.0001, 1000.0, testval=1.0)
    # If c=1, the dW collapses to a geometric distribution. We assume
    # that in the usual case the customer survival probability
    # stays the same over time.
    # If mu = 1.5 we get exactly the same sampling results as if
    # we had used a uniform prior. The result is slightly better than
    # defining a N(1,None) variable.
    c = pm.Normal('c', mu=0.5, testval=1.0)

    p = [0.] * n
    s = [1.] * n
    logB = tt.gamma(alpha + beta) / tt.gamma(beta)
    for t in range(1, n):
        pt1 = tt.gamma(beta + (t - 1)**c) / tt.gamma(beta + alpha + (t - 1)**c)
        pt2 = tt.gamma(beta + t**c) / tt.gamma(beta + alpha + t**c)
        s[t] = pt2 * logB
        p[t] = s[t - 1] - s[t]
    p = tt.stack(p, axis=0)
    s = tt.stack(s, axis=0)

    # Log-likelihood function
    def logp(data):
        observedRenewed = data[0, :]
        observedReleased = data[1, :]

        # Released entries every year
        released = tt.mul(p[1:].log(), observedReleased[1:])
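For reference, the gamma ratios accumulated in the loop reduce, via B(a, b) = Gamma(a) Gamma(b) / Gamma(a + b), to the beta-discrete-Weibull survival and churn probabilities:

    S(t \mid \alpha, \beta, c) = \frac{B(\alpha,\ \beta + t^{c})}{B(\alpha,\ \beta)} = \frac{\Gamma(\beta + t^{c})\,\Gamma(\alpha + \beta)}{\Gamma(\alpha + \beta + t^{c})\,\Gamma(\beta)}, \qquad p(t) = S(t-1) - S(t)

which is what s[t] and p[t] hold before the log-likelihood is assembled.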
Example #12
    def __init__(self,
                 rng,
                 input,
                 batch_size,
                 in_size,
                 latent_size,
                 W_a=None,
                 W_b=None,
                 epsilon=0.01):
        self.srng = theano.tensor.shared_randomstreams.RandomStreams(
            rng.randint(999999))
        self.input = input

        # setup variational params
        if W_a is None:
            W_values = np.asarray(
                0.01 * rng.standard_normal(size=(in_size, latent_size - 1)),
                dtype=theano.config.floatX)
            W_a = theano.shared(value=W_values, name='W_a')
        if W_b is None:
            W_values = np.asarray(
                0.01 * rng.standard_normal(size=(in_size, latent_size - 1)),
                dtype=theano.config.floatX)
            W_b = theano.shared(value=W_values, name='W_b')
        self.W_a = W_a
        self.W_b = W_b

        # compute Gamma samples
        uniform_samples = T.cast(
            self.srng.uniform(size=(batch_size, latent_size - 1),
                              low=0.01,
                              high=0.99), theano.config.floatX)
        self.a = Softplus(T.dot(self.input, self.W_a))
        self.b = Softplus(T.dot(self.input, self.W_b))
        v_samples = ((uniform_samples * self.a * T.gamma(self.a))
                     **(1 / self.a)) / self.b

        # setup variables for recursion
        stick_segment = theano.shared(value=np.zeros(
            (batch_size, ), dtype=theano.config.floatX),
                                      name='stick_segment')
        remaining_stick = theano.shared(value=np.ones(
            (batch_size, ), dtype=theano.config.floatX),
                                        name='remaining_stick')

        def compute_latent_vars(i, stick_segment, remaining_stick, v_samples):
            # compute stick segment
            stick_segment = v_samples[:, i] * remaining_stick
            remaining_stick *= (1 - v_samples[:, i])
            return (stick_segment, remaining_stick)

        (stick_segments, remaining_sticks), updates = theano.scan(
            fn=compute_latent_vars,
            outputs_info=[stick_segment, remaining_stick],
            sequences=T.arange(latent_size - 1),
            non_sequences=[v_samples],
            strict=True)

        self.avg_used_dims = T.mean(T.sum(remaining_sticks > epsilon, axis=0))
        self.latent_vars = T.transpose(
            T.concatenate([
                stick_segments,
                T.shape_padaxis(remaining_sticks[-1, :], axis=1).T
            ],
                          axis=0))

        self.params = [self.W_a, self.W_b]
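A note on the sampling step: v_samples appears to use the small-value inverse-CDF approximation for a Gamma(a, rate b) draw. Since the lower incomplete gamma satisfies gamma(a, z) ≈ z^a / a as z → 0, the CDF is approximately (b x)^a / (a Gamma(a)), and solving F(x) = u for a uniform u gives

    x \approx \frac{\bigl(u\,a\,\Gamma(a)\bigr)^{1/a}}{b}

which is the expression used above; the scan then turns these draws into stick-breaking segments, with remaining_stick tracking the leftover mass.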