Code example #1
File: curiosity.py Project: xmanatee/ai.trading
def extract_params(bnn_weights, pred_loss, delta=0.01):
    rhos = list(
        filter(lambda p: p.name.endswith('bnn.rho'), bnn_weights))
    mus = list(
        filter(lambda p: p.name.endswith('bnn.mu'), bnn_weights))

    grad_mus = T.grad(pred_loss, mus)
    new_mus = [(mu - delta * grad_mu)
               for mu, grad_mu in zip(mus, grad_mus)]

    grad_rhos = T.grad(pred_loss, rhos)
    new_rhos = [(rho - delta * grad_rho)
                for rho, grad_rho in zip(rhos, grad_rhos)]

    for i in range(len(new_mus)):
        new_mus[i].name = 'new_mu'
    for i in range(len(new_rhos)):
        new_rhos[i].name = 'new_rho'

    mus, new_mus, rhos, new_rhos = list(map(
        lambda variables: T.concatenate(
            [var.ravel() for var in variables]),
        [mus, new_mus, rhos, new_rhos]))

    sigmas = T.log1p(T.exp(rhos))
    new_sigmas = T.log1p(T.exp(new_rhos))

    return mus, new_mus, sigmas, new_sigmas
Code example #2
    def logp(self, value):
        r"""
        Calculate log-probability of ZeroInflatedBinomial distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired, the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        psi = self.psi
        p = self.p
        n = self.n

        logp_val = tt.switch(
            tt.gt(value, 0),
            tt.log(psi) + self.bin.logp(value),
            logaddexp(tt.log1p(-psi),
                      tt.log(psi) + n * tt.log1p(-p)),
        )

        return bound(logp_val, 0 <= value, value <= n, 0 <= psi, psi <= 1,
                     0 <= p, p <= 1)
Code example #3
def neuron_theano(neuron_type, x):
    import theano
    import theano.tensor as tt
    floatX = theano.config.floatX

    if isinstance(neuron_type, nengo.Direct):
        return x
    elif isinstance(neuron_type, nengo.neurons.RectifiedLinear):
        return tt.maximum(x, 0)
    elif isinstance(neuron_type, nengo.neurons.Sigmoid):
        return tt.nnet.sigmoid(x)
    elif isinstance(neuron_type, SoftLIFRate):  # before LIF since subclass
        # do not apply amplitude, since this is done elsewhere!
        tau_ref = tt.cast(neuron_type.tau_ref, floatX)
        tau_rc = tt.cast(neuron_type.tau_rc, floatX)
        sigma = tt.cast(neuron_type.sigma, floatX)
        j = tt.nnet.softplus((x - 1) / sigma) * sigma
        r = 1 / (tau_ref + tau_rc * tt.log1p(1 / j))
        return tt.switch(j > 0, r, 0)
    elif isinstance(neuron_type, (nengo.LIF, nengo.LIFRate)):
        tau_ref = tt.cast(neuron_type.tau_ref, floatX)
        tau_rc = tt.cast(neuron_type.tau_rc, floatX)
        j = x - 1
        r = 1. / (tau_ref + tau_rc * tt.log1p(1. / j))
        return tt.switch(j > 0, r, 0)
    else:
        raise NotImplementedError("Neuron type %r" % neuron_type)
Code example #4
File: transforms.py Project: Riashat/pymc3
 def jacobian_det(self, y):
     Km1 = y.shape[0]
     k = tt.arange(Km1)[(slice(None),) + (None,) * (y.ndim - 1)]
     eq_share = -tt.log(Km1 - k)  # logit(1./(Km1 + 1 - k))
     yl = y + eq_share
     yu = tt.concatenate([tt.ones(y[:1].shape), 1 - inverse_logit(yl)])
     S = tt.extra_ops.cumprod(yu, 0)
     return tt.sum(tt.log(S[:-1]) - tt.log1p(tt.exp(yl)) - tt.log1p(tt.exp(-yl)), 0)
Code example #5
 def jacobian_det(self, y_):
     y = y_.T
     Km1 = y.shape[0]
     k = tt.arange(Km1)[(slice(None), ) + (None, ) * (y.ndim - 1)]
     eq_share = logit(1. / (Km1 + 1 - k).astype(str(y_.dtype)))
     yl = y + eq_share
     yu = tt.concatenate([tt.ones(y[:1].shape), 1 - invlogit(yl, self.eps)])
     S = tt.extra_ops.cumprod(yu, 0)
     return tt.sum(tt.log(S[:-1]) - tt.log1p(tt.exp(yl)) - tt.log1p(tt.exp(-yl)), 0).T
Code example #6
 def jacobian_det(self, y):
     Km1 = y.shape[0]
     k = tt.arange(Km1)[(slice(None), ) + (None, ) * (y.ndim - 1)]
     eq_share = -tt.log(Km1 - k)  # logit(1./(Km1 + 1 - k))
     yl = y + eq_share
     yu = tt.concatenate([tt.ones(y[:1].shape), 1 - inverse_logit(yl)])
     S = tt.extra_ops.cumprod(yu, 0)
     return tt.sum(
         tt.log(S[:-1]) - tt.log1p(tt.exp(yl)) - tt.log1p(tt.exp(-yl)), 0)
Code example #7
File: transforms.py Project: hstm/pymc3
 def jacobian_det(self, y_):
     y = y_.T
     Km1 = y.shape[0]
     k = tt.arange(Km1)[(slice(None), ) + (None, ) * (y.ndim - 1)]
     eq_share = logit(1. / (Km1 + 1 - k).astype(str(y_.dtype)))
     yl = y + eq_share
     yu = tt.concatenate([tt.ones(y[:1].shape), 1 - invlogit(yl, self.eps)])
     S = tt.extra_ops.cumprod(yu, 0)
     return tt.sum(tt.log(S[:-1]) - tt.log1p(tt.exp(yl)) - tt.log1p(tt.exp(-yl)), 0).T
Code example #8
def log_i0(x):
    """
    Calculates the logarithm of the 0 order modified Bessel function of the first kind""
    """
    return tt.switch(tt.lt(x, 5), tt.log1p(x**2. / 4. + x**4. / 64. + x**6. / 2304.
                                           + x**8. / 147456. + x**10. / 14745600.
                                           + x**12. / 2123366400.),
                                  x - 0.5 * tt.log(2. * np.pi * x) + tt.log1p(1. / (8. * x)
                                  + 9. / (128. * x**2.) + 225. / (3072. * x**3.)
                                  + 11025. / (98304. * x**4.)))
Code example #9
File: deep-auto.py Project: colinsongf/nef-rbm
    def rates(self, x):
        dtype = theano.config.floatX
        sigma = tt.cast(0.05, dtype=dtype)
        tau_ref = tt.cast(self.tau_ref, dtype=dtype)
        tau_rc = tt.cast(self.tau_rc, dtype=dtype)

        j = self.gain * x + self.bias - 1
        j = sigma * tt.log1p(tt.exp(j / sigma))
        v = 1. / (tau_ref + tau_rc * tt.log1p(1. / j))
        return tt.switch(j > 0, v, 0.0) / self.max_rates
Code example #10
File: deep-auto.py Project: Narts/nef-rbm
    def rates(self, x):
        dtype = theano.config.floatX
        sigma = tt.cast(0.05, dtype=dtype)
        tau_ref = tt.cast(self.tau_ref, dtype=dtype)
        tau_rc = tt.cast(self.tau_rc, dtype=dtype)

        j = self.gain * x + self.bias - 1
        j = sigma * tt.log1p(tt.exp(j / sigma))
        v = 1. / (tau_ref + tau_rc * tt.log1p(1. / j))
        return tt.switch(j > 0, v, 0.0) / self.max_rates
Code example #11
def safe_logaddexp(a, b):
    """Symbolic log(exp(a) + exp(b)). The edge case where `a` - `b` is undefined is handled by
    setting the difference to 0. This occurs if both `a` and `b` are +inf or -inf.

    Returns:
        symbolic log(exp(a) + exp(b))
    """
    diff = b - a
    safe_diff = tt.switch(tt.isnan(diff), 0, diff)
    return tt.switch(safe_diff >= 0, b + tt.log1p(tt.exp(-safe_diff)),
                     a + tt.log1p(tt.exp(safe_diff)))
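A minimal sketch of how this helper behaves, assuming the safe_logaddexp defined above is in scope; the test values below are illustrative and not part of the original project:

import numpy as np
import theano
import theano.tensor as tt

a, b = tt.dscalars('a', 'b')
f = theano.function([a, b], safe_logaddexp(a, b))

# The rewritten form never exponentiates a large positive number,
# so it stays finite where a naive log(exp(a) + exp(b)) would overflow.
print(f(1000.0, 999.0))       # ~1000.3133, matches np.logaddexp(1000, 999)
print(f(-np.inf, -np.inf))    # -inf, the edge case handled via the NaN check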
Code example #12
File: commons.py Project: broadinstitute/gatk
def safe_logaddexp(a, b):
    """Symbolic log(exp(a) + exp(b)). The edge case where `a` - `b` is undefined is handled by
    setting the difference to 0. This occurs if both `a` and `b` are +inf or -inf.

    Returns:
        symbolic log(exp(a) + exp(b))
    """
    diff = b - a
    safe_diff = tt.switch(tt.isnan(diff), 0, diff)
    return tt.switch(safe_diff >= 0,
                     b + tt.log1p(tt.exp(-safe_diff)),
                     a + tt.log1p(tt.exp(safe_diff)))
Code example #13
def mu_law_encode(audio, quantization_channels):
    '''Quantizes waveform amplitudes.
    Code is derived from ibab's WaveNet GitHub implementation.
    '''
    mu = float(quantization_channels - 1)
    # Perform mu-law companding transformation (ITU-T, 1988)
    # Minimum operation is here to deal with rare large amplitudes caused
    # by resampling
    safe_audio_abs = T.minimum(abs(audio), 1.0)
    magnitude = T.log1p(mu * safe_audio_abs) / T.log1p(mu)
    signal = T.sgn(audio) * magnitude
    # Quantize signal to the specified number of levels.
    return T.cast((signal + 1) / 2 * mu + 0.5, 'int32')
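A small usage sketch, assuming the mu_law_encode above is in scope (with theano.tensor imported as T, as in its source file); the test signal and channel count are illustrative:

import numpy as np
import theano
import theano.tensor as T

audio = T.dvector('audio')
encode = theano.function([audio], mu_law_encode(audio, 256))

x = np.sin(np.linspace(0, 4 * np.pi, 16000))  # toy waveform in [-1, 1]
q = encode(x)
# with 256 quantization channels the codes fall in [0, 255]
assert q.min() >= 0 and q.max() <= 255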
Code example #14
def log_i0(x):
    """
    Calculates the logarithm of the 0 order modified Bessel function of the first kind""
    """
    return tt.switch(
        tt.lt(x, 5),
        tt.log1p(x**2.0 / 4.0 + x**4.0 / 64.0 + x**6.0 / 2304.0 +
                 x**8.0 / 147456.0 + x**10.0 / 14745600.0 +
                 x**12.0 / 2123366400.0),
        x - 0.5 * tt.log(2.0 * np.pi * x) +
        tt.log1p(1.0 / (8.0 * x) + 9.0 / (128.0 * x**2.0) + 225.0 /
                 (3072.0 * x**3.0) + 11025.0 / (98304.0 * x**4.0)),
    )
Code example #15
File: discrete.py Project: robindang0573/pymc3
    def logp(self, value):
        psi = self.psi
        p = self.p
        n = self.n

        logp_val = tt.switch(
            tt.gt(value, 0),
            tt.log(psi) + self.bin.logp(value),
            logaddexp(tt.log1p(-psi),
                      tt.log(psi) + n * tt.log1p(-p)))

        return bound(logp_val, 0 <= value, value <= n, 0 <= psi, psi <= 1,
                     0 <= p, p <= 1)
Code example #16
File: discrete.py Project: aasensio/pymc3
    def logp(self, value):
        psi = self.psi
        p = self.p
        n = self.n

        logp_val = tt.switch(tt.gt(value, 0),
                 tt.log(psi) + self.bin.logp(value),
                 logaddexp(tt.log1p(-psi), tt.log(psi) + n * tt.log1p(-p)))

        return bound(logp_val,
            0 <= value, value <= n,
            0 <= psi, psi <= 1,
            0 <= p, p <= 1)
Code example #17
File: nlif-deep.py Project: Narts/nef-rbm
def nlif(x):
    dtype = theano.config.floatX
    sigma = tt.cast(0.05, dtype=dtype)
    tau_ref = tt.cast(0.002, dtype=dtype)
    tau_rc = tt.cast(0.02, dtype=dtype)
    alpha = tt.cast(1, dtype=dtype)
    beta = tt.cast(1, dtype=dtype)
    amp = tt.cast(1. / 65, dtype=dtype)

    j = alpha * x + beta - 1
    j = sigma * tt.log1p(tt.exp(j / sigma))
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #18
File: train_lif.py Project: Narts/nef-rbm
def nlif(x):
    dtype = theano.config.floatX
    sigma = tt.cast(0.05, dtype=dtype)
    tau_ref = tt.cast(0.002, dtype=dtype)
    tau_rc = tt.cast(0.02, dtype=dtype)
    alpha = tt.cast(1, dtype=dtype)
    beta = tt.cast(1, dtype=dtype)  # so that f(0) = firing threshold
    amp = tt.cast(1. / 63.04, dtype=dtype)  # so that f(1) = 1

    j = alpha * x + beta - 1
    j = sigma * tt.log1p(tt.exp(j / sigma))
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #19
File: train_lif.py Project: colinsongf/nef-rbm
def nlif(x):
    dtype = theano.config.floatX
    sigma = tt.cast(0.05, dtype=dtype)
    tau_ref = tt.cast(0.002, dtype=dtype)
    tau_rc = tt.cast(0.02, dtype=dtype)
    alpha = tt.cast(1, dtype=dtype)
    beta = tt.cast(1, dtype=dtype)  # so that f(0) = firing threshold
    amp = tt.cast(1. / 63.04, dtype=dtype)  # so that f(1) = 1

    j = alpha * x + beta - 1
    j = sigma * tt.log1p(tt.exp(j / sigma))
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #20
File: nlif-deep.py Project: colinsongf/nef-rbm
def nlif(x):
    dtype = theano.config.floatX
    sigma = tt.cast(0.05, dtype=dtype)
    tau_ref = tt.cast(0.002, dtype=dtype)
    tau_rc = tt.cast(0.02, dtype=dtype)
    alpha = tt.cast(1, dtype=dtype)
    beta = tt.cast(1, dtype=dtype)
    amp = tt.cast(1. / 65, dtype=dtype)

    j = alpha * x + beta - 1
    j = sigma * tt.log1p(tt.exp(j / sigma))
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #21
 def kl_div(self, x, y):
     """
     Compute sum of D(x_i || y_i) for each corresponding element
     along the 3rd dimension (the embedding dimension)
     of x and y
     This function takes care to not compute logarithms that are close
     to 0, since NaNs could result for log(sigmoid(x)) if x is negative.
     It simply uses the identity log(sigmoid(x)) = -log(1 + e^-x).
     """
     sig_x = T.nnet.sigmoid(x)
     exp_x = T.exp(x)
     exp_y = T.exp(y)
     exp_neg_x = T.exp(-x)
     exp_neg_y = T.exp(-y)
     return (sig_x * (T.log1p(exp_neg_y) - T.log1p(exp_neg_x)) + (1 - sig_x) * (T.log1p(exp_y) - T.log1p(exp_x))).mean()
Code example #22
File: bayes.py Project: dantodor/Practical_RL
 def log_posterior_approx(self, weights, mean, rho):
     """
     Logarithm of ELBO on posterior probabilities:
     log q(weights|learned mu and rho) aka log q(theta|x)
     """
     std = T.log1p(T.exp(rho))  #rho to std
     return self.log_normal(weights, mean, std)
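The softplus mapping std = log1p(exp(rho)) keeps the standard deviation positive for any real rho; a quick NumPy check (values illustrative), including the inverse np.log(np.expm1(std)) that other examples on this page use to initialize rho from a target std:

import numpy as np

rho = -3.0
std = np.log1p(np.exp(rho))       # softplus(rho) ~ 0.0486, always > 0
rho_back = np.log(np.expm1(std))  # inverse mapping recovers -3.0
assert np.isclose(rho_back, rho)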
Code example #23
 def kl_div(self, x, y):
     """
     Compute sum of D(x_i || y_i) for each corresponding element
     along the 3rd dimension (the embedding dimension)
     of x and y
     This function takes care to not compute logarithms that are close
     to 0, since NaNs could result for log(sigmoid(x)) if x is negative.
     It simply uses the identity log(sigmoid(x)) = -log(1 + e^-x).
     """
     sig_x = T.nnet.sigmoid(x)
     exp_x = T.exp(x)
     exp_y = T.exp(y)
     exp_neg_x = T.exp(-x)
     exp_neg_y = T.exp(-y)
     return (sig_x * (T.log1p(exp_neg_y) - T.log1p(exp_neg_x)) +
             (1 - sig_x) * (T.log1p(exp_y) - T.log1p(exp_x))).mean()
Code example #24
File: censored_data.py Project: aasensio/pymc3
def normal_lccdf(mu, sigma, x):
    z = (x - mu) / sigma
    return tt.switch(
        tt.gt(z, 1.0),
        tt.log(tt.erfcx(z / tt.sqrt(2.)) / 2.) - tt.sqr(z) / 2.,
        tt.log1p(-tt.erfc(-z / tt.sqrt(2.)) / 2.)
    )
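The erfcx branch keeps the far upper tail accurate where a naive log(1 - Phi(x)) would lose precision. A quick check of the function above, assuming SciPy is available only to provide a reference value; the inputs are illustrative:

import theano
import theano.tensor as tt
from scipy import stats

mu, sigma, x = tt.dscalars('mu', 'sigma', 'x')
lccdf = theano.function([mu, sigma, x], normal_lccdf(mu, sigma, x))

print(lccdf(0.0, 1.0, 10.0))             # ~ -53.23
print(stats.norm.logsf(10.0, 0.0, 1.0))  # SciPy reference for log P(X > 10)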
Code example #25
def stick_breaking_log(u):
    """Return log of weights from stick-breaking process."""
    lu = tns.concatenate((tns.log(u), [0.0]))
    cs = tns.concatenate(([0.0], tns.cumsum(tns.log1p(-u))))
    lw = lu + cs

    return lw
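Exponentiating the returned log-weights should give a proper weight vector; a small check, assuming tns is theano.tensor (as in the source file) and using illustrative stick-breaking fractions:

import numpy as np
import theano
import theano.tensor as tns

u = tns.dvector('u')
log_w = theano.function([u], stick_breaking_log(u))

fractions = np.array([0.5, 0.25, 0.1])  # K-1 = 3 stick-breaking fractions
w = np.exp(log_w(fractions))
print(w, w.sum())  # 4 weights [0.5, 0.125, 0.0375, 0.3375], summing to 1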
Code example #26
File: vdrvc.py Project: mutual-ai/vd-ard-bdl16
        def create_theano_loss(d):
            X, t = T.dmatrix('X'), T.dvector('t')
            log_sigma2 = theano.shared(np.ones((num_classes, d)))
            theta = theano.shared(np.random.randn(num_classes, d))

            # Change parametrization
            log_alpha = log_sigma2 - T.log(theta**2)
            la, alpha = log_alpha, T.exp(log_alpha)

            # -KL(q || prior)
            mD_KL = -(0.5 * T.log1p(T.exp(-la)) -
                      (0.03 + 1.0 /
                       (1.0 + T.exp(-(1.5 * (la + 1.3)))) * 0.64)).sum()

            # NLL through Local Reparametrization
            mu, si = T.dot(X, theta.T), T.sqrt(
                T.dot(X * X, (alpha * theta * theta).T))
            activation = mu + self._srng.normal(mu.shape, avg=0, std=1) * si
            predictions = T.nnet.softmax(activation)
            ell = -T.sum(
                categorical_crossentropy(predictions, one_hot(t, num_classes)))

            # Objective Negative SGVLB
            nlb = -(N / batch_size * ell + mD_KL)

            # Optimization Method and Function Compiling
            opt = lasagne.updates.adam(nlb, [log_sigma2, theta],
                                       learning_rate=lr,
                                       beta1=beta)
            lbf = function([X, t], nlb, updates=opt)

            return lbf, theta, log_sigma2
Code example #27
    def setup(self, bottom, top):
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        assert len(bottom) == 2
        assert len(top) == 1
        s_y = T.matrix('y')  # y in [-inf, inf]
        s_t = T.matrix('t')  # t in {-1, 0, 1} where 0 is ignored
        s_dloss = T.scalar('dloss')
        # Forward
        # s_loss = T.mean(abs(s_t) * T.log1p(T.exp(-s_y * s_t)))  # unstable
        s_loss = -T.sum(
            abs(s_t) * (
                s_y * ((s_t >= 0) - (s_y >= 0)) - T.log1p(T.exp(-abs(s_y)))))\
            / T.maximum(T.sum(abs(s_t)), 1)
        # Backward
        s_p = 1 / (1 + T.exp(-s_y))
        s_dy = s_dloss * abs(s_t) * (s_p - (s_t >= 0)) / \
            T.maximum(T.sum(abs(s_t)), 1)

        def _o(s):
            return tn.Out(s, borrow=True)
        self.tn_forward = tn.function([s_y, s_t], s_loss)
        self.tn_backward = tn.function([s_y, s_t, s_dloss], _o(s_dy))
Code example #28
 def eval_reg(self, **kwargs):
     k1, k2, k3 = 0.63576, 1.8732, 1.48695
     C = -k1
     log_alpha = self.clip(self.log_sigma2 - T.log(self.W**2))
     mdkl = k1 * T.nnet.sigmoid(k2 + k3 * log_alpha) - 0.5 * T.log1p(
         T.exp(-log_alpha)) + C
     return -T.sum(mdkl)
Code example #29
File: censored_data.py Project: rsumner31/pymc3-23
def normal_lcdf(mu, sigma, x):
    z = (x - mu) / sigma
    return tt.switch(
        tt.lt(z, -1.0),
        tt.log(tt.erfcx(-z / tt.sqrt(2.)) / 2.) - tt.sqr(z) / 2,
        tt.log1p(-tt.erfc(z / tt.sqrt(2.)) / 2.)
    )
Code example #30
File: bayes.py Project: dantodor/Practical_RL
    def __call__(self, layer, spec, shape, name=None, **tags):
        # case when user uses default init specs
        assert tags.get('variational', False), "Please declare param as variational to avoid confusion"

        if not isinstance(spec, dict):
            initial_rho = np.log(np.expm1(self.prior_std))  # std to rho
            assert np.isfinite(initial_rho), "too small std to initialize correctly. Please pass explicit"\
                                             " initializer (dict with {'mu':mu_init, 'rho':rho_init})."
            spec = {'mu': spec, 'rho': init.Constant(initial_rho)}

        mu_spec, rho_spec = spec['mu'], spec['rho']

        rho = layer.add_param(rho_spec, shape, name=(name or 'unk') + '.rho', **tags)
        mean = layer.add_param(mu_spec, shape, name=(name or 'unk') + '.mu', **tags)

        # Reparameterization trick
        e = self.srng.normal(shape, std=1)
        W = mean + T.log1p(T.exp(rho)) * e

        # KL divergence KL(q,p) = E_(w~q(w|x)) [log q(w|x) - log P(w)] aka variational cost
        q_p = T.sum(self.log_posterior_approx(W, mean, rho) - self.log_prior(W))

        # accumulate variational cost
        layer._bbwrap_var_cost += q_p
        return W
Code example #31
    def setup(self, bottom, top):
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        assert len(bottom) == 2
        assert len(top) == 1
        s_y = T.matrix('y')  # y in [-inf, inf]
        s_t = T.matrix('t')  # t in {-1, 0, 1} where 0 is ignored
        s_dloss = T.scalar('dloss')
        # Forward
        # s_loss = T.mean(abs(s_t) * T.log1p(T.exp(-s_y * s_t)))  # unstable
        s_loss = -T.sum(
            abs(s_t) * (
                s_y * ((s_t >= 0) - (s_y >= 0)) - T.log1p(T.exp(-abs(s_y)))))\
            / T.maximum(T.sum(abs(s_t)), 1)
        # Backward
        s_p = 1 / (1 + T.exp(-s_y))
        s_dy = s_dloss * abs(s_t) * (s_p - (s_t >= 0)) / \
            T.maximum(T.sum(abs(s_t)), 1)

        def _o(s):
            return tn.Out(s, borrow=True)

        self.tn_forward = tn.function([s_y, s_t], s_loss)
        self.tn_backward = tn.function([s_y, s_t, s_dloss], _o(s_dy))
Code example #32
    def logp(self, value):
        r"""
        Calculate log-probability of ZeroInflatedNegativeBinomial distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired, the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        alpha = self.alpha
        mu = self.mu
        psi = self.psi

        logp_other = tt.log(psi) + self.nb.logp(value)
        logp_0 = logaddexp(
            tt.log1p(-psi),
            tt.log(psi) + alpha * (tt.log(alpha) - tt.log(alpha + mu)))

        logp_val = tt.switch(tt.gt(value, 0), logp_other, logp_0)

        return bound(logp_val, 0 <= value, 0 <= psi, psi <= 1, mu > 0,
                     alpha > 0)
Code example #33
File: bayes.py Project: Aakash3101/Deep-Learning
    def __call__(self, layer, spec, shape, name=None, **tags):
        # case when user uses default init specs
        assert tags.get(
            'variational',
            False), "Please declare param as variational to avoid confusion"

        if not isinstance(spec, dict):
            initial_rho = np.log(np.expm1(self.prior_std))  # std to rho
            assert np.isfinite(initial_rho), "too small std to initialize correctly. Please pass explicit"\
                " initializer (dict with {'mu':mu_init, 'rho':rho_init})."
            spec = {'mu': spec, 'rho': init.Constant(initial_rho)}

        mu_spec, rho_spec = spec['mu'], spec['rho']

        rho = layer.add_param(rho_spec,
                              shape,
                              name=(name or 'unk') + '.rho',
                              **tags)
        mean = layer.add_param(mu_spec,
                               shape,
                               name=(name or 'unk') + '.mu',
                               **tags)

        # Reparameterization trick
        e = self.srng.normal(shape, std=1)
        W = mean + T.log1p(T.exp(rho)) * e

        # KL divergence KL(q,p) = E_(w~q(w|x)) [log q(w|x) - log P(w)] aka
        # variational cost
        q_p = T.sum(
            self.log_posterior_approx(W, mean, rho) - self.log_prior(W))

        # accumulate variational cost
        layer._bbwrap_var_cost += q_p
        return W
Code example #34
def normal_lccdf(mu, sigma, x):
    z = (x - mu) / sigma
    return tt.switch(
        tt.gt(z, 1.0),
        tt.log(tt.erfcx(z / tt.sqrt(2.0)) / 2.0) - tt.sqr(z) / 2.0,
        tt.log1p(-tt.erfc(-z / tt.sqrt(2.0)) / 2.0),
    )
Code example #35
File: bayes.py Project: Aakash3101/Deep-Learning
 def log_posterior_approx(self, weights, mean, rho):
     """
     Logarithm of ELBO on posterior probabilities:
     log q(weights|learned mu and rho) aka log q(theta|x)
     """
     std = T.log1p(T.exp(rho))  # rho to std
     return self.log_normal(weights, mean, std)
Code example #36
    def logp(self, value):
        quaddist, logdet, ok = self._quaddist(value)
        k = value.shape[-1].astype(theano.config.floatX)

        norm = (gammaln((self.nu + k) / 2.) - gammaln(self.nu / 2.) -
                0.5 * k * floatX(np.log(self.nu * np.pi)))
        inner = -(self.nu + k) / 2. * tt.log1p(quaddist / self.nu)
        return bound(norm + inner - logdet, ok)
Code example #37
File: dist_math.py Project: alexander-belikov/pymc3
def normal_lcdf(mu, sigma, x):
    """Compute the log of the cumulative density function of the normal."""
    z = (x - mu) / sigma
    return tt.switch(
        tt.lt(z, -1.0),
        tt.log(tt.erfcx(-z / tt.sqrt(2.)) / 2.) - tt.sqr(z) / 2.,
        tt.log1p(-tt.erfc(z / tt.sqrt(2.)) / 2.)
    )
Code example #38
 def gev_logp(value):
     scaled = (value - loc) / scale
     logp = -(tt.log(scale) + (((c - 0.5) + 1) / (c - 0.5) * tt.log1p(
         (c - 0.5) * scaled) + (1 + (c - 0.5) * scaled)**(-1 /
                                                          (c - 0.5))))
     bound1 = loc - scale / (c - 0.5)
     bounds = tt.switch((c - 0.5) > 0, value > bound1, value < bound1)
     return bound(logp, bounds, c != 0)
Code example #39
def normal_lcdf(mu, sigma, x):
    """Compute the log of the cumulative density function of the normal."""
    z = (x - mu) / sigma
    return tt.switch(
        tt.lt(z, -1.0),
        tt.log(tt.erfcx(-z / tt.sqrt(2.0)) / 2.0) - tt.sqr(z) / 2.0,
        tt.log1p(-tt.erfc(z / tt.sqrt(2.0)) / 2.0),
    )
Code example #40
File: multivariate.py Project: aasensio/pymc3
    def logp(self, value):
        quaddist, logdet, ok = self._quaddist(value)
        k = value.shape[-1].astype(theano.config.floatX)

        norm = (gammaln((self.nu + k) / 2.)
                - gammaln(self.nu / 2.)
                - 0.5 * k * floatX(np.log(self.nu * np.pi)))
        inner = - (self.nu + k) / 2. * tt.log1p(quaddist / self.nu)
        return bound(norm + inner - logdet, ok)
Code example #41
def log_add(lna, lnb):
    """
    Compute the ln(a+b) given {lna,lnb}
    :param
    :return: ln(a+b)
    """
    max_ = tensor.maximum(lna, lnb)
    result = (max_ + tensor.log1p(tensor.exp(lna + lnb - 2 * max_)))   #log1p(x) = log(1+x)
    return tensor.switch(tensor.isnan(result), max_, result)
Code example #42
File: ctc.py Project: igul222/Marmot
def _log_add_3(log_a, log_b, log_c):
    """Theano expression for log(a+b+c) given log(a), log(b), log(c)."""
    smaller = T.minimum(log_a, log_b)
    larger = T.maximum(log_a, log_b)
    largest = T.maximum(larger, log_c)
    larger = T.minimum(larger, log_c)

    return largest + T.log1p(
        T.exp(smaller - largest) + T.exp(larger - largest))
Code example #43
File: discrete.py Project: robindang0573/pymc3
    def logp(self, value):
        psi = self.psi
        theta = self.theta

        logp_val = tt.switch(tt.gt(value, 0),
                             tt.log(psi) + self.pois.logp(value),
                             logaddexp(tt.log1p(-psi),
                                       tt.log(psi) - theta))

        return bound(logp_val, 0 <= value, 0 <= psi, psi <= 1, 0 <= theta)
Code example #44
 def gev_logp(value, t, t2):
     loc = m1 * t2 + m2 * t + m3
     #            scale=tt.log(tt.exp(scale1))
     scaled = (value - loc) / scale
     logp = -(tt.log(scale) + (((c - 0.5) + 1) / (c - 0.5) * tt.log1p(
         (c - 0.5) * scaled) + (1 + (c - 0.5) * scaled)**(-1 /
                                                          (c - 0.5))))
     bound1 = loc - scale / (c - 0.5)
     bounds = tt.switch((c - 0.5) > 0, value > bound1, value < bound1)
     return bound(logp, bounds, c != 0)
Code example #45
File: math.py Project: zivtigher/pymc3
def log1mexp(x):
    """Return log(1 - exp(-x)).

    This function is numerically more stable than the naive approach.

    For details, see
    https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
    """
    return tt.switch(tt.lt(x, 0.683), tt.log(-tt.expm1(-x)),
                     tt.log1p(-tt.exp(-x)))
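The switch picks whichever of the two forms avoids catastrophic cancellation, per the note cited in the docstring. A quick comparison against the naive expression, assuming the log1mexp above is in scope and using an illustrative small input:

import numpy as np
import theano
import theano.tensor as tt

x = tt.dscalar('x')
f = theano.function([x], log1mexp(x))

print(f(1e-15))                    # ~ -34.539, accurate
print(np.log(1 - np.exp(-1e-15)))  # naive form is visibly off here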
Code example #46
File: nlif-rbm.py Project: Narts/nef-rbm
def lif(x):
    dtype = theano.config.floatX
    tau_ref = tt.cast(0.002, dtype=dtype)
    tau_rc = tt.cast(0.02, dtype=dtype)
    alpha = tt.cast(1, dtype=dtype)
    beta = tt.cast(1, dtype=dtype)
    amp = tt.cast(1. / 65, dtype=dtype)

    j = alpha * x + beta - 1
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #47
File: ctc.py Project: igul222/Marmot
def _log_add_3(log_a, log_b, log_c):
    """Theano expression for log(a+b+c) given log(a), log(b), log(c)."""
    smaller = T.minimum(log_a, log_b)
    larger = T.maximum(log_a, log_b)
    largest = T.maximum(larger, log_c)
    larger = T.minimum(larger, log_c)

    return largest + T.log1p(
            T.exp(smaller - largest) + 
            T.exp(larger - largest)
            )
Code example #48
File: continuous.py Project: gurganious/pymc3
    def logp(self, value):
        nu = self.nu
        mu = self.mu
        lam = self.lam
        sd = self.sd

        return bound(gammaln((nu + 1.0) / 2.0)
                     + .5 * T.log(lam / (nu * np.pi))
                     - gammaln(nu / 2.0)
                     - (nu + 1.0) / 2.0 * T.log1p(lam * (value - mu)**2 / nu),
                     lam > 0, nu > 0, sd > 0)
Code example #49
File: discrete.py Project: aasensio/pymc3
    def logp(self, value):
        psi = self.psi
        theta = self.theta

        logp_val = tt.switch(tt.gt(value, 0),
                     tt.log(psi) + self.pois.logp(value),
                     logaddexp(tt.log1p(-psi), tt.log(psi) - theta))

        return bound(logp_val,
            0 <= value,
            0 <= psi, psi <= 1,
            0 <= theta)
Code example #50
File: math.py Project: alexander-belikov/pymc3
def log1mexp(x):
    """Return log(1 - exp(-x)).

    This function is numerically more stable than the naive approach.

    For details, see
    https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
    """
    return tt.switch(
        tt.lt(x, 0.683),
        tt.log(-tt.expm1(-x)),
        tt.log1p(-tt.exp(-x)))
Code example #51
File: discrete.py Project: aasensio/pymc3
    def logp(self, value):
        alpha = self.alpha
        mu = self.mu
        psi = self.psi

        logp_val = tt.switch(tt.gt(value, 0),
                     tt.log(psi) + self.nb.logp(value),
                     logaddexp(tt.log1p(-psi), tt.log(psi) + alpha * (tt.log(alpha) - tt.log(alpha + mu))))

        return bound(logp_val,
                    0 <= value,
                    0 <= psi, psi <= 1,
                    mu > 0, alpha > 0)
Code example #52
File: NetworkCtcLayer.py Project: atuxhe/returnn
def log_sum(a, axis=None, keepdims=False):
  if axis is None:
    assert keepdims is False  # not implemented atm
    return log_sum(a.flatten(), axis=0)
  assert isinstance(axis, int)  # current implementation only for exactly one axis
  m, argm = T.max_and_argmax(a, axis=axis, keepdims=True)
  exp_a = T.exp(a - m)
  idx = T.arange(a.shape[axis]).dimshuffle(['x'] * axis + [0] + ['x'] * (a.ndim - axis - 1))
  exp_a = T.switch(T.eq(idx, argm), 0, exp_a)
  sum = T.sum(exp_a, axis=axis, keepdims=True)
  res = m + T.log1p(sum)
  if not keepdims:
    if axis is not None:
      res = res.dimshuffle([i for i in range(res.ndim) if i != axis])
    else:
      res = res.dimshuffle()  # expect a scalar
  return res
Code example #53
File: neurons.py Project: hunse/mnist-nengo
def s_softrelu(x, sigma):
    import theano.tensor as tt
    y = x / sigma
    return tt.switch(y < 34.0, sigma * tt.log1p(tt.exp(y)), x)
Code example #54
File: neurons.py Project: hunse/mnist-nengo
def s_lif(x, tau_ref, tau_rc, gain, bias, amp):
    import theano.tensor as tt
    j = gain * x + bias - 1
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #55
File: neurons.py Project: hunse/mnist-nengo
def s_softlif(x, sigma, tau_ref, tau_rc, gain, bias, amp):
    import theano.tensor as tt
    j = gain * x + bias - 1
    j = s_softrelu(j, sigma)
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)
Code example #56
File: continuous.py Project: gurganious/pymc3
 def logp(self, value):
     beta = self.beta
     return bound(T.log(2) - T.log(np.pi) - T.log(beta)
                  - T.log1p((value / beta)**2),
                  value >= 0, beta > 0)
Code example #57
File: continuous.py Project: gurganious/pymc3
 def logp(self, value):
     alpha = self.alpha
     beta = self.beta
     return bound(- T.log(np.pi) - T.log(beta)
                  - T.log1p(((value - alpha) / beta)**2),
                  beta > 0)
Code example #58
File: ctc.py Project: igul222/Marmot
def _log_add(log_a, log_b):
    """Theano expression for log(a+b) given log(a) and log(b)."""
    # TODO fix potential axis bug here!!! (it might be subtracting the wrong vals)
    smaller = T.minimum(log_a, log_b)
    larger = T.maximum(log_a, log_b)
    return larger + T.log1p(T.exp(smaller - larger))
Code example #59
File: ctc_cost.py Project: trungnt13/Lasagne
def _log_add(a, b):
    # TODO: move functions like this to utils
    max_ = tensor.maximum(a, b)
    result = max_ + tensor.log1p(tensor.exp(a + b - 2 * max_))
    return tensor.switch(tensor.isnan(result), max_, result)