def logp(self, value):
    """Log-probability of the Bernoulli distribution at ``value``."""
    if self._is_logit:
        # Logit parameterization: flip the sign of the logit for the
        # zero outcome, then logp = log(sigmoid(lp)) = -log1pexp(-lp).
        signed_logit = tt.switch(value, self._logit_p, -self._logit_p)
        return -log1pexp(-signed_logit)
    # Probability parameterization: pick log(p) or log(1 - p) per outcome,
    # guarded so that out-of-support values and invalid p yield -inf.
    prob = self.p
    per_value = tt.switch(value, tt.log(prob), tt.log(1 - prob))
    return bound(per_value, value >= 0, value <= 1, prob >= 0, prob <= 1)
def logp(self, value):
    """Return the Bernoulli log-probability evaluated at ``value``."""
    if not self._is_logit:
        # Probability parameterization with support/parameter guards
        # (invalid value or p produces -inf via ``bound``).
        p = self.p
        return bound(
            tt.switch(value, tt.log(p), tt.log(1 - p)),
            value >= 0,
            value <= 1,
            p >= 0,
            p <= 1,
        )
    # Logit parameterization: log(sigmoid(lp)) where lp carries the sign
    # of the observed outcome.
    lp = tt.switch(value, self._logit_p, -self._logit_p)
    return -log1pexp(-lp)
def test_log1pexp():
    """Check log1pexp against high-precision reference values.

    Reference values were computed with mpmath at 1000 decimal digits:
        mpmath.mp.dps = 1000
        [float(mpmath.log(1 + mpmath.exp(x))) for x in inputs]
    """
    inputs = np.array([-1e20, -100, -10, -1e-4, 0, 1e-4, 10, 100, 1e20])
    reference = np.array([
        0.0,
        3.720075976020836e-44,
        4.539889921686465e-05,
        0.6930971818099453,
        0.6931471805599453,
        0.6931971818099453,
        10.000045398899218,
        100.0,
        1e+20,
    ])
    computed = log1pexp(inputs).eval()
    npt.assert_allclose(computed, reference)
def test_log1pexp():
    """Verify log1pexp over extreme and near-zero inputs.

    Expected values come from mpmath with mp.dps = 1000:
        [float(mpmath.log(1 + mpmath.exp(x))) for x in vals]
    """
    vals = np.array([-1e20, -100, -10, -1e-4, 0, 1e-4, 10, 100, 1e20])
    expected = np.array(
        [
            0.0,
            3.720075976020836e-44,
            4.539889921686465e-05,
            0.6930971818099453,
            0.6931471805599453,
            0.6931971818099453,
            10.000045398899218,
            100.0,
            1e+20,
        ]
    )
    npt.assert_allclose(log1pexp(vals).eval(), expected)
def logp(self, value):
    r"""
    Calculate log-probability of Bernoulli distribution at specified value.

    Parameters
    ----------
    value: numeric
        Value(s) for which log-probability is calculated. If the log
        probabilities for multiple values are desired the values must be
        provided in a numpy array or theano tensor.

    Returns
    -------
    TensorVariable
    """
    if not self._is_logit:
        # Probability parameterization: select log(p) for successes and
        # log(1 - p) for failures; ``bound`` returns -inf outside the
        # support or for an invalid p.
        p = self.p
        logp_elem = tt.switch(value, tt.log(p), tt.log(1 - p))
        return bound(logp_elem, value >= 0, value <= 1, p >= 0, p <= 1)
    # Logit parameterization: numerically stable log(sigmoid(lp)).
    lp = tt.switch(value, self._logit_p, -self._logit_p)
    return -log1pexp(-lp)