def _check_input(self, loc, scale):
    """Convert ``loc`` and ``scale`` to tensors on a common broadcast shape.

    Both arguments pass through ``self._convert2tensor``; if their shapes
    differ, each is broadcast (via ``broadcast_to``, which presumably keeps
    the autodiff graph intact — confirm against its definition) to the
    NumPy broadcast shape of the pair.

    Returns
    -------
    tuple
        ``(loc, scale)`` as tensors of identical shape.
    """
    loc = self._convert2tensor(loc)
    scale = self._convert2tensor(scale)
    if loc.shape != scale.shape:
        common = np.broadcast(loc.value, scale.value).shape
        if loc.shape != common:
            loc = broadcast_to(loc, common)
        if scale.shape != common:
            scale = broadcast_to(scale, common)
    return loc, scale
def _check_input(self, x, t):
    """Convert ``x`` and ``t`` to tensors broadcast to one common shape.

    Shapes are reconciled only when they differ, using the NumPy broadcast
    shape of the underlying ``.value`` arrays; an operand already at that
    shape is returned untouched.

    Returns
    -------
    tuple
        ``(x, t)`` as tensors of identical shape.
    """
    x = self._convert2tensor(x)
    t = self._convert2tensor(t)
    if x.shape != t.shape:
        common = np.broadcast(x.value, t.value).shape
        if x.shape != common:
            x = broadcast_to(x, common)
        if t.shape != common:
            t = broadcast_to(t, common)
    return x, t
def _check_input(self, x, y):
    """Convert ``x`` and ``y`` to tensors broadcast to one common shape.

    Mirrors the other elementwise ``_check_input`` helpers in this file:
    convert both operands, then broadcast whichever one does not already
    match the joint NumPy broadcast shape.

    Returns
    -------
    tuple
        ``(x, y)`` as tensors of identical shape.
    """
    x = self._convert2tensor(x)
    y = self._convert2tensor(y)
    if x.shape != y.shape:
        common = np.broadcast(x.value, y.value).shape
        if x.shape != common:
            x = broadcast_to(x, common)
        if y.shape != common:
            y = broadcast_to(y, common)
    return x, y
def _check_input(self, shape, rate):
    """Convert Gamma-style ``shape`` and ``rate`` parameters to tensors
    sharing one broadcast shape.

    The local holding the broadcast result is named ``bshape`` to avoid
    shadowing the ``shape`` parameter (the original used ``shape_`` for
    the same reason).

    Returns
    -------
    tuple
        ``(shape, rate)`` as tensors of identical shape.
    """
    shape = self._convert2tensor(shape)
    rate = self._convert2tensor(rate)
    if shape.shape != rate.shape:
        bshape = np.broadcast(shape.value, rate.value).shape
        if shape.shape != bshape:
            shape = broadcast_to(shape, bshape)
        if rate.shape != bshape:
            rate = broadcast_to(rate, bshape)
    return shape, rate
def _check_input(self, x, mu, tau):
    """Convert ``x``, ``mu`` and ``tau`` to tensors of one common shape.

    Three-way variant of the pairwise helpers: if the three shapes are not
    already identical, each operand is broadcast to the joint NumPy
    broadcast shape of all three ``.value`` arrays.

    Returns
    -------
    tuple
        ``(x, mu, tau)`` as tensors of identical shape.
    """
    x = self._convert2tensor(x)
    mu = self._convert2tensor(mu)
    tau = self._convert2tensor(tau)
    if not x.shape == mu.shape == tau.shape:
        common = np.broadcast(x.value, mu.value, tau.value).shape
        if x.shape != common:
            x = broadcast_to(x, common)
        if mu.shape != common:
            mu = broadcast_to(mu, common)
        if tau.shape != common:
            tau = broadcast_to(tau, common)
    return x, mu, tau
def _check_input(self, x, y):
    """Validate operands of a (batched) matrix product.

    Both inputs are converted to tensors and required to be at least 2-D.
    The inner dimensions must align (``x.shape[-1] == y.shape[-2]``);
    any leading batch dimensions are broadcast to a common shape while the
    trailing two matrix dimensions are kept as-is.

    Raises
    ------
    ValueError
        If the inner dimensions do not align.
    """
    x = self._convert2tensor(x)
    y = self._convert2tensor(y)
    self._atleast_ndim(x, 2)
    self._atleast_ndim(y, 2)
    if x.shape[-1] != y.shape[-2]:
        raise ValueError(
            "shapes {} and {} not aligned: {} (dim -1) != {} (dim -2)".
            format(x.shape, y.shape, x.shape[-1], y.shape[-2]))
    if x.shape[:-2] != y.shape[:-2]:
        # Broadcast only the batch axes; index one scalar per matrix so
        # np.broadcast sees just the leading dimensions.
        batch = np.broadcast(x.value[..., 0, 0], y.value[..., 0, 0]).shape
        if x.shape[:-2] != batch:
            x = broadcast_to(x, batch + x.shape[-2:])
        if y.shape[:-2] != batch:
            y = broadcast_to(y, batch + y.shape[-2:])
    return x, y
def _check_input(self, a, b):
    """Validate operands of a (batched) linear solve-style operation.

    ``a`` must end in a square matrix whose side equals ``b.shape[-2]``
    (i.e. ``a.shape[-2:] == (N, N)`` with ``N = b.shape[-2]``). Leading
    batch dimensions of the two tensors are broadcast to a common shape.

    Raises
    ------
    ValueError
        If the trailing matrix dimensions are incompatible.
    """
    a = self._convert2tensor(a)
    b = self._convert2tensor(b)
    self._atleast_ndim(a, 2)
    self._atleast_ndim(b, 2)
    if a.shape[-2:] != (b.shape[-2], b.shape[-2]):
        raise ValueError(
            "Mismatching dimensionality of a and b: {} and {}".format(
                a.shape[-2:], b.shape[-2:]))
    if a.shape[:-2] != b.shape[:-2]:
        # Broadcast only the batch axes, leaving the matrix axes intact.
        batch = np.broadcast(a.value[..., 0, 0], b.value[..., 0, 0]).shape
        if a.shape[:-2] != batch:
            a = broadcast_to(a, batch + a.shape[-2:])
        if b.shape[:-2] != batch:
            b = broadcast_to(b, batch + b.shape[-2:])
    return a, b
def _check_input(self, coef, mu, std):
    """Convert mixture parameters to tensors sharing one broadcast shape.

    ``coef``, ``mu`` and ``std`` are converted and, when their shapes
    disagree, broadcast to the joint NumPy broadcast shape of all three.
    Side effect: ``self.n_component`` is set to the size of the (final)
    ``coef`` along ``self.axis``.

    Returns
    -------
    tuple
        ``(coef, mu, std)`` as tensors of identical shape.
    """
    coef = self._convert2tensor(coef)
    mu = self._convert2tensor(mu)
    std = self._convert2tensor(std)
    if not coef.shape == mu.shape == std.shape:
        common = np.broadcast(coef.value, mu.value, std.value).shape
        if coef.shape != common:
            coef = broadcast_to(coef, common)
        if mu.shape != common:
            mu = broadcast_to(mu, common)
        if std.shape != common:
            std = broadcast_to(std, common)
    self.n_component = coef.shape[self.axis]
    return coef, mu, std
def test_broadcast(self):
    """broadcast_to expands a (1, 1) parameter and sums its gradient.

    Broadcasting (1, 1) -> (5, 2, 3) replicates the scalar 30 times, so a
    backward pass with an all-ones gradient must accumulate 30 at the
    source parameter.
    """
    target_shape = (5, 2, 3)
    param = bn.Parameter(np.ones((1, 1)))
    out = broadcast_to(param, target_shape)
    self.assertEqual(out.shape, target_shape)
    out.backward(np.ones(target_shape))
    self.assertTrue((param.grad == np.ones((1, 1)) * 30).all())
def _check_input(self, mu, cov):
    """Validate mean/covariance inputs of a multivariate distribution.

    ``mu`` must be at least 1-D and ``cov`` at least 2-D, with
    ``cov.shape[-2:] == (D, D)`` for ``D = mu.shape[-1]``. Leading batch
    dimensions of the two tensors are broadcast to a common shape.

    Raises
    ------
    ValueError
        If the trailing dimensions of ``cov`` do not form a square matrix
        of ``mu``'s size.
    """
    mu = self._convert2tensor(mu)
    cov = self._convert2tensor(cov)
    self._atleast_ndim(mu, 1)
    self._atleast_ndim(cov, 2)
    if cov.shape[-2:] != (mu.shape[-1], mu.shape[-1]):
        raise ValueError(
            "Mismatching dimensionality of mu and cov: {} and {}"
            .format(mu.shape[-1], cov.shape[-2:])
        )
    if mu.shape[:-1] != cov.shape[:-2]:
        # Broadcast only the batch axes; one scalar per vector/matrix
        # exposes just the leading dimensions to np.broadcast.
        batch = np.broadcast(mu.value[..., 0], cov.value[..., 0, 0]).shape
        if mu.shape[:-1] != batch:
            mu = broadcast_to(mu, batch + (mu.shape[-1],))
        if cov.shape[:-2] != batch:
            cov = broadcast_to(cov, batch + cov.shape[-2:])
    return mu, cov
def _pdf(self, x):
    """Evaluate the multivariate Gaussian density at each row of ``x``.

    A 1-D ``x`` is promoted to shape ``(1, D)`` via ``broadcast_to``
    (presumably to keep the autodiff graph connected — confirm against
    its definition) and the result is collapsed back to a scalar.

    NOTE(review): uses ``assert`` for input validation, matching the
    sibling ``_log_pdf``; these checks disappear under ``python -O``.
    """
    assert x.shape[-1] == self.mu.size
    squeeze = x.ndim == 1
    if squeeze:
        x = broadcast_to(x, (1, self.mu.size))
    else:
        assert x.ndim == 2
    # Deviations as columns: d has shape (D, N).
    d = (x - self.mu).transpose()
    # Squared Mahalanobis distance per sample.
    maha = (solve(self.cov, d) * d).sum(axis=0)
    p = (
        exp(-0.5 * maha)
        / (2 * np.pi) ** (self.mu.size * 0.5)
        / sqrt(det(self.cov))
    )
    # sum() over the length-1 axis collapses the promoted input to a scalar.
    return p.sum() if squeeze else p
def _log_pdf(self, x):
    """Evaluate the multivariate Gaussian log-density at each row of ``x``.

    Mirrors ``_pdf`` but works in log space using ``logdet`` for the
    covariance normalizer. A 1-D ``x`` is promoted to ``(1, D)`` via
    ``broadcast_to`` and the result collapsed back to a scalar.

    NOTE(review): uses ``assert`` for input validation; these checks
    disappear under ``python -O``.
    """
    assert x.shape[-1] == self.mu.size
    squeeze = x.ndim == 1
    if squeeze:
        x = broadcast_to(x, (1, self.mu.size))
    else:
        assert x.ndim == 2
    # Deviations as columns: d has shape (D, N).
    d = (x - self.mu).transpose()
    logp = (
        -0.5 * (solve(self.cov, d) * d).sum(axis=0)
        - (self.mu.size * 0.5) * log(2 * np.pi)
        - 0.5 * logdet(self.cov)
    )
    # sum() over the length-1 axis collapses the promoted input to a scalar.
    return logp.sum() if squeeze else logp