def log_prob(self, x):
    """Return the log probability density of ``x`` under this distribution.

    Computes ``logz - 0.5 * (x - loc)^T Sigma^{-1} (x - loc)`` where the
    quadratic form is evaluated through the inverse Cholesky factor.
    """
    # Invert the Cholesky factor batch-wise, then restore the batch shape.
    flat_tril = self.scale_tril.reshape(-1, self.d, self.d)
    tril_inv = _batch_triangular_inv(flat_tril)
    tril_inv = tril_inv.reshape(self.batch_shape + (self.d, self.d))
    # Broadcast the inverse factor and the mean against the sample shape.
    tril_inv_b = broadcast.broadcast_to(tril_inv, x.shape + (self.d,))
    loc_b = broadcast.broadcast_to(self.loc, x.shape)
    # Whitened residual L^{-1} (x - mu), kept as a column vector.
    white = matmul.matmul(
        tril_inv_b, expand_dims.expand_dims(x - loc_b, axis=-1))
    # Squared Mahalanobis distance via white^T @ white; drop the two
    # trailing singleton axes left by the (1, 1) matrix product.
    maha = matmul.matmul(swapaxes.swapaxes(white, -1, -2), white)
    maha = squeeze.squeeze(maha, axis=-1)
    maha = squeeze.squeeze(maha, axis=-1)
    # Normalization constant: LOGPROBC * d minus log|det(scale_tril)|.
    logz = LOGPROBC * self.d - self._logdet(self.scale_tril)
    return broadcast.broadcast_to(logz, maha.shape) - 0.5 * maha
def log_prob(self, x):
    """Return the log probability density of ``x``.

    Uses the inverse of the Cholesky factor to evaluate the quadratic
    (Mahalanobis) term, then adds the log normalization constant.
    """
    # Batch-invert the lower-triangular factor and reshape it back.
    inv_factor = _batch_triangular_inv(
        self.scale_tril.reshape(-1, self.d, self.d))
    inv_factor = inv_factor.reshape(self.batch_shape + (self.d, self.d))
    # Align the inverse factor and the location with the sample shape.
    inv_b = broadcast.broadcast_to(inv_factor, x.shape + (self.d,))
    mean_b = broadcast.broadcast_to(self.loc, x.shape)
    # z = L^{-1} (x - mu) as a trailing column vector.
    residual = expand_dims.expand_dims(x - mean_b, axis=-1)
    z = matmul.matmul(inv_b, residual)
    # z^T z gives the squared Mahalanobis distance with shape (..., 1, 1);
    # squeeze the last axis twice to obtain a scalar per sample.
    quad = matmul.matmul(swapaxes.swapaxes(z, -1, -2), z)
    quad = squeeze.squeeze(quad, axis=-1)
    quad = squeeze.squeeze(quad, axis=-1)
    norm_const = LOGPROBC * self.d - self._logdet(self.scale_tril)
    return broadcast.broadcast_to(norm_const, quad.shape) - 0.5 * quad
def sample_n(self, n):
    """Draw ``n`` samples via the reparameterization ``loc + L @ eps``.

    Args:
        n (int): Number of samples to draw; prepended as the leading axis.

    Returns:
        Samples of shape ``(n,) + self.loc.shape``.
    """
    # Standard-normal noise with a trailing singleton axis so it acts as
    # a column vector under matmul.
    if self._is_gpu:
        eps = cuda.cupy.random.standard_normal(
            (n,) + self.loc.shape + (1,), dtype=self.loc.dtype)
    else:
        # Match the parameter dtype rather than hard-coding float32, so
        # the CPU path agrees with the GPU path for float64 parameters.
        eps = numpy.random.standard_normal(
            (n,) + self.loc.shape + (1,)).astype(self.loc.dtype)
    # Affine transform: loc + scale_tril @ eps (drop the column axis).
    return self.loc + squeeze.squeeze(
        matmul.matmul(self.scale_tril, eps), axis=-1)
def sample_n(self, n):
    """Draw ``n`` samples via the reparameterization ``loc + L @ eps``.

    The Cholesky factor and the location are explicitly tiled along the
    new leading sample axis before the affine transform.

    Args:
        n (int): Number of samples to draw; prepended as the leading axis.

    Returns:
        Samples of shape ``(n,) + self.loc.shape``.
    """
    # Standard-normal noise with a trailing singleton axis so it acts as
    # a column vector under matmul.
    if self._is_gpu:
        eps = cuda.cupy.random.standard_normal(
            (n,) + self.loc.shape + (1,), dtype=self.loc.dtype)
    else:
        # Match the parameter dtype rather than hard-coding float32, so
        # the CPU path agrees with the GPU path for float64 parameters.
        eps = numpy.random.standard_normal(
            (n,) + self.loc.shape + (1,)).astype(self.loc.dtype)
    # Tile scale_tril along the sample axis, then apply it to the noise.
    noise = matmul.matmul(repeat.repeat(
        expand_dims.expand_dims(self.scale_tril, axis=0), n, axis=0), eps)
    noise = squeeze.squeeze(noise, axis=-1)
    # Shift by the (tiled) location to complete loc + L @ eps.
    noise += repeat.repeat(expand_dims.expand_dims(
        self.loc, axis=0), n, axis=0)
    return noise