def compute_log_determinant(self, x, W):
    # `cf` is assumed to be `chainer.functions`.
    h, w = x.shape[2:]
    det = cf.det(W)
    if det.data == 0:
        det += 1e-16  # avoid log(0) producing -inf/nan
    # A 1x1 convolution applies W at every spatial position, so the
    # Jacobian log-determinant is h * w * log|det W|.
    return h * w * cf.log(abs(det))
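As a quick sanity check, the returned value can be compared against numpy.linalg.slogdet; a minimal sketch, assuming cf is chainer.functions as above and using an illustrative shape and a random (almost surely invertible) weight:

import numpy as np
import chainer.functions as cf

W = np.random.randn(4, 4).astype(np.float32)
h, w = 8, 8  # illustrative spatial size
logdet = h * w * cf.log(abs(cf.det(W)))
_, ref = np.linalg.slogdet(W)  # reference log|det W|
np.testing.assert_allclose(logdet.data, h * w * ref, rtol=1e-3, atol=1e-3)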
Example #2
def test_answer_gpu_cpu(self):
    x = cuda.to_gpu(self.x)
    y = F.det(chainer.Variable(x))
    gpu = cuda.to_cpu(y.data)
    if self.dtype == numpy.float16:
        # numpy.linalg.det does not support float16, so build the CPU
        # reference in float32 and compare with loose tolerances.
        cpu = numpy.linalg.det(self.x.astype(numpy.float32)).astype(
            numpy.float16)
        testing.assert_allclose(gpu, cpu, atol=5e-3, rtol=5e-3)
    else:
        cpu = numpy.linalg.det(self.x)
        testing.assert_allclose(gpu, cpu)
Example #3
def _kdeparts(self, input_obs, input_ins):
    """Multivariate kernel density estimation (KDE) with Gaussian kernels.

    INPUT:
        input_obs - Variable of observed data points used to estimate the density
        input_ins - Variable of the data instance at which to evaluate the density
    OUTPUT:
        const  - constant (normalization) term of the Gaussian KDE expression
        energy - exponent terms of the Gaussian KDE (one per observation point)
    """
    n, d = input_obs.shape

    # Kernel bandwidth matrix from Silverman's rule of thumb: scale the
    # diagonal of the data covariance by the squared Silverman factor.
    silverman_factor = np.power(n * (d + 2.0) / 4.0, -1.0 / (d + 4))
    input_centered = input_obs - F.mean(input_obs, axis=0, keepdims=True)
    data_covariance = F.matmul(F.transpose(input_centered), input_centered) / n
    kernel_bw = F.diagonal(data_covariance) * (silverman_factor ** 2) * np.eye(d, d)
    const = 1 / (n * ((2 * np.pi) ** (d / 2)) * F.sqrt(F.det(kernel_bw)))

    # Energy: the quadratic form in the exponent for every observation point.
    diff = input_obs - input_ins
    energy = -0.5 * F.diagonal(F.matmul(F.matmul(diff, F.inv(kernel_bw)), F.transpose(diff)))

    return const, energy
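The two return values combine into the density estimate as const * sum(exp(energy)). Below is a minimal NumPy-only mirror of the same computation, handy for sanity-checking the Chainer version; the function name gaussian_kde_density and all shapes are illustrative assumptions. Note that this diagonal-bandwidth variant differs from scipy.stats.gaussian_kde, which scales the full data covariance.

import numpy as np

def gaussian_kde_density(obs, x):
    # obs: (n, d) observation points; x: (d,) query point.
    n, d = obs.shape
    factor = (n * (d + 2.0) / 4.0) ** (-1.0 / (d + 4))
    centered = obs - obs.mean(axis=0, keepdims=True)
    cov = centered.T @ centered / n
    bw = np.diag(np.diag(cov)) * factor ** 2  # diagonal Silverman bandwidth
    const = 1 / (n * (2 * np.pi) ** (d / 2) * np.sqrt(np.linalg.det(bw)))
    diff = obs - x
    energy = -0.5 * np.einsum('ij,jk,ik->i', diff, np.linalg.inv(bw), diff)
    return const * np.exp(energy).sum()  # p(x) = const * sum_i exp(energy_i)

rng = np.random.default_rng(0)
print(gaussian_kde_density(rng.standard_normal((200, 2)), np.zeros(2)))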
Example #4
def test_answer_gpu_cpu(self):
    x = cuda.to_gpu(self.x)
    y = F.det(chainer.Variable(x))
    gpu = cuda.to_cpu(y.data)
    cpu = numpy.linalg.det(self.x)
    testing.assert_allclose(gpu, cpu)
Example #5
def check_by_definition(self, x):
    ans = F.det(chainer.Variable(x)).data
    # 2x2 closed form: det = x00 * x11 - x01 * x10
    y = x[0, 0] * x[1, 1] - x[0, 1] * x[1, 0]
    testing.assert_allclose(ans, y)
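A concrete instance of the identity being checked, with arbitrarily chosen values:

import numpy
import chainer
import chainer.functions as F

x = numpy.array([[3.0, 1.0], [2.0, 4.0]], dtype=numpy.float32)
# det([[a, b], [c, d]]) = a*d - b*c = 3*4 - 1*2 = 10
assert numpy.isclose(float(F.det(chainer.Variable(x)).data), 10.0)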
Example #6
def test_answer_gpu_cpu(self):
    x = cuda.to_gpu(self.x)
    y = F.det(chainer.Variable(x))
    gpu = cuda.to_cpu(y.data)
    cpu = numpy.linalg.det(self.x)
    # assert_allclose also lived in chainer.gradient_check in older
    # Chainer versions, as used here.
    gradient_check.assert_allclose(gpu, cpu)
Example #7
def check_by_definition(self, x):
    ans = F.det(chainer.Variable(x)).data
    y = x[0, 0] * x[1, 1] - x[0, 1] * x[1, 0]
    gradient_check.assert_allclose(ans, y)
Example #8
def __call__(self, x):
    # Forward pass of an invertible kernel-size-1 1-D convolution:
    # returns the convolved output together with the Jacobian
    # log-determinant, summed over the batch and length dimensions.
    return F.convolution_1d(x, self.W), \
        x.shape[0] * x.shape[-1] * F.log(F.det(self.W[..., 0]))
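For context, here is a minimal self-contained sketch of the kind of invertible 1x1 convolution link such a __call__ could belong to; the class name Inv1x1Conv1D, the orthogonal initialization, and the shapes are assumptions rather than the original source:

import numpy as np
import chainer
import chainer.functions as F

class Inv1x1Conv1D(chainer.Link):
    """Hypothetical invertible 1x1 1-D convolution (Glow-style)."""

    def __init__(self, channels):
        super().__init__()
        w = np.linalg.qr(np.random.randn(channels, channels))[0]
        if np.linalg.det(w) < 0:
            w[0] *= -1  # ensure det(W) = +1 so the log is defined
        with self.init_scope():
            self.W = chainer.Parameter(w[:, :, None].astype(np.float32))

    def __call__(self, x):
        # x: (batch, channels, length); the log-determinant scales
        # with both the batch size and the sequence length.
        return F.convolution_1d(x, self.W), \
            x.shape[0] * x.shape[-1] * F.log(F.det(self.W[..., 0]))

x = np.random.randn(2, 4, 16).astype(np.float32)
y, logdet = Inv1x1Conv1D(4)(x)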
Example #9
def check_by_definition(self, x):
    ans = F.det(chainer.Variable(x)).data
    y = x[0, 0] * x[1, 1] - x[0, 1] * x[1, 0]
    testing.assert_allclose(ans, y, **self.check_forward_options)