Example #1
0
 def test_answer_gpu_cpu(self):
     """Check that GPU batch_det matches numpy.linalg.det on the CPU."""
     device_x = cuda.to_gpu(self.x)
     result = F.batch_det(chainer.Variable(device_x))
     gpu = cuda.to_cpu(result.data)
     # float16 loses precision, so compute the reference in float32,
     # cast back, and compare with a loose tolerance.
     if self.dtype == numpy.float16:
         reference = numpy.linalg.det(
             self.x.astype(numpy.float32)).astype(numpy.float16)
         testing.assert_allclose(gpu, reference, atol=5e-3, rtol=5e-3)
     else:
         reference = numpy.linalg.det(self.x)
         testing.assert_allclose(gpu, reference)
Example #2
0
 def test_answer_gpu_cpu(self):
     """GPU batch_det should agree with the numpy determinant."""
     det_gpu = cuda.to_cpu(
         F.batch_det(chainer.Variable(cuda.to_gpu(self.x))).data)
     if self.dtype == numpy.float16:
         # Half precision: take the reference in float32, cast back,
         # and widen the tolerance accordingly.
         det_cpu = numpy.linalg.det(
             self.x.astype(numpy.float32)).astype(numpy.float16)
         tolerance = {'atol': 5e-3, 'rtol': 5e-3}
     else:
         det_cpu = numpy.linalg.det(self.x)
         tolerance = {}
     testing.assert_allclose(det_gpu, det_cpu, **tolerance)
Example #3
0
 def test_invalid_shape(self):
     """batch_det must reject a 2-D (non-batched) input."""
     bad_input = chainer.Variable(numpy.zeros((1, 2)))
     self.assertRaises(type_check.InvalidType, F.batch_det, bad_input)
Example #4
0
 def test_answer_gpu_cpu(self):
     """Determinant computed on the GPU should match numpy on the CPU."""
     on_gpu = F.batch_det(chainer.Variable(cuda.to_gpu(self.x)))
     expected = numpy.linalg.det(self.x)
     testing.assert_allclose(cuda.to_cpu(on_gpu.data), expected)
Example #5
0
 def test_invalid_shape(self):
     """A matrix without a batch axis must fail the type check."""
     with self.assertRaises(type_check.InvalidType):
         non_batched = numpy.zeros((1, 2))
         F.batch_det(chainer.Variable(non_batched))
Example #6
0
 def test_answer_gpu_cpu(self):
     """Compare GPU batch_det against numpy.linalg.det on the CPU.

     Uses ``testing.assert_allclose`` like the other tests in this
     suite; ``gradient_check.assert_allclose`` is a deprecated alias
     of the same function.
     """
     x = cuda.to_gpu(self.x)
     y = F.batch_det(chainer.Variable(x))
     gpu = cuda.to_cpu(y.data)
     cpu = numpy.linalg.det(self.x)
     testing.assert_allclose(gpu, cpu)
Example #7
0
File: net.py Project: KaijiS/DAGMM
    def fwd(self, x, isTraining=False, gpu=-1):
        """Forward pass of the DAGMM: compression net, estimation net, GMM energy.

        Args:
            x: Input mini-batch (assumed 2-D, one sample per row — TODO
                confirm against the caller).
            isTraining: When True, return ``(y, energy, sigma)``;
                otherwise return ``(z, energy, gamma, y)``.
            gpu: Device id; ``>= 0`` selects cupy arrays, otherwise numpy.

        Side effects: while ``chainer.config.train`` is True, the GMM
        parameters (phi, mu, sigma) are recomputed from the batch and
        written to CSV files under ``self.dirGMMparameters``; otherwise
        they are loaded back from those files.
        """
        # Pick the array module matching the requested device.
        if gpu >= 0:
            xp = cuda.cupy
        else:
            xp = np

        # Compression Network
        # Encode the input to the latent code zc.
        zc = self.Encoder(x)
        # Decode back to a reconstruction y.
        y = self.Decoder(zc)

        # Reconstruction-error features: relative Euclidean distance and
        # cosine similarity between the input and its reconstruction.
        rEucDist = self.relativeEuclideanDistance(x, y)
        CosSim = self.cosineSimilarity(x, y)
        # Concatenate the latent code with the reconstruction features.
        z = F.concat((zc, rEucDist, CosSim), axis=1)

        # Estimation Network: soft assignment of each sample to the
        # mixture components.
        gamma = self.Estimation(z)

        NumOfData, NumOfClass = gamma.shape
        _, zDim = z.shape

        # GMM
        # From the membership probabilities, derive each component's
        # mixture weight (phi), mean vector (mu) and covariance (sigma).
        if chainer.config.train:
            phi = self._phi(gamma)
            mu = self._mu(z, gamma)
            sigma = self._sigma(z, gamma, mu)

            # Persist the GMM parameters so inference mode can reload them.
            os.makedirs(self.dirGMMparameters, exist_ok=True)
            if chainer.cuda.available:
                # Device arrays: .get() copies from GPU to host first.
                np.savetxt(self.dirGMMparameters + "phi.csv",
                           np.array((phi.data).get()),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "mu.csv",
                           np.array((mu.data).get()),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "sigma.csv",
                           np.array(
                               (sigma.data).get()).reshape(NumOfClass, -1),
                           delimiter=",")
            else:
                np.savetxt(self.dirGMMparameters + "phi.csv",
                           np.array(phi.data),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "mu.csv",
                           np.array(mu.data),
                           delimiter=",")
                # sigma is 3-D; flatten each component's matrix to one row.
                np.savetxt(self.dirGMMparameters + "sigma.csv",
                           np.array(sigma.data).reshape(NumOfClass, -1),
                           delimiter=",")

        else:
            # Inference: reload the parameters saved during training.
            phi = Variable(
                xp.array(
                    np.loadtxt(self.dirGMMparameters + "phi.csv",
                               delimiter=",").astype(np.float32)))
            mu = Variable(
                xp.array(
                    np.loadtxt(self.dirGMMparameters + "mu.csv",
                               delimiter=",").astype(np.float32)))
            sigma = Variable(
                xp.array((np.loadtxt(self.dirGMMparameters + "sigma.csv",
                                     delimiter=",")).reshape(
                                         NumOfClass, zDim,
                                         zDim).astype(np.float32)))

        # Compute the sample energy.
        # eps = 1e-3  # workaround for rank deficiency / zero determinant
        # sigma = sigma + Variable(xp.array(np.array(list(np.eye(zDim))*NumOfClass).reshape(NumOfClass,zDim,zDim).astype(np.float32)) * eps)
        sigmaInv = F.batch_inv(sigma)  # shape(3D) -> NumOfClass, zDim, zDim
        z_broadcast = F.broadcast_to(
            z, (NumOfClass, NumOfData,
                zDim))  # shape(3D) -> NumOfClass, NumOfData, zDim
        mu_broadcast = F.transpose(F.broadcast_to(
            mu, (NumOfData, NumOfClass, zDim)),
                                   axes=(1, 0, 2))  # shape(3D) -> NumOfClass, NumOfData, zDim
        sa = z_broadcast - mu_broadcast
        # Mahalanobis-style quadratic form (z - mu)^T Sigma^{-1} (z - mu),
        # evaluated per component.
        listForEnr1 = [
            F.matmul(sa[i], sigmaInv[i]) for i in range(NumOfClass)
        ]  # shape(3D) -> NumOfClass, NumOfData, zDim
        listForEnr2 = [
            F.sum(listForEnr1[i] * sa[i], axis=1) for i in range(NumOfClass)
        ]  # shape(2D) -> NumOfClass, NumOfData
        varForEnr = F.stack(
            [listForEnr2[i] for i in range(len(listForEnr2))],
            axis=0)  # stack the list into one Variable # shape(2D) -> NumOfClass, NumOfData
        # NOTE(review): (1 / 2) relies on Python-3 true division; under
        # Python 2 this would be integer 0.
        numer = F.exp(-(1 / 2) *
                      varForEnr)  # numerator of the Gaussian density  # shape(2D) -> NumOfClass, NumOfData
        denom = F.transpose(
            F.broadcast_to(
                F.sqrt(F.batch_det(2 * math.pi * sigma)),
                (NumOfData,
                 NumOfClass)))  # denominator: sqrt(det(2*pi*Sigma))  # shape(2D) -> NumOfClass, NumOfData
        phi_broadcast = F.transpose(
            F.broadcast_to(
                phi,
                (NumOfData, NumOfClass)))  # shape(2D) -> NumOfClass, NumOfData
        # Energy E(z) = -log sum_k phi_k * N(z; mu_k, Sigma_k).
        energy = -1 * F.log(
            F.sum(phi_broadcast * (numer / denom), axis=0,
                  keepdims=True))  # shape(2D) -> 1, NumOfData
        energy = F.transpose(energy)  # shape(2D) -> NumOfData,1

        if isTraining:
            return y, energy, sigma
        else:
            return z, energy, gamma, y