Example #1
File: test_inv.py Project: zwcdp/chainer
    def test_singular_gpu(self):
        x = chainer.Variable(
            cuda.to_gpu(numpy.zeros((1, 2, 2), dtype=numpy.float32)))

        # Should raise exception only when debug mode.
        with chainer.using_config('debug', False):
            functions.batch_inv(x)

        with chainer.using_config('debug', True):
            with self.assertRaises(ValueError):
                functions.batch_inv(x)
Example #2
def pixel2cam(K, p, z, D=None):
    """Convert pixel coordinates to 3D points

    Args:
        K (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 3, 3)`.
            Camera matrices.

        p (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 2, N)`.
            Pixel coordinates u and v.

        z (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 2-D array of shape `(B, N)`.
            z-coordinates of the corresponding (u, v).

        D (:class:`~chainer.Variable` or :ref:`ndarray`):
            Distortion coefficients.
            A 2-D array of shape `(B, K)`.
            K is 4, 5, or 8. The elements correspond to
            (k1, k2, p1, p2, [k3, [k4, k5, k6]])
            respectively.

    Returns:
        ~chainer.Variable:
            A 3-D array of shape `(B, 3, N)`.
            3D points x, y, and z.
    """
    B, _, N = p.shape
    z = z[:, None, :]
    q = F.batch_inv(K) @ to_homogenous(p)
    if D is not None:
        q = undistort_points(D, q[:, :2, :])
        q = to_homogenous(q)
    return z * q
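As a quick illustration of how F.batch_inv is used here (a hedged sketch, not part of the original example; the to_homogenous helper below is a stand-in for the one pixel2cam assumes), back-projecting pixels through the inverse camera matrix and scaling by depth reproduces the usual pinhole relation:

import numpy as np
import chainer.functions as F

def to_homogenous(p):
    # (B, 2, N) -> (B, 3, N), appending a row of ones (illustrative stand-in).
    B, _, N = p.shape
    return F.concat((p, np.ones((B, 1, N), dtype=p.dtype)), axis=1)

K = np.array([[[500.0, 0.0, 320.0],
               [0.0, 500.0, 240.0],
               [0.0, 0.0, 1.0]]], dtype=np.float32)   # (1, 3, 3) camera matrix
p = np.array([[[320.0, 100.0],
               [240.0, 200.0]]], dtype=np.float32)    # (1, 2, 2) pixel coordinates
z = np.array([[2.0, 4.0]], dtype=np.float32)          # (1, 2) depths

rays = F.matmul(F.batch_inv(K), to_homogenous(p))     # normalized camera rays
pts = rays * z[:, None, :]                            # 3D points, shape (1, 3, 2)
print(pts.data)                                       # first point is (0, 0, 2)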
Example #3
def compute_shifts(cell, pbc, cutoff):
    xp = cell.xp
    reciprocal_cell = F.batch_inv(cell)
    inv_distances = F.max(F.sqrt(F.sum(reciprocal_cell**2, axis=1)), axis=0)
    num_repeats = F.ceil(cutoff * inv_distances)
    num_repeats = F.where(pbc, num_repeats, xp.zeros_like(num_repeats.data))
    num_repeats = F.max(num_repeats, axis=0)
    r1 = xp.arange(1, num_repeats.data[0] + 1)
    r2 = xp.arange(1, num_repeats.data[1] + 1)
    r3 = xp.arange(1, num_repeats.data[2] + 1)
    o = xp.zeros(1, dtype=r1.dtype)
    return F.vstack([
        xp.array([[0.0, 0.0, 0.0]]),
        cartesian_prod(r1, r2, r3),
        cartesian_prod(r1, r2, o),
        cartesian_prod(r1, r2, -r3),
        cartesian_prod(r1, o, r3),
        cartesian_prod(r1, o, o),
        cartesian_prod(r1, o, -r3),
        cartesian_prod(r1, -r2, r3),
        cartesian_prod(r1, -r2, o),
        cartesian_prod(r1, -r2, -r3),
        cartesian_prod(o, r2, r3),
        cartesian_prod(o, r2, o),
        cartesian_prod(o, r2, -r3),
        cartesian_prod(o, o, r3),
    ]).data
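A brief, hedged sketch of the F.batch_inv(cell) step above (the cell values are made up for illustration): inverting a batch of lattice cell matrices yields the reciprocal cells, multiplying back gives identity matrices, and compute_shifts uses the column norms of the inverse to bound how many periodic images fall inside the cutoff.

import numpy as np
import chainer.functions as F

# Two illustrative 3x3 lattice cells (rows as lattice vectors).
cell = np.array([
    [[10.0, 0.0, 0.0], [0.0, 12.0, 0.0], [0.0, 0.0, 14.0]],
    [[9.0, 1.0, 0.0], [0.0, 11.0, 2.0], [1.0, 0.0, 13.0]],
], dtype=np.float32)

reciprocal_cell = F.batch_inv(cell)            # (2, 3, 3), the "reciprocal" cells
identity = F.matmul(cell, reciprocal_cell)     # approximately batched identities
print(np.round(identity.data, 4))

# Column norms of the inverse are what compute_shifts turns into repeat counts.
print(F.sqrt(F.sum(reciprocal_cell ** 2, axis=1)).data)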
Example #4
def undistort_image(K, coef, image):
    """Apply undistortion traosformation to given images.

    Args:
        K (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 3, 3)`.
            Camera matrices.

        coef (:class:`~chainer.Variable` or :ref:`ndarray`):
            Distortion coefficients.
            A 2-D array of shape `(B, K)`.
            K is 4, 5, or 8. The elements correspond to
            (k1, k2, p1, p2, [k3, [k4, k5, k6]])
            respectively.

        image (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 4-D array of shape `(B, C, H, W)`.

    Returns:
        ~chainer.Variable:
            The undistorted image.
            A 4-D array of shape `(B, C, H, W)`.
    """
    xp = backend.get_array_module(coef)
    B, _, H, W = image.shape

    ps1 = pixel_coords(xp, H, W, coef.dtype).reshape(1, 2, -1)
    ps1 = F.batch_matmul(F.batch_inv(K), to_homogenous(ps1))[:, :2, :]
    ps0 = distort_points(coef, ps1)
    ps0 = F.batch_matmul(K, to_homogenous(ps0))[:, :2, :]
    return warp_dense(image, ps0.reshape(1, 2, H, W))
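A hedged sketch of the two projections that bracket distort_points above: mapping pixel coordinates into normalized camera coordinates with F.batch_inv(K) and back through K is the identity when no distortion is applied in between (values are illustrative only).

import numpy as np
import chainer.functions as F

K = np.array([[[400.0, 0.0, 160.0],
               [0.0, 400.0, 120.0],
               [0.0, 0.0, 1.0]]], dtype=np.float32)
ps = np.array([[[10.0, 300.0], [20.0, 200.0]]], dtype=np.float32)   # (1, 2, 2) pixels
ones = np.ones((1, 1, 2), dtype=np.float32)

# pixel -> normalized camera coordinates (the role of F.batch_inv(K) above)
normalized = F.batch_matmul(F.batch_inv(K), np.concatenate((ps, ones), axis=1))[:, :2, :]
# distort_points(coef, normalized) would sit here; with zero distortion it is a no-op.
# normalized camera coordinates -> pixels again
reprojected = F.batch_matmul(K, F.concat((normalized, ones), axis=1))[:, :2, :]
print(np.allclose(reprojected.data, ps))    # True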
Example #5
def pixel2cam(depthes, pixel_coords, intrinsics, im_shape, xp=np):
    """Converter from pixel coordinates to camera coordinates.

    Args:
        depthes(Variable): Shape is (N, 3, H*W)
        pixel_coords(array): Shape is (N, 3, H*W)
        intrinsics(array): Shape is (N, 3, 3)
    Returns:
        cam_coords(Variable): Shape is (N, 3, H*W)
    """
    N, _, H, W = im_shape
    cam_coords = F.batch_matmul(F.batch_inv(intrinsics), pixel_coords)
    cam_coords = depthes * cam_coords
    cam_coords = F.concat((cam_coords, xp.ones((N, 1, H * W), 'f')), axis=1)
    return cam_coords
Example #6
def inverse_affine(A, t, x):
    """Compute A^-1(x-t)

    Args:
        A (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, M, M)`.
        t (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 2-D array of shape `(B, M)`.
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, M, N)`.

    Returns:
        ~chainer.Variable:
            A 3-D array of shape `(B, M, N)`.
    """

    return F.batch_inv(A) @ (x - F.expand_dims(t, axis=2))
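A hedged round-trip sketch (assuming the inverse_affine defined above is in scope; the numbers are illustrative): applying an affine map y = A x + t and then inverse_affine(A, t, y) should recover x.

import numpy as np

B, M, N = 2, 3, 4
rng = np.random.RandomState(0)
A = rng.randn(B, M, M).astype(np.float32) + 3 * np.eye(M, dtype=np.float32)  # kept well-conditioned
t = rng.randn(B, M).astype(np.float32)
x = rng.randn(B, M, N).astype(np.float32)

y = np.matmul(A, x) + t[:, :, None]        # forward affine map, plain NumPy
x_rec = inverse_affine(A, t, y)            # uses F.batch_inv internally
print(np.allclose(x_rec.data, x, atol=1e-3))   # True up to float32 error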
Example #7
 def test_identity_gpu(self):
     eye = cuda.to_gpu(_make_eye(self.x.shape))
     x = chainer.Variable(cuda.to_gpu(self.x))
     y = functions.matmul(x, functions.batch_inv(x))
     testing.assert_allclose(y.data, eye, **self.check_forward_options)
Example #8
 def check_forward(self, x_data, atol=1e-7, rtol=1e-7):
     x = chainer.Variable(x_data)
     y = functions.batch_inv(x)
     testing.assert_allclose(_inv(self.x), y.data,
                             **self.check_forward_options)
Example #9
    def backward(self):
        """ LQR backward recursion
        Note: Ks ks is reversed version fo original
        :return: Ks, ks gain
        """
        Ks = []
        ks = []
        Vt = None
        vt = None
        # self.T-1 to 0 loop
        for t in range(self.T - 1, -1, -1):
            # initial case
            if t == self.T - 1:
                Qt = self.C[t]
                qt = self.c[t]
            else:
                Ft = self.F[t]
                Ft_T = F.transpose(Ft, axes=(0, 2, 1))
                assert Ft.dtype.kind == 'f', "Ft dtype"
                assert Vt.dtype.kind == 'f', "Vt dtype"
                Qt = self.C[t] + F.matmul(F.matmul(Ft_T, Vt), Ft)
                if self.f is None:
                    # NOTE f.nelement() == 0 condition ?
                    qt = self.c[t] + bmv(Ft_T, vt)
                else:
                    # f is not none
                    ft = self.f[t]
                    qt = self.c[t] + bmv(F.matmul(Ft_T, Vt), ft) + bmv(Ft_T, vt)
            assert list(qt.shape) == [self.n_batch, self.n_sc], "qt dim mismatch"
            assert list(Qt.shape) == [self.n_batch, self.n_sc, self.n_sc], str(Qt.shape) + " Qt dim mismatch"
            Qt_xx = Qt[:, :self.n_state, :self.n_state]
            Qt_xu = Qt[:, :self.n_state, self.n_state:]
            Qt_ux = Qt[:, self.n_state:, :self.n_state]
            Qt_uu = Qt[:, self.n_state:, self.n_state:]
            qt_x = qt[:, :self.n_state]
            qt_u = qt[:, self.n_state:]
            assert list(Qt_uu.shape) == [self.n_batch, self.n_ctrl, self.n_ctrl], "Qt_uu dim mismatch"
            assert list(Qt_ux.shape) == [self.n_batch, self.n_ctrl, self.n_state], "Qt_ux dim mismatch"
            assert list(Qt_xu.shape) == [self.n_batch, self.n_state, self.n_ctrl], "Qt_xu dim mismatch"
            assert list(qt_x.shape) == [self.n_batch, self.n_state], "qt_x dim mismatch"
            assert list(qt_u.shape) == [self.n_batch, self.n_ctrl], "qt_u dim mismatch"
            # Next calculate Kt and kt
            # TODO LU decomposition
            if self.n_ctrl == 1 and self.u_zero_Index is None:
                # scalar
                Kt = - (1. / Qt_uu) * Qt_ux
                kt = - (1. / F.squeeze(Qt_uu, axis=2)) * qt_u
            elif self.u_zero_Index is None:
                # matrix
                Qt_uu_inv = F.batch_inv(Qt_uu)
                Kt = - F.matmul(Qt_uu_inv, Qt_ux)
                kt = - bmv(Qt_uu_inv, qt_u)
            else:
                # u_zero_index is not none
                index = self.u_zero_Index[t]
                qt_u_ = copy.deepcopy(qt_u)
                qt_u_ = F.where(index, self.xp.zeros_like(qt_u_.array), qt_u_)
                Qt_uu_ = copy.deepcopy(Qt_uu)
                notI = 1.0 - F.cast(index, qt_u_.dtype)
                Qt_uu_I = 1 - bger(notI, notI)
                Qt_uu_I = F.cast(Qt_uu_I, 'bool')
                Qt_uu_ = F.where(Qt_uu_I, self.xp.zeros_like(Qt_uu_.array), Qt_uu_)
                index_qt_uu = self.xp.array([self.xp.diagflat(index[i]) for i in range(index.shape[0])])
                Qt_uu_ = F.where(F.cast(index_qt_uu, 'bool'), Qt_uu + 1e-8, Qt_uu)
                Qt_ux_ = copy.deepcopy(Qt_ux)
                index_qt_ux = F.repeat(F.expand_dims(index, axis=2), Qt_ux.shape[2], axis=2)
                Qt_ux_ = F.where(index_qt_ux, self.xp.zeros_like(Qt_ux_.array), Qt_ux)
                #  print("qt_u_", qt_u_)
                #  print("Qt_uu_", Qt_uu_)
                #  print("Qt_ux_", Qt_ux_)
                if self.n_ctrl == 1:
                    Kt = - (1. / Qt_uu_) * Qt_ux_  # NOTE different from original
                    kt = - (1. / F.squeeze(Qt_uu_, axis=2)) * qt_u_
                else:
                    Qt_uu_LU_ = batch_lu_factor(Qt_uu_)
                    Kt = - batch_lu_solve(Qt_uu_LU_, Qt_ux_)
                    kt = - batch_lu_solve(Qt_uu_LU_, qt_u_)
            assert list(Kt.shape) == [self.n_batch, self.n_ctrl, self.n_state], "Kt dim mismatch"
            assert list(kt.shape) == [self.n_batch, self.n_ctrl], "kt dim mismatch"
            Kt_T = F.transpose(Kt, axes=(0, 2, 1))
            Ks.append(Kt)
            ks.append(kt)
            Vt = Qt_xx + F.matmul(Qt_xu, Kt) + F.matmul(Kt_T, Qt_ux) + F.matmul(F.matmul(Kt_T, Qt_uu), Kt)
            vt = qt_x + bmv(Qt_xu, kt) + bmv(Kt_T, qt_u) + bmv(F.matmul(Kt_T, Qt_uu), kt)

        assert len(Ks) == self.T, "Ks length error"

        Ks.reverse()
        ks.reverse()
        return Ks, ks
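A small, hedged sketch of the gain computation at the heart of the recursion above: in the unconstrained branch, Kt and kt come from a batched inverse of Qt_uu. Random symmetric positive-definite matrices stand in for the real cost blocks here, and the example's bmv helper is replaced by F.matmul plus a squeeze.

import numpy as np
import chainer.functions as F

n_batch, n_ctrl, n_state = 4, 2, 3
rng = np.random.RandomState(0)

# Random SPD Qt_uu plus illustrative Qt_ux and qt_u.
L = rng.randn(n_batch, n_ctrl, n_ctrl).astype(np.float32)
Qt_uu = np.einsum('bij,bkj->bik', L, L) + np.eye(n_ctrl, dtype=np.float32)
Qt_ux = rng.randn(n_batch, n_ctrl, n_state).astype(np.float32)
qt_u = rng.randn(n_batch, n_ctrl).astype(np.float32)

Qt_uu_inv = F.batch_inv(Qt_uu)
Kt = -F.matmul(Qt_uu_inv, Qt_ux)                                  # (B, n_ctrl, n_state)
kt = -F.squeeze(F.matmul(Qt_uu_inv, qt_u[:, :, None]), axis=2)    # (B, n_ctrl)
print(Kt.shape, kt.shape)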
Example #10
File: test_inv.py Project: zwcdp/chainer
 def test_singular_cpu(self):
     x = chainer.Variable(numpy.zeros((1, 2, 2), dtype=numpy.float32))
     with self.assertRaises(ValueError):
         functions.batch_inv(x)
Example #11
File: test_inv.py Project: zwcdp/chainer
 def test_identity(self, backend_config):
     x, = self.generate_inputs()
     x = chainer.Variable(backend_config.get_array(x))
     y = functions.matmul(x, functions.batch_inv(x))
     testing.assert_allclose(
         y.data, _make_eye(x.shape), **self.check_forward_options)
Example #12
File: test_inv.py Project: asi1024/chainer
 def test_identity_gpu(self):
     eye = cuda.to_gpu(_make_eye(self.x.shape))
     x = chainer.Variable(cuda.to_gpu(self.x))
     y = functions.matmul(x, functions.batch_inv(x))
     testing.assert_allclose(
         y.data, eye, **self.check_forward_options)
Example #13
 def test_invalid_ndim(self):
     with self.assertRaises(TypeError):
         functions.batch_inv(chainer.Variable(numpy.zeros((2, 2))))
Example #14
 def check_forward(self, x_data, atol=1e-7, rtol=1e-7):
     x = chainer.Variable(x_data)
     y = functions.batch_inv(x)
     gradient_check.assert_allclose(
         _inv(self.x), y.data, atol=atol, rtol=rtol)
Example #15
 def test_identity_gpu(self):
     eye = cuda.to_gpu(_make_eye(self.x.shape))
     x = chainer.Variable(cuda.to_gpu(self.x))
     y = functions.batch_matmul(x, functions.batch_inv(x))
     gradient_check.assert_allclose(y.data, eye, rtol=1e-4, atol=1e-4)
Example #16
 def test_identity_cpu(self):
     eye = _make_eye(self.x.shape)
     x = chainer.Variable(self.x)
     y = functions.batch_matmul(x, functions.batch_inv(x))
     gradient_check.assert_allclose(y.data, eye,
                                    **self.check_forward_options)
Example #17
def undistort_points(coef, p, iteration=5):
    """Remove distortion from given points.

    Args:
        coef (:class:`~chainer.Variable` or :ref:`ndarray`):
            Distortion coefficients.
            A 2-D array of shape `(B, K)`.
            K is 4, 5, or 8. The elements correspond to
            (k1, k2, p1, p2, [k3, [k4, k5, k6]])
            respectively.

        p (:class:`~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 2, N)`.

    Returns:
        ~chainer.Variable:
            A 3-D array of shape `(B, 2, N)`.
    """

    xp = backend.get_array_module(p)
    B, _, N = p.shape
    _, K = coef.shape
    if K < 8:
        coef = F.pad(coef, ((0, 0), (0, 8 - K)), 'constant')

    # (B, 8) -> (B, 1, 8)
    coef = coef[:, None, :]

    k1 = coef[:, :, 0:1]
    k2 = coef[:, :, 1:2]
    p1 = coef[:, :, 2:3]
    p2 = coef[:, :, 3:4]
    k3 = coef[:, :, 4:5]
    k4 = coef[:, :, 5:6]
    k5 = coef[:, :, 6:7]
    k6 = coef[:, :, 7:8]

    # (B, 2, N) -> (B, N, 2)
    p = p.transpose((0, 2, 1))
    r2 = F.sum(p * p, 2, keepdims=True)  # r^2

    # Compute initial guess
    X = (1 - r2 * (k1 + r2 * (3 * k1**2 - k2 + r2 *
                              (8 * k1 * k2 - 12 * k1**3 - k3)))) * p

    # Refinement by Newton-Raphson method
    for i in range(iteration):
        x = X[:, :, 0:1]
        y = X[:, :, 1:2]
        xy = F.prod(X, 2, keepdims=True)
        r2 = F.sum(X * X, 2, keepdims=True)  # r^2
        a = 1 + r2 * (k1 + r2 * (k2 + r2 * k3))
        b = 1 + r2 * (k4 + r2 * (k5 + r2 * k6))
        da = k1 + r2 * (2 * k2 + 3 * r2 * k3)
        db = k4 + r2 * (2 * k5 + 3 * r2 * k6)

        g = a / b
        dg = (da * b - a * db) / b**2

        J00 = g + 2 * x**2 * dg + 2 * y * p1 + 6 * x * p2
        J11 = g + 2 * y**2 * dg + 2 * x * p2 + 6 * y * p1
        J01 = 2 * xy * dg + 2 * x * p1 + 2 * y * p2
        jacobian = F.stack([J00, J01, J01, J11], axis=2).reshape(B, N, 2, 2)

        d = g * X + 2 * xy * coef[:, :, 2:4] + coef[:, :,
                                                    3:1:-1] * (r2 + 2 * X * X)

        jacobian = jacobian.reshape(-1, 2, 2)
        f = (d - p).reshape(-1, 2)
        X = X - F.batch_matmul(F.batch_inv(jacobian), f).reshape(B, N, 2)

    # (B, N, 2) -> (B, 2, N)
    return X.transpose((0, 2, 1))
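The Newton step inside the loop above boils down to solving a batch of 2x2 linear systems with F.batch_inv. Here is a hedged standalone sketch of just that step, with made-up Jacobians and residuals flattened to (B*N, 2, 2) and (B*N, 2) as in the loop:

import numpy as np
import chainer.functions as F

B, N = 2, 5
rng = np.random.RandomState(0)

# Per-point 2x2 Jacobians kept near identity so they stay invertible.
jacobian = (np.eye(2, dtype=np.float32)
            + 0.1 * rng.randn(B * N, 2, 2).astype(np.float32))
f = rng.randn(B * N, 2).astype(np.float32)

step = F.batch_matmul(F.batch_inv(jacobian), f)   # (B*N, 2, 1)
step = step.reshape(B, N, 2)                      # matches X's layout above
print(step.shape)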
Example #18
 def test_invalid_shape(self):
     with self.assertRaises(TypeError):
         functions.batch_inv(chainer.Variable(numpy.zeros((1, 2, 1))))
Example #19
File: test_inv.py Project: asi1024/chainer
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.batch_inv(x)
     x1 = self.x.astype(self.check_forward_dtype, copy=False)
     testing.assert_allclose(_inv(x1), y.data, **self.check_forward_options)
Example #20
 def check_forward(self, x_data, atol=1e-7, rtol=1e-7):
     x = chainer.Variable(x_data)
     y = functions.batch_inv(x)
     testing.assert_allclose(
         _inv(self.x), y.data, **self.check_forward_options)
Example #21
File: test_inv.py Project: zwcdp/chainer
 def forward(self, inputs, device):
     x, = inputs
     return functions.batch_inv(x),
Example #22
 def test_invalid_ndim(self):
     with self.assertRaises(TypeError):
         functions.batch_inv(chainer.Variable(numpy.zeros((2, 2))))
Example #23
File: test_inv.py Project: zwcdp/chainer
 def test_invalid_shape(self):
     x = chainer.Variable(numpy.zeros((1, 2, 1), dtype=numpy.float32))
     with self.assertRaises(type_check.InvalidType):
         functions.batch_inv(x)
Example #24
 def test_invalid_shape(self):
     with self.assertRaises(TypeError):
         functions.batch_inv(chainer.Variable(numpy.zeros((1, 2, 1))))
Example #25
File: net.py Project: KaijiS/DAGMM
    def fwd(self, x, isTraining=False, gpu=-1):
        if gpu >= 0:
            xp = cuda.cupy
        else:
            xp = np

        # Compression Network
        # エンコード
        zc = self.Encoder(x)
        # Decode
        y = self.Decoder(zc)

        # Compute reconstruction errors: relative Euclidean distance and cosine similarity
        rEucDist = self.relativeEuclideanDistance(x, y)
        CosSim = self.cosineSimilarity(x, y)
        # Concatenate the latent representation with the reconstruction errors
        z = F.concat((zc, rEucDist, CosSim), axis=1)

        # Estimation Network
        gamma = self.Estimation(z)

        NumOfData, NumOfClass = gamma.shape
        _, zDim = z.shape

        # GMM
        # From each sample's membership probability for each component, obtain each component's mixture weight, mean vector, and covariance matrix
        if chainer.config.train:
            phi = self._phi(gamma)
            mu = self._mu(z, gamma)
            sigma = self._sigma(z, gamma, mu)

            os.makedirs(self.dirGMMparameters, exist_ok=True)
            if chainer.cuda.available:
                np.savetxt(self.dirGMMparameters + "phi.csv",
                           np.array((phi.data).get()),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "mu.csv",
                           np.array((mu.data).get()),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "sigma.csv",
                           np.array(
                               (sigma.data).get()).reshape(NumOfClass, -1),
                           delimiter=",")
            else:
                np.savetxt(self.dirGMMparameters + "phi.csv",
                           np.array(phi.data),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "mu.csv",
                           np.array(mu.data),
                           delimiter=",")
                np.savetxt(self.dirGMMparameters + "sigma.csv",
                           np.array(sigma.data).reshape(NumOfClass, -1),
                           delimiter=",")

        else:
            phi = Variable(
                xp.array(
                    np.loadtxt(self.dirGMMparameters + "phi.csv",
                               delimiter=",").astype(np.float32)))
            mu = Variable(
                xp.array(
                    np.loadtxt(self.dirGMMparameters + "mu.csv",
                               delimiter=",").astype(np.float32)))
            sigma = Variable(
                xp.array((np.loadtxt(self.dirGMMparameters + "sigma.csv",
                                     delimiter=",")).reshape(
                                         NumOfClass, zDim,
                                         zDim).astype(np.float32)))

        # Compute the energy
        # eps = 1e-3  # guard against rank deficiency or a zero determinant
        # sigma = sigma + Variable(xp.array(np.array(list(np.eye(zDim))*NumOfClass).reshape(NumOfClass,zDim,zDim).astype(np.float32)) * eps)
        sigmaInv = F.batch_inv(sigma)  # shape(3D) -> NumOfClass, zDim, zDim
        z_broadcast = F.broadcast_to(
            z, (NumOfClass, NumOfData,
                zDim))  # shape(3D) -> NumOfClass, NumOfData, zDim
        mu_broadcast = F.transpose(F.broadcast_to(
            mu, (NumOfData, NumOfClass, zDim)),
                                   axes=(1, 0, 2))  # shape(3D) -> NumOfClass, NumOfData, zDim
        sa = z_broadcast - mu_broadcast
        listForEnr1 = [
            F.matmul(sa[i], sigmaInv[i]) for i in range(NumOfClass)
        ]  # shape(3D) -> NumOfClass, NumOfData, zDim
        listForEnr2 = [
            F.sum(listForEnr1[i] * sa[i], axis=1) for i in range(NumOfClass)
        ]  # shape(2D) -> NumOfClass, NumOfData
        varForEnr = F.stack(
            [listForEnr2[i] for i in range(len(listForEnr2))],
            axis=0)  # convert the list to a Variable  # shape(2D) -> NumOfClass, NumOfData
        numer = F.exp(-(1 / 2) *
                      varForEnr)  # numerator  # shape(2D) -> NumOfClass, NumOfData
        denom = F.transpose(
            F.broadcast_to(
                F.sqrt(F.batch_det(2 * math.pi * sigma)),
                (NumOfData,
                 NumOfClass)))  # denominator  # shape(2D) -> NumOfClass, NumOfData
        phi_broadcast = F.transpose(
            F.broadcast_to(
                phi,
                (NumOfData, NumOfClass)))  # shape(2D) -> NumOfClass, NumOfData
        energy = -1 * F.log(
            F.sum(phi_broadcast * (numer / denom), axis=0,
                  keepdims=True))  # shape(2D) -> 1, NumOfData
        energy = F.transpose(energy)  # shape(2D) -> NumOfData,1

        if isTraining:
            return y, energy, sigma
        else:
            return z, energy, gamma, y
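A compact, hedged sketch of the energy core above: F.batch_inv turns the batched covariance matrices into precisions and F.batch_det supplies the normalizers, from which a Mahalanobis-style energy per sample can be assembled. Random small matrices stand in for the learned sigma, mu, and phi, and F.broadcast_to is used explicitly as in the original.

import math
import numpy as np
import chainer.functions as F

num_class, num_data, z_dim = 3, 5, 2
rng = np.random.RandomState(0)

z = rng.randn(num_data, z_dim).astype(np.float32)
mu = rng.randn(num_class, z_dim).astype(np.float32)
phi = np.full((num_class,), 1.0 / num_class, dtype=np.float32)
A = rng.randn(num_class, z_dim, z_dim).astype(np.float32)
sigma = np.einsum('kij,klj->kil', A, A) + np.eye(z_dim, dtype=np.float32)  # SPD stand-ins

sigma_inv = F.batch_inv(sigma)                            # (K, D, D) precisions
diff = z[None, :, :] - mu[:, None, :]                     # (K, N, D)
maha = F.sum(F.matmul(diff, sigma_inv) * diff, axis=2)    # (K, N) Mahalanobis terms
numer = F.exp(-0.5 * maha)
denom = F.sqrt(F.batch_det(2 * math.pi * sigma))          # (K,) normalizers
denom_b = F.broadcast_to(F.expand_dims(denom, axis=1), (num_class, num_data))
phi_b = np.broadcast_to(phi[:, None], (num_class, num_data))
energy = -F.log(F.sum(phi_b * numer / denom_b, axis=0))   # (N,) per-sample energy
print(energy.shape)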
Example #26
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.batch_inv(x)
     x1 = self.x.astype(self.check_forward_dtype, copy=False)
     testing.assert_allclose(_inv(x1), y.data, **self.check_forward_options)