Example #1
    def __call__(self, x, mask):

        y = self.fwd(x)
        y_9x9 = F.reshape(y, (9, 9))
        sum_0 = F.sum(y_9x9, axis=0)
        sum_1 = F.sum(y_9x9, axis=1)
        prod_0 = F.prod(y_9x9, axis=0)
        prod_1 = F.prod(y_9x9, axis=1)
        sum_mask_1 = F.square(F.sum(y * self.seg_mask_1) - 45.0)
        sum_mask_2 = F.square(F.sum(y * self.seg_mask_2) - 45.0)
        sum_mask_3 = F.square(F.sum(y * self.seg_mask_3) - 45.0)
        sum_mask_4 = F.square(F.sum(y * self.seg_mask_4) - 45.0)
        sum_mask_5 = F.square(F.sum(y * self.seg_mask_5) - 45.0)
        sum_mask_6 = F.square(F.sum(y * self.seg_mask_6) - 45.0)
        sum_mask_7 = F.square(F.sum(y * self.seg_mask_7) - 45.0)
        sum_mask_8 = F.square(F.sum(y * self.seg_mask_8) - 45.0)
        sum_mask_9 = F.square(F.sum(y * self.seg_mask_9) - 45.0)

        loss = (F.sum(F.sum(F.reshape(F.square((y * mask) - x), (9, 9)), axis=0) +
                      F.square(sum_0 - self.y_sum) +
                      F.log(F.square(prod_0 - self.y_prod) + 1e-8) +
                      F.square(sum_1 - self.y_sum)) +
                sum_mask_1 + sum_mask_2 + sum_mask_3 + sum_mask_4 + sum_mask_5 +
                sum_mask_6 + sum_mask_7 + sum_mask_8 + sum_mask_9)
        # Disabled alternatives for the product constraints:
        # F.log(F.square(prod_0 - self.y_prod) + 1e-8) +
        # F.log(F.square(prod_1 - self.y_prod) + 1e-8))
        return loss
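Context for the constants in this loss: judging from Examples #3 and #6 below, self.y_sum and self.y_prod are presumably 45 and 362880, the sum and product (9!) of the digits 1..9, so each squared-error term penalizes a row, column, or block of the 9x9 grid that violates a Sudoku constraint. A minimal standalone check of those targets (my own sketch; Chainer functions accept plain NumPy arrays):

import numpy as np
import chainer.functions as F

row = np.arange(1, 10, dtype=np.float32)  # a valid Sudoku row: 1..9
print(F.sum(row).data)   # 45.0
print(F.prod(row).data)  # 362880.0 == 9!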
Example #2
def distort_points(coef, x):
    """Apply distortion to given points.

    Args:
        coef (:class `~chainer.Variable` or :ref:`ndarray`):
            Distortion coefficients.
            A 2-D array of shape `(B, K)`
            K is 4 or 5 or 8. The elements corresponds to
            (k1, k2, p1, p2, [k3, [k4, k5 k6]])
            respectively.

        x (:class `~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 2, N)`
            
    Returns:
        ~chainer.Variable:
            A 3-D array of shape `(B, 2, N)`
    """
    xp = backend.get_array_module(x)
    _, K = coef.shape
    if K < 8:
        coef = F.pad(coef, ((0, 0), (0, 8 - K)), 'constant')
    coef = coef[:, :, None]

    # Compute
    # f = (1 + k1r^2 + k2r^4 + k3r^6) / (1 + k4r^2 + k5r^4 + k6r^6)
    r2 = F.sum(x * x, 1, keepdims=True)  # r^2
    f = (1 + r2 * (coef[:, 0:1] + r2 * (coef[:, 1:2] + r2 * coef[:, 4:5]))) / \
        (1 + r2 * (coef[:, 5:6] + r2 * (coef[:, 6:7] + r2 * coef[:, 7:8])))

    xy = F.prod(x, 1, keepdims=True)

    return x * f + 2 * xy * coef[:, 2:4] + coef[:, 3:1:-1] * (r2 + 2 * x * x)
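A quick sanity check for distort_points (my own sketch; B and N are arbitrary, and the module-level imports of the original snippet, chainer.functions as F and chainer.backend, are assumed): with all coefficients zero, f evaluates to 1 and the tangential terms vanish, so the points pass through unchanged.

import numpy as np

B, N = 2, 100
coef = np.zeros((B, 4), dtype=np.float32)  # k1, k2, p1, p2 all zero: identity
pts = np.random.uniform(-1, 1, (B, 2, N)).astype(np.float32)
out = distort_points(coef, pts)
assert out.shape == (B, 2, N)
# out.data equals pts exactly when every coefficient is zero.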
Example #3
    def __call__(self, x, mask):

        y = self.fwd(x)
        sum_0 = F.sum(y)
        prod_0 = F.prod(y)
        loss = (F.sum(F.square((y * mask) - x)) + F.square(sum_0 - 45.))
        # Disabled product-constraint term:
        # F.log(F.square(prod_0 - 362880.) + 1e-8))
        return loss
Example #4
def _gau_kl(p_mu, p_ln_var, q_mu, q_ln_var):
    """
    Kullback-Liebler divergence from Gaussian p_mu,p_ln_var to Gaussian q_mu,q_ln_var.
    Diagonal covariances are assumed.  Divergence is expressed in nats.
    """
    p_var = F.exp(p_ln_var)
    q_var = F.exp(q_ln_var)
    # Determinants of diagonal covariances p_var, q_var
    dp_var = F.prod(p_var, axis=0)
    dq_var = F.prod(q_var, axis=0)
    # Inverse of diagonal covariance q_var
    iq_var = 1. / q_var
    # Difference between means p_mu, q_mu
    diff = q_mu - p_mu
    return F.sum(0.5 * (
        F.log(dq_var / dp_var)  # log |\Sigma_q| / |\Sigma_p|
        + F.sum(iq_var * p_var, axis=0)  # + tr(\Sigma_q^{-1} * \Sigma_p)
        + F.sum(diff * iq_var * diff,
                axis=0)  # + (\mu_q-\mu_p)^T\Sigma_q^{-1}(\mu_q-\mu_p)
        - len(p_mu)))  # - N
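A usage sketch for _gau_kl (my own example; 1-D inputs so that axis=0 runs over the D dimensions): for unit-variance Gaussians the divergence reduces to half the squared distance between the means.

import numpy as np

d = 3
p_mu = np.zeros(d, dtype=np.float32)
p_ln_var = np.zeros(d, dtype=np.float32)  # log-variance 0 -> unit variance
q_mu = np.ones(d, dtype=np.float32)
q_ln_var = np.zeros(d, dtype=np.float32)

kl = _gau_kl(p_mu, p_ln_var, q_mu, q_ln_var)
print(kl.data)  # 1.5 == 0.5 * ||q_mu - p_mu||^2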
Example #5
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.prod(x, axis=self.axis, keepdims=self.keepdims)
        self.assertEqual(y.data.dtype, self.dtype)
        y_expect = self.x.prod(axis=self.axis, keepdims=self.keepdims)

        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-3}
        else:
            options = {}

        testing.assert_allclose(y_expect, y.data, **options)
Example #6
    def __call__(self, x, mask):

        y = self.fwd(x)
        sum_0 = F.sum(y)
        prod_0 = F.prod(y)
        # Note: these terms operate on y.data, so they are detached from the
        # computational graph; diff_total only feeds the disabled term below.
        diff_total = Variable(np.array([0.]))
        for i in range(8):
            for j in range(i + 1, 9):
                diff_total += (y.data[0][i] - y.data[0][j])**2
        loss = (  #F.sum(F.square((y * mask) - x)) +
            #F.square(sum_0 - 45.) +
            F.log(F.square(prod_0 - 362880.) + 1e-8))
        #F.log(F.square(diff_total - 540.0) + 1e-8))
        return loss
Example #7
    def __call__(self, A, B, C, num_sample):
        # C -> [batch size, 3, 64, 64]
        v = self.encoder(C)
        # v -> [batch size, hidden size=128]
        param = self.decoder(v)

        occupancy = Occupancy_networks(param, A, B,
                                       num_sample)  # [batch, num_sample, 600]

        #trans_prob = F.sum(F.softplus(-occupancy), axis=1)
        trans_prob = F.prod(occupancy, axis=1)  # -> [batch, 600]
        # l = 1.7322 / (num_sample-1)
        l = 9.0 / (num_sample - 1)
        trans_prob = (trans_prob + 1e-8)**l  # trans_prob = trans_prob^l

        return trans_prob
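The product over the sample axis appears to act as a transmittance: each factor is the per-sample probability that a ray point is unoccupied, and the exponent l rescales the product for the sample spacing. A shape-only sketch of that step (my own, with assumed sizes):

import numpy as np
import chainer.functions as F

batch, num_sample = 2, 16
occupancy = np.random.uniform(0.5, 1.0, (batch, num_sample, 600)).astype(np.float32)
l = 9.0 / (num_sample - 1)
trans_prob = (F.prod(occupancy, axis=1) + 1e-8) ** l  # product over samples, then rescale
print(trans_prob.shape)  # (2, 600)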
Example #8
    def f(x):
        return functions.prod(x, self.axis, self.keepdims)
Example #9
    def forward(self, inputs, device):
        x, = inputs
        y = functions.prod(x, axis=self.axis, keepdims=self.keepdims)
        return y,
Example #10
def undistort_points(coef, p, iteration=5):
    """Remove distortion from given points.

    Args:
        coef (:class `~chainer.Variable` or :ref:`ndarray`):
            Distortion coefficients.
            A 2-D array of shape `(B, K)`
            K is 4 or 5 or 8. The elements corresponds to
            (k1, k2, p1, p2, [k3, [k4, k5 k6]])
            respectively.

        p (:class `~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 2, N)`
            
    Returns:
        ~chainer.Variable:
            A 3-D array of shape `(B, 2, N)`
    """

    xp = backend.get_array_module(p)
    B, _, N = p.shape
    _, K = coef.shape
    if K < 8:
        coef = F.pad(coef, ((0, 0), (0, 8 - K)), 'constant')

    # (B, 8) -> (B, 1, 8)
    coef = coef[:, None, :]

    k1 = coef[:, :, 0:1]
    k2 = coef[:, :, 1:2]
    p1 = coef[:, :, 2:3]
    p2 = coef[:, :, 3:4]
    k3 = coef[:, :, 4:5]
    k4 = coef[:, :, 5:6]
    k5 = coef[:, :, 6:7]
    k6 = coef[:, :, 7:8]

    # (B, 2, N) -> (B, N, 2)
    p = p.transpose((0, 2, 1))
    r2 = F.sum(p * p, 2, keepdims=True)  # r^2

    # Compute initial guess
    X = (1 - r2 * (k1 + r2 * (3 * k1**2 - k2 + r2 *
                              (8 * k1 * k2 - 12 * k1**3 - k3)))) * p

    # Refinement by Newton-Raphson method
    for i in range(iteration):
        x = X[:, :, 0:1]
        y = X[:, :, 1:2]
        xy = F.prod(X, 2, keepdims=True)
        r2 = F.sum(X * X, 2, keepdims=True)  # r^2
        a = 1 + r2 * (k1 + r2 * (k2 + r2 * k3))
        b = 1 + r2 * (k4 + r2 * (k5 + r2 * k6))
        da = k1 + r2 * (2 * k2 + 3 * r2 * k3)
        db = k4 + r2 * (2 * k5 + 3 * r2 * k6)

        g = a / b
        dg = (da * b - a * db) / b**2

        J00 = g + 2 * x**2 * dg + 2 * y * p1 + 6 * x * p2
        J11 = g + 2 * y**2 * dg + 2 * x * p2 + 6 * y * p1
        # Off-diagonal entry: d(d_x)/dy = 2*x*y*dg + 2*p1*x + 2*p2*y.
        # (The original F.prod(x, 2, keepdims=True) reduces over a size-1 axis
        # and so equals x; the xy product computed above is what is needed.)
        J01 = 2 * xy * dg + 2 * x * p1 + 2 * y * p2
        jacobian = F.stack([J00, J01, J01, J11], axis=2).reshape(B, N, 2, 2)

        d = g * X + 2 * xy * coef[:, :, 2:4] + coef[:, :,
                                                    3:1:-1] * (r2 + 2 * X * X)

        jacobian = jacobian.reshape(-1, 2, 2)
        f = (d - p).reshape(-1, 2)
        X = X - F.batch_matmul(F.batch_inv(jacobian), f).reshape(B, N, 2)

    # (B, N, 2) -> (B, 2, N)
    return X.transpose((0, 2, 1))
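A round-trip sketch pairing this with distort_points from Example #2 (my own check; coefficient magnitudes are chosen small so the Newton-Raphson refinement converges):

import numpy as np

B, N = 1, 50
coef = np.array([[0.05, 0.01, 0.001, 0.001]], dtype=np.float32)  # k1, k2, p1, p2
pts = np.random.uniform(-0.5, 0.5, (B, 2, N)).astype(np.float32)
restored = undistort_points(coef, distort_points(coef, pts))
print(np.abs(restored.data - pts).max())  # small residual after 5 iterations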
Example #11
    def test_invalid_axis_type_in_tuple(self):
        with self.assertRaises(TypeError):
            functions.prod(self.x, axis=(1, 'x'))
Example #12
    def test_invalid_axis_type(self):
        with self.assertRaises(TypeError):
            functions.prod(self.x, axis=[0])
Example #13
    def test_duplicate_axis(self):
        with self.assertRaises(ValueError):
            functions.prod(self.x, axis=(0, 0))
Example #14
    def f(x):
        x = functions.prod(x, self.axis, self.keepdims)
        return x * x
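The tests in Examples #11-#13 pin down how functions.prod validates its axis argument: an int or a tuple of ints is accepted, a list raises TypeError, and a repeated axis raises ValueError. A quick demo of the accepted forms (standalone, NumPy input):

import numpy as np
from chainer import functions

x = np.arange(1, 7, dtype=np.float32).reshape(2, 3)
print(functions.prod(x).data)               # 720.0 -- product over all elements
print(functions.prod(x, axis=0).data)       # [ 4. 10. 18.]
print(functions.prod(x, axis=(0, 1)).data)  # 720.0 -- tuple of ints is fine
# functions.prod(x, axis=[0])     -> TypeError
# functions.prod(x, axis=(0, 0))  -> ValueError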
Example #15
    def __call__(self, x):
        self.chi = F.concat((x, self.r))
        (self.nu, self.xi) = F.split_axis(self.l_dl(self.chi), [self.Y], 1)

        (self.kr, self.betar, self.kw, self.betaw, self.e, self.v, self.f, self.ga, self.gw, self.pi) = \
            F.split_axis(self.xi, self.xi_split_indices, 1)

        self.kr = F.reshape(self.kr, (self.R, self.W))  # R * W
        self.betar = 1 + F.softplus(self.betar)  # 1 * R
        # self.kw: 1 * W
        self.betaw = 1 + F.softplus(self.betaw)  # 1 * 1
        self.e = F.sigmoid(self.e)  # 1 * W
        # self.v : 1 * W
        self.f = F.sigmoid(self.f)  # 1 * R
        self.ga = F.sigmoid(self.ga)  # 1 * 1
        self.gw = F.sigmoid(self.gw)  # 1 * 1
        self.pi = F.softmax(F.reshape(self.pi,
                                      (self.R, 3)))  # R * 3 (softmax for 3)

        # self.wr : N * R
        self.psi_mat = 1 - F.broadcast_to(self.f,
                                          (self.N, self.R)) * self.wr  # N x R
        self.psi = F.prod(self.psi_mat, 1).reshape(self.N, 1)  # N x 1

        # self.ww, self.u : N * 1
        self.u = (self.u + self.ww - (self.u * self.ww)) * self.psi

        self.a = u2a(self.u.data)  # N * 1
        self.cw = C(self.M.data, self.kw.data, self.betaw.data)  # N * 1
        self.ww = F.matmul(
            F.matmul(self.a, self.ga) + F.matmul(self.cw, 1.0 - self.ga),
            self.gw)  # N * 1
        self.M = self.M * (xp.ones(
            (self.N, self.W)).astype(xp.float32) - F.matmul(
                self.ww, self.e)) + F.matmul(self.ww, self.v)  # N * W
        if self.K > 0:
            self.p = (1.0 - F.matmul(Variable(xp.ones((self.N, 1)).astype(xp.float32)), F.reshape(F.sum(self.ww), (1, 1)))) \
                     * self.p + self.ww  # N * 1
            self.p.data = xp.sort(self.p.data, 0)
            self.p.data[0:-self.K] = 0.
            self.p.data[-self.K:] = self.p.data[-self.K:] / xp.sum(
                self.p.data[-self.K:])
            self.ww.data = xp.sort(self.ww.data, 0)
            self.ww.data[0:-self.K] = 0.
            self.ww.data[-self.K:] = self.ww[-self.K:].data / xp.sum(
                self.ww.data[-self.K:])
            self.wwrep = F.matmul(
                self.ww, Variable(xp.ones(
                    (1, self.N)).astype(xp.float32)))  # N * N
            self.ww_p_product = xp.zeros((self.N, self.N)).astype(xp.float32)
            self.ww_p_product[-self.K:, -self.K:] = F.matmul(
                self.ww[-self.K:, -self.K:],
                F.transpose(self.p[-self.K:, -self.K:])).data
            self.L = (1.0 - self.wwrep - F.transpose(
                self.wwrep)) * self.L + self.ww_p_product  # N * N
            self.L = self.L * (xp.ones(
                (self.N, self.N)) - xp.eye(self.N))  # force L[i,i] == 0
            self.L.data[self.L.data < 1 / self.K] = 0.
        else:
            self.p = (1.0 - F.matmul(Variable(xp.ones((self.N, 1)).astype(xp.float32)),
                                     F.reshape(F.sum(self.ww), (1, 1)))) \
                     * self.p + self.ww  # N * 1
            self.wwrep = F.matmul(
                self.ww, Variable(xp.ones(
                    (1, self.N)).astype(xp.float32)))  # N * N
            self.L = (1.0 - self.wwrep -
                      F.transpose(self.wwrep)) * self.L + F.matmul(
                          self.ww, F.transpose(self.p))  # N * N
            self.L = self.L * (xp.ones(
                (self.N, self.N)) - xp.eye(self.N))  # force L[i,i] == 0
        self.fo = F.matmul(self.L, self.wr)  # N * R
        self.ba = F.matmul(F.transpose(self.L), self.wr)  # N * R

        self.cr = C(self.M.data, self.kr.data, self.betar.data)

        self.bacrfo = F.concat((
            F.reshape(F.transpose(self.ba), (self.R, self.N, 1)),
            F.reshape(F.transpose(self.cr), (self.R, self.N, 1)),
            F.reshape(F.transpose(self.fo), (self.R, self.N, 1)),
        ), 2)  # R * N * 3
        self.pi = F.reshape(self.pi, (self.R, 3, 1))  # R * 3 * 1
        self.wr = F.transpose(
            F.reshape(F.batch_matmul(self.bacrfo, self.pi),
                      (self.R, self.N)))  # N * R

        self.r = F.reshape(F.matmul(F.transpose(self.M), self.wr),
                           (1, self.R * self.W))  # W * R (-> 1 * RW)

        self.y = self.l_Wr(self.r) + self.nu  # 1 * Y
        return self.y
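One step worth isolating from the block above is the memory-retention vector psi, which is where F.prod appears: each factor 1 - f_r * wr[:, r] is the probability that read head r does not free a memory slot, so the product over the R heads is the probability the slot is retained (cf. the DNC memory addressing). A standalone sketch with assumed sizes:

import numpy as np
import chainer.functions as F

N, R = 16, 4  # memory slots, read heads
f = np.random.uniform(0, 1, (1, R)).astype(np.float32)   # free gates in [0, 1]
wr = np.random.uniform(0, 1, (N, R)).astype(np.float32)  # read weightings
psi = F.prod(1 - F.broadcast_to(f, (N, R)) * wr, axis=1).reshape(N, 1)
print(psi.shape)  # (16, 1)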