Example 1
 def check_value_check(self):
     if self.valid:
         # Check that no exception is raised
         functions.dstack(self.xs)
     else:
         with self.assertRaises(type_check.InvalidType):
             functions.dstack(self.xs)
Example 2
    def __call__(self, x0, x1, x, t=None, train=True):
        # item embedding
        e0 = self.embedId(x0)
        e1 = self.embedId(x1)

        # user embedding
        r0 = functions.mean(e0, axis=1)
        r1 = functions.mean(e1, axis=1)

        # weight depending on users: dstack gives (batch, n_factor, 2),
        # pairing each feature of r0 with the corresponding feature of r1
        r = functions.dstack((r0, r1))
        r = functions.reshape(r, (-1, 2))
        r = functions.expand_dims(r, axis=0)
        r = functions.expand_dims(r, axis=0)
        w = self.linear(r)
        w = functions.reshape(w, (-1, self.n_factor))

        # output
        ei = functions.expand_dims(self.embedId(x), axis=1)
        w = functions.expand_dims(w, axis=1)
        v = functions.matmul(w, ei, transb=True)
        v = functions.reshape(v, shape=(-1, 1))
        v = self.bias(v)
        if train:
            loss = functions.sigmoid_cross_entropy(v, t)
            chainer.reporter.report({'loss': loss}, self)
            return loss
        else:
            return functions.sigmoid(v)
Example 3
    def __call__(self, x):
        h = self.ingredient(x)
        if self.pooling_type == 'max':
            h = self.max(h)
        elif self.pooling_type == 'avg':
            h = self.avg(h)
        elif self.pooling_type == 'both':
            h1 = self.max(h)
            h2 = self.avg(h)
            h = F.concat((h1, h2), axis=1)

        # path a
        a = F.dropout(F.relu(self.fc_A_0(h)), train=self.train, ratio=0.5)
        a = F.dropout(F.relu(self.fc_A_1(a)), train=self.train, ratio=0.5)
        a = self.fc_A_2(a)

        # path b
        b = F.dropout(F.relu(self.fc_B_0(h)), train=self.train, ratio=0.5)
        b = F.dropout(F.relu(self.fc_B_1(b)), train=self.train, ratio=0.5)
        b = self.fc_B_2(b)

        # path c
        c = F.dropout(F.relu(self.fc_C_0(h)), train=self.train, ratio=0.5)
        c = F.dropout(F.relu(self.fc_C_1(c)), train=self.train, ratio=0.5)
        c = self.fc_C_2(c)

        # path d
        d = F.dropout(F.relu(self.fc_D_0(h)), train=self.train, ratio=0.5)
        d = F.dropout(F.relu(self.fc_D_1(d)), train=self.train, ratio=0.5)
        d = self.fc_D_2(d)

        # path e
        e = F.dropout(F.relu(self.fc_E_0(h)), train=self.train, ratio=0.5)
        e = F.dropout(F.relu(self.fc_E_1(e)), train=self.train, ratio=0.5)
        e = self.fc_E_2(e)
        return F.dstack((a, b, c, d, e))
Example 4
    def max_pool_avg_pool(self, hs):
        # hs holds one output list per ensemble member; element-wise
        # max-pool the first output across members, average-pool the rest.
        num_output = len(hs[0])
        houts = []
        i = 0
        shape = hs[0][i].shape
        # stack the members' i-th outputs along a new third axis
        h = F.dstack([F.reshape(h[i], (shape[0], -1)) for h in hs])
        x = 1.0 * F.max(h, 2)
        x = F.reshape(x, shape)
        houts.append(x)

        for i in range(1, num_output):
            shape = hs[0][i].shape
            h = F.dstack([F.reshape(h[i], (shape[0], -1)) for h in hs])
            x = 1.0 * F.sum(h, 2) / h.shape[2]
            x = F.reshape(x, shape)
            houts.append(x)
        return houts
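The dstack-then-reduce pattern above is just an element-wise ensemble reduction: stack the members' outputs along a new third axis, then reduce over it. A minimal NumPy sketch of the same computation (data and names hypothetical):

    import numpy as np

    # three "member outputs" of identical shape (batch 2, features 4)
    outs = [np.random.rand(2, 4).astype(np.float32) for _ in range(3)]

    stacked = np.dstack(outs)        # shape (2, 4, 3)
    ens_max = stacked.max(axis=2)    # element-wise max across members
    ens_avg = stacked.mean(axis=2)   # element-wise mean across members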
Example 5
 def max_pool_concat(self, hs):
     num_output = len(hs[0])
     houts = []
     i = 0
     x = F.max(F.dstack([h[i] for h in hs]), 2)
     houts.append(x)
     for i in range(1, num_output):
         # concatenate the remaining outputs across the ensemble
         x = F.concat([h[i] for h in hs], 1)
         houts.append(x)
     return houts
Example 6
 def concat_max_pool(self, hs):
     num_output = len(hs[0])
     houts = []
     i = 0
     x = F.concat([h[i] for h in hs], 1)
     houts.append(x)
     for i in range(1, num_output):
         shape = hs[0][i].shape
         h = F.dstack([F.reshape(h[i], (shape[0], -1)) for h in hs])
         x = 1.0 * F.max(h, 2)
         x = F.reshape(x, shape)
         houts.append(x)
     return houts
Example 7
 def avg_pool_concat(self, hs):
     num_output = len(hs[0])
     houts = []
     i = 0
     h = F.dstack([h[i] for h in hs])
     x = 1.0 * F.sum(h, 2) / h.shape[2]
     houts.append(x)
     for i in range(1, num_output):
         # concatenate the remaining outputs across the ensemble
         x = F.concat([h[i] for h in hs], 1)
         houts.append(x)
     return houts
Example 8
def pose_vec2mat(vec, filler, xp=np):
    """Converts 6DoF parameters to transformation matrix

    Args:
        vec: 6DoF parameters in the order of rx, ry, rz, tx, ty, tz -- [N, 6]
        filler: constant bottom row used to complete each homogeneous
            matrix -- [N, 1, 4]
        xp: array module to use (numpy or cupy)
    Returns:
        A transformation matrix -- [N, 4, 4]
    """
    r, t = vec[:, :3], vec[:, 3:]
    rot_mat = euler2mat(r, xp=xp)
    batch_size = rot_mat.shape[0]
    t = t.reshape(batch_size, 3, 1)
    transform_mat = F.dstack((rot_mat, t))
    transform_mat = F.hstack((transform_mat, filler))
    return transform_mat
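Since rot_mat is [N, 3, 3] and t is reshaped to [N, 3, 1], F.dstack yields an [N, 3, 4] array, and F.hstack (which concatenates along axis 1) therefore needs a filler of shape [N, 1, 4] to complete the [N, 4, 4] matrix. A minimal sketch of how such a filler could be built (this helper is an assumption, not part of the source):

    import numpy as np

    def make_filler(batch_size, xp=np):
        # hypothetical helper: one constant [0, 0, 0, 1] homogeneous
        # bottom row per sample -> shape (batch_size, 1, 4)
        row = xp.asarray([0., 0., 0., 1.], dtype=xp.float32)
        return xp.tile(row.reshape(1, 1, 4), (batch_size, 1, 1))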
Example 9
    def __call__(self, x):
        h = self.ingredient(x)
        if self.pooling_type == 'max':
            h = self.max(h)
        elif self.pooling_type == 'avg':
            h = self.avg(h)
        elif self.pooling_type == 'both':
            h1 = self.max(h)
            h2 = self.avg(h)
            h = F.concat((h1, h2), axis=1)
        h = F.dropout(F.relu(self.fc1(h)), train=self.train, ratio=0.5)

        # path a
        a = F.dropout(F.relu(self.fc_A_1(h)), train=self.train, ratio=0.5)
        a = self.fc_A_2(a)

        # path b
        b = F.dropout(F.relu(self.fc_B_1(h)), train=self.train, ratio=0.5)
        b = self.fc_B_2(b)

        return F.dstack((a, b))
Example 10
 def func(*xs):
     return functions.dstack(xs)
Example 11
    def check_forward(self, xs_data):
        xs = [chainer.Variable(x) for x in xs_data]
        y = functions.dstack(xs)

        expect = numpy.dstack(self.xs)
        testing.assert_allclose(y.data, expect)
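As the reference implementation in this test suggests, functions.dstack follows numpy.dstack: inputs are promoted to at least three dimensions and then concatenated along the third axis. A quick NumPy illustration:

    import numpy as np

    # 2-d inputs gain a third axis of size 1 before concatenation
    assert np.dstack([np.zeros((2, 3)), np.ones((2, 3))]).shape == (2, 3, 2)
    # 3-d inputs are concatenated along axis 2 directly
    assert np.dstack([np.zeros((2, 3, 4)), np.ones((2, 3, 4))]).shape == (2, 3, 8)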
Example 12
 def func(*xs):
     y = functions.dstack(xs)
     return y * y
Example 13
def multibox_loss(mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k,
                  binarize=False, arm_confs=None, arm_locs=None):
    """Computes multibox losses.

    Different from :obj:`chainercv.MultiboxCoder`, cascaded offset
    regression, negative anchor filtering and ARM binarization loss are
    supported.

    This is a loss function used in [#]_.
    This function returns :obj:`loc_loss` and :obj:`conf_loss`.
    :obj:`loc_loss` is a loss for localization and
    :obj:`conf_loss` is a loss for classification.
    The formulas of these losses can be found in
    the equation (2) and (3) in the original paper.

    .. [#] Shifeng Zhang, Longyin Wen, Xiao Bian, Zhen Lei, Stan Z. Li.
       Single-Shot Refinement Neural Network for Object Detection.

    Args:
        mb_locs (chainer.Variable or array): The offsets and scales
            for predicted bounding boxes.
            Its shape is :math:`(B, K, 4)`,
            where :math:`B` is the number of samples in the batch and
            :math:`K` is the number of default bounding boxes.
        mb_confs (chainer.Variable or array): The classes of predicted
            bounding boxes.
            Its shape is :math:`(B, K, n\_class)`.
            This function assumes the first class is background (negative).
        gt_mb_locs (chainer.Variable or array): The offsets and scales
            for ground truth bounding boxes.
            Its shape is :math:`(B, K, 4)`.
        gt_mb_labels (chainer.Variable or array): The classes of ground truth
            bounding boxes.
            Its shape is :math:`(B, K)`.
        k (float): A coefficient which is used for hard negative mining.
            This value determines the ratio between the number of positives
            and that of mined negatives. The value used in the original paper
            is :obj:`3`.
        binarize (bool): If :obj:`True`, the conf loss objective is binarized
            (any class vs. background).
        arm_confs (chainer.Variable or None): If not :obj:`None`, negative
            anchor filtering is enabled. Indices where :obj:`arm_confs`
            <= 0.01 will not be used for training.
        arm_locs (chainer.Variable or None): If not :obj:`None`, cascaded
            offset regression is enabled.

    Returns:
        tuple of chainer.Variable:
        This function returns two :obj:`chainer.Variable`: :obj:`loc_loss` and
        :obj:`conf_loss`.
    """
    variance = (0.1, 0.2)

    mb_locs = chainer.as_variable(mb_locs)
    mb_confs = chainer.as_variable(mb_confs)
    gt_mb_locs = chainer.as_variable(gt_mb_locs)
    gt_mb_labels = chainer.as_variable(gt_mb_labels)

    xp = chainer.cuda.get_array_module(gt_mb_labels.array)

    if arm_locs is not None:
        if isinstance(arm_locs, chainer.Variable):
            arm_locs = arm_locs.array.copy()
        else:
            arm_locs = arm_locs.copy()

        # cascaded offset regression: refine the predicted offsets using
        # the decoded ARM offsets
        w_offset = arm_locs[:, :, 2:] + mb_locs[:, :, 2:]
        x_offset = xp.exp(arm_locs[:, :, 2:] * variance[1]) * mb_locs[:, :, :2]
        x_offset += arm_locs[:, :, :2]
        mb_locs = F.dstack((x_offset, w_offset))

    positive = gt_mb_labels.array > 0
    n_positive = positive.sum()
    if n_positive == 0:
        z = chainer.Variable(xp.zeros((), dtype=np.float32))
        return z, z

    loc_loss = F.huber_loss(mb_locs, gt_mb_locs, 1, reduce='no')
    if arm_confs is not None:
        if isinstance(arm_confs, chainer.Variable):
            arm_confs = arm_confs.array.copy()
        else:
            arm_confs = arm_confs.copy()

        # negative anchor filtering: turn ARM confidences into a hard 0/1
        # objectness mask with threshold 0.01
        objectness = xp.exp(arm_confs)
        negativeness = xp.exp(1 - arm_confs)
        objectness /= objectness + negativeness
        objectness[objectness <= 0.01] = 0
        objectness[objectness > 0.01] = 1
        objectness = objectness.reshape(objectness.shape[0],
                                        objectness.shape[1])
        n_positive = (positive * objectness).sum()
    else:
        objectness = None

    loc_loss = F.sum(loc_loss, axis=-1)
    loc_loss *= positive.astype(loc_loss.dtype)
    if objectness is not None:
        loc_loss *= objectness.astype(loc_loss.dtype)
    loc_loss = F.sum(loc_loss) / n_positive

    conf_loss = _elementwise_softmax_cross_entropy(mb_confs, gt_mb_labels,
                                                   binarize)

    hard_negative = _hard_negative(conf_loss.array, positive, k, objectness)
    if arm_confs is not None:
        positive *= objectness.astype(positive.dtype)
    conf_loss *= xp.logical_or(positive, hard_negative).astype(conf_loss.dtype)
    conf_loss = F.sum(conf_loss) / n_positive

    return loc_loss, conf_loss
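A minimal invocation sketch with random data, following the shapes documented above (the batch size, box count, and n_class value are hypothetical):

    import numpy as np

    B, K, n_class = 2, 100, 21
    mb_locs = np.random.randn(B, K, 4).astype(np.float32)
    mb_confs = np.random.randn(B, K, n_class).astype(np.float32)
    gt_mb_locs = np.random.randn(B, K, 4).astype(np.float32)
    gt_mb_labels = np.random.randint(0, n_class, size=(B, K)).astype(np.int32)

    # k=3 matches the hard negative mining ratio used in the original paper
    loc_loss, conf_loss = multibox_loss(
        mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k=3)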
Example 14
 def forward(self, inputs, device):
     xs = inputs
     y = functions.dstack(xs)
     return y,