Example 1
    def check_invalid_option(self, xp):
        a = xp.asarray(self.a)
        p = xp.asarray(self.p)
        n = xp.asarray(self.n)

        with self.assertRaises(ValueError):
            functions.triplet(a, p, n, reduce='invalid_option')
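The only values accepted for reduce are 'mean' and 'no'; anything else raises the ValueError this test expects. A minimal sketch of the two valid calls (output shapes follow the forward tests below):

    loss_scalar = functions.triplet(a, p, n, reduce='mean')   # shape ()
    loss_per_sample = functions.triplet(a, p, n, reduce='no')  # shape (N,)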
Example 2
    def check_forward(self, a_data, p_data, n_data):
        a_val = chainer.Variable(a_data)
        p_val = chainer.Variable(p_data)
        n_val = chainer.Variable(n_data)
        loss = functions.triplet(a_val, p_val, n_val, self.margin, self.reduce)
        if self.reduce == 'mean':
            self.assertEqual(loss.data.shape, ())
        else:
            self.assertEqual(loss.data.shape, (self.batchsize, ))
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = cuda.to_cpu(loss.data)

        #
        # Compute expected value
        #
        loss_expect = numpy.empty((self.a.shape[0], ), dtype=numpy.float32)
        for i in six.moves.range(self.a.shape[0]):
            ad, pd, nd = self.a[i], self.p[i], self.n[i]
            dp = numpy.sum((ad - pd)**2)
            dn = numpy.sum((ad - nd)**2)
            loss_expect[i] = max((dp - dn + self.margin), 0)
        if self.reduce == 'mean':
            loss_expect = loss_expect.mean()
        numpy.testing.assert_allclose(loss_expect,
                                      loss_value,
                                      rtol=1e-4,
                                      atol=1e-4)
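The loop above computes the reference value L_i = max(||a_i - p_i||^2 - ||a_i - n_i||^2 + margin, 0), i.e. the triplet loss with squared Euclidean distances. A standalone NumPy sketch of the same computation (triplet_reference is a hypothetical helper, not part of Chainer):

    import numpy as np

    def triplet_reference(a, p, n, margin=0.2, reduce='mean'):
        # per-sample squared Euclidean distances
        dp = ((a - p) ** 2).sum(axis=1)
        dn = ((a - n) ** 2).sum(axis=1)
        loss = np.maximum(dp - dn + margin, 0)
        return loss.mean() if reduce == 'mean' else loss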
Example 3
    def check_forward(self, a_data, p_data, n_data):
        a_val = chainer.Variable(a_data)
        p_val = chainer.Variable(p_data)
        n_val = chainer.Variable(n_data)
        loss = functions.triplet(a_val, p_val, n_val, self.margin, self.reduce)
        if self.reduce == 'mean':
            self.assertEqual(loss.data.shape, ())
        else:
            self.assertEqual(loss.data.shape, (self.batchsize,))
        self.assertEqual(loss.data.dtype, self.dtype)
        loss_value = cuda.to_cpu(loss.data)

        #
        # Compute expected value
        #
        loss_expect = numpy.empty((self.a.shape[0],), dtype=self.dtype)
        for i in six.moves.range(self.a.shape[0]):
            ad, pd, nd = self.a[i], self.p[i], self.n[i]
            dp = numpy.sum((ad - pd) ** 2)
            dn = numpy.sum((ad - nd) ** 2)
            loss_expect[i] = max((dp - dn + self.margin), 0)
        if self.reduce == 'mean':
            loss_expect = loss_expect.mean()
        numpy.testing.assert_allclose(
            loss_expect, loss_value, **self.check_forward_options)
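Example 4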
 def __call__(self, x, y):
     anchor = x[:, :in_size]
     positive = x[:, in_size:in_size * 2]
     negative = x[:, in_size * 2:]
     anchor_ = self.predictor(anchor)
     positive_ = self.predictor(positive)
     negative_ = self.predictor(negative)
     loss = F.triplet(anchor_, positive_, negative_, margin=triplet_margin)
     reporter.report({'loss': loss}, self)
     return loss
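This example assumes each row of x packs the anchor, positive, and negative features side by side along axis 1. A hypothetical construction of such an input (in_size and the three raw arrays are assumptions, not part of the original snippet):

    import numpy as np
    # pack three (N, in_size) arrays into one (N, 3 * in_size) batch
    x = np.concatenate([anchor_raw, positive_raw, negative_raw], axis=1)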
Example 5
 def __call__(self, x_a, x_p, x_n):
     h_a, h_p, h_n = (self.predictor(x) for x in (x_a, x_p, x_n))
     if self.train_linear_only:
         h_a.unchain()
         h_p.unchain()
         h_n.unchain()
     y_a, y_p, y_n = (self.linear(h) for h in (h_a, h_p, h_n))
     loss = F.triplet(y_a, y_p, y_n, margin=1)
     report({'loss': loss}, self)
     return loss
Example 6
 def _compute_triplet_loss(self, mb_locs, mb_confs):
     mb_boxs = [self._decode_bbox(mb_loc) for mb_loc in mb_locs.array]
     labeled_features = self._filter_overlapping_bboxs(mb_boxs, mb_confs)
     anchors, positives, negatives = self._build_triplets(labeled_features)
     if not anchors:
         # no valid triplets were built from this batch; return a zero loss
         return 0, chainer.Variable(self.xp.zeros((), dtype=np.float32))
     batch_size = len(anchors)
     anchors = F.stack(anchors)
     positives = F.stack(positives)
     negatives = F.stack(negatives)
     return batch_size, F.triplet(anchors, positives, negatives)
Example 7
    def __call__(self, anchor, positive, negative):
        anchor, positive, negative = map(self.model,
                                         (anchor, positive, negative))
        loss = F.triplet(anchor, positive, negative, self.margin)
        chainer.reporter.report({'loss': loss}, self)

        p = ((anchor.array - positive.array)**2).sum(axis=1)
        n = ((anchor.array - negative.array)**2).sum(axis=1)
        accuracy = self.xp.mean(p < n)
        chainer.reporter.report({'accuracy': accuracy}, self)
        return loss
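Example 8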
    def update_core(self):
        batch = self._iterators['main'].next()
        batch_size = len(batch)
        optimizer = self._optimizers['main']
        model = optimizer.target

        in_arrays = self.converter(batch, self.device)
        in_vars = tuple(chainer.Variable(x) for x in in_arrays)

        feats = model(in_vars[0])
        # integer division: the batch packs anchor/positive/negative thirds
        anchor_feats = feats[:batch_size // 3]
        pos_feats = feats[batch_size // 3:batch_size // 3 * 2]
        neg_feats = feats[batch_size // 3 * 2:]

        loss = F.triplet(anchor_feats, pos_feats, neg_feats)
        reporter.report({'loss': loss}, model)

        loss.backward()
        optimizer.update()
        model.cleargrads()
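The updater above clears gradients after the parameter update; the more common Chainer ordering clears them before the backward pass. A minimal sketch of that conventional sequence:

    model.cleargrads()   # zero any previously accumulated gradients
    loss.backward()      # populate parameter gradients
    optimizer.update()   # apply the optimizer rule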
Example 9
    def __call__(self, anchor, positive, negative):
        """
        It takes a triplet of variables as inputs
        :param anchor: The anchor example variable. The shape should be (N, K),
                where N denotes the minibatch size, and K denotes the dimension of the anchor.
        :param positive: The positive example variable. The shape should be the same as anchor.
        :param negative: The negative example variable. The shape should be the same as anchor.
        :return:
            Type:~chainer.varibales:
            A variable holding a scalar that is the loss value calculated.
        """
        self.anchor = self.model(anchor)
        self.positive = self.model(positive)
        self.negative = self.model(negative)
        # F.triplet falls back to its defaults here: margin=0.2, reduce='mean'
        self.loss = F.triplet(anchor=self.anchor,
                              positive=self.positive,
                              negative=self.negative)

        return self.loss
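Written out with the defaults the comment above refers to (margin=0.2 and reduce='mean' are Chainer's documented defaults for F.triplet):

    self.loss = F.triplet(anchor=self.anchor, positive=self.positive,
                          negative=self.negative, margin=0.2, reduce='mean')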
Example 10
    def check_forward(self, a_data, p_data, n_data):
        a_val = chainer.Variable(a_data)
        p_val = chainer.Variable(p_data)
        n_val = chainer.Variable(n_data)
        loss = functions.triplet(a_val, p_val, n_val, self.margin)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))

        #
        # Compute expected value
        #
        loss_expect = 0
        for i in six.moves.range(self.a.shape[0]):
            ad, pd, nd = self.a[i], self.p[i], self.n[i]
            dp = numpy.sum((ad - pd) ** 2)
            dn = numpy.sum((ad - nd) ** 2)
            loss_expect += max((dp - dn + self.margin), 0)
        loss_expect /= self.a.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=5)
Example 11
 def f(a, p, n):
     return functions.triplet(a,
                              p,
                              n,
                              margin=self.margin,
                              reduce=self.reduce)
Example 12
 def f(a, p, n):
     return functions.triplet(
         a, p, n, margin=self.margin, reduce=self.reduce)
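Wrappers like f are typically passed to Chainer's numerical gradient checker. A hedged usage sketch (the input arrays and the upstream gradient y_grad are assumptions):

    from chainer import gradient_check
    gradient_check.check_backward(
        f, (a_data, p_data, n_data), y_grad, atol=1e-4, rtol=1e-4)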
Example 13
 def __call__(self, x_a, x_p, x_n):
     z_a, z_p, z_n = self.forward(x_a), self.forward(x_p), self.forward(x_n)
     loss = F.triplet(z_a, z_p, z_n)
     chainer.report({'loss': loss}, self)
     return loss
Example 14
 def __call__(self, x_a, x_p, x_n):
     y_a, y_p, y_n = (self.predictor(x) for x in (x_a, x_p, x_n))
     loss = F.triplet(y_a, y_p, y_n)
     report({'loss': loss}, self)
     return loss