    def forward(self, imgs, gt_mb_locs, gt_mb_labels):
        mb_locs, mb_confs = self.model(imgs)
        loc_loss, conf_loss = multibox_loss(
            mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
        # the localization loss is weighted by alpha; k is the hard
        # negative mining ratio used inside multibox_loss
        loss = loc_loss * self.alpha + conf_loss

        chainer.reporter.report(
            {'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
            self)

        return loss
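For context, multibox_loss from chainercv.links.model.ssd takes batched arrays of predicted and ground-truth box offsets and class scores and returns two scalar chainer.Variables. A minimal sketch with synthetic inputs; the batch size, box count, and class count below are illustrative assumptions (SSD300-like shapes), not values from the snippet:

import numpy as np
from chainercv.links.model.ssd import multibox_loss

B, n_box, n_class = 2, 8732, 21   # assumed shapes, incl. background class
mb_locs = np.random.randn(B, n_box, 4).astype(np.float32)
mb_confs = np.random.randn(B, n_box, n_class).astype(np.float32)
gt_mb_locs = np.random.randn(B, n_box, 4).astype(np.float32)
# label 0 is background; anchors with positive labels drive loc_loss
gt_mb_labels = np.random.randint(0, n_class, (B, n_box)).astype(np.int32)

loc_loss, conf_loss = multibox_loss(
    mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, 3)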
Example #2
    def _check_forward(self, mb_locs_local, mb_confs_local, gt_mb_locs_local,
                       gt_mb_labels_local, k):
        loc_loss_local, conf_loss_local = multibox_loss(
            mb_locs_local, mb_confs_local, gt_mb_locs_local,
            gt_mb_labels_local, k, self.comm)

        # average the per-worker losses across all workers
        loc_loss_local = cuda.to_cpu(loc_loss_local.array)
        conf_loss_local = cuda.to_cpu(conf_loss_local.array)
        loc_loss = self.comm.allreduce_obj(loc_loss_local) / self.comm.size
        conf_loss = self.comm.allreduce_obj(conf_loss_local) / self.comm.size

        expect_loc_loss, expect_conf_loss = multibox_loss(
            self.mb_locs, self.mb_confs, self.gt_mb_locs, self.gt_mb_labels, k)
        np.testing.assert_almost_equal(loc_loss,
                                       expect_loc_loss.array,
                                       decimal=2)
        np.testing.assert_almost_equal(conf_loss,
                                       expect_conf_loss.array,
                                       decimal=2)
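ChainerMN's allreduce_obj sums an arbitrary picklable object across all workers, so dividing by comm.size gives the mean used in the check above. A minimal sketch; the communicator type is an assumption:

import chainermn

comm = chainermn.create_communicator('naive')
local_loss = float(comm.rank)                       # differs per worker
mean_loss = comm.allreduce_obj(local_loss) / comm.size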
Example #3
    def __call__(self, imgs, gt_mb_locs, gt_mb_labs):
        mb_locs, mb_confs = self.model(imgs)
        loc_loss, conf_loss = multibox_loss(
            mb_locs, mb_confs, gt_mb_locs, gt_mb_labs, self.k)
        loss = loc_loss * self.alpha + conf_loss

        chainer.reporter.report(
            {'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
            self)

        return loss
Example #4
    def _check_forward(self, mb_locs_local, mb_confs_local, gt_mb_locs_local,
                       gt_mb_labels_local, k):
        loc_loss_local, conf_loss_local = multibox_loss(
            mb_locs_local, mb_confs_local, gt_mb_locs_local,
            gt_mb_labels_local, k, self.comm)

        loc_loss_local = cuda.to_cpu(loc_loss_local.array)
        conf_loss_local = cuda.to_cpu(conf_loss_local.array)
        # sum the per-worker losses in place across all workers
        from mpi4py import MPI
        self.comm.mpi_comm.Allreduce(MPI.IN_PLACE, loc_loss_local)
        self.comm.mpi_comm.Allreduce(MPI.IN_PLACE, conf_loss_local)

        loc_loss, conf_loss = multibox_loss(self.mb_locs, self.mb_confs,
                                            self.gt_mb_locs, self.gt_mb_labels,
                                            k)
        np.testing.assert_almost_equal(loc_loss_local,
                                       loc_loss.array,
                                       decimal=2)
        np.testing.assert_almost_equal(conf_loss_local,
                                       conf_loss.array,
                                       decimal=2)
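The MPI.IN_PLACE variant above overwrites each rank's array with the elementwise sum over all ranks (mpi4py's default reduction op is MPI.SUM); summing works here because the comm-aware multibox_loss already normalizes each worker's share, as the comparison against the full-batch loss confirms. A standalone sketch of the in-place allreduce:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
buf = np.full(1, float(comm.rank))
comm.Allreduce(MPI.IN_PLACE, buf)   # buf now holds the sum over all ranks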
Example #5
    def __call__(self, imgs, gt_mb_locs, gt_mb_labels):
        mb_locs, mb_confs = self.model(imgs)
        loc_loss, conf_loss = multibox_loss(mb_locs, mb_confs, gt_mb_locs,
                                            gt_mb_labels, self.k)
        loss = loc_loss * self.alpha + conf_loss

        chainer.reporter.report(
            {
                self.loss_labels[0]: loss,
                self.loss_labels[1]: loc_loss,
                self.loss_labels[2]: conf_loss
            }, self)

        return loss
Example #6
    def __call__(self, params_detection_loss, params_segmentation_loss):
        mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k = params_detection_loss
        score, mask_gt = params_segmentation_loss
        loc_loss, conf_loss = multibox_loss(mb_locs, mb_confs, gt_mb_locs,
                                            gt_mb_labels, k)
        detection_loss = loc_loss * self.alpha + conf_loss

        segmentation_loss = mask_loss(score, mask_gt)
        loss_pre = detection_loss + segmentation_loss
        # lazily initialize the learned log-variance parameters
        if self.param1.array is None:
            self.param1.initialize([1])
        if self.param2.array is None:
            self.param2.initialize([1])

        # uncertainty-based weighting: each task loss is scaled by
        # exp(-s), with the log-variance s added as a regularizer
        param1_precision = chainer.functions.exp(-self.param1)
        param2_precision = chainer.functions.exp(-self.param2)
        loss = detection_loss * param1_precision + 2 * self.param1 \
               + segmentation_loss * param2_precision + self.param2

        return loss, (loss, loss_pre, loc_loss, conf_loss, segmentation_loss,
                      self.param1, self.param2)
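Example #6 weights the two task losses by learned homoscedastic-uncertainty terms, in the style of Kendall et al.'s multi-task weighting: each loss is scaled by exp(-s) for a learned log-variance s, and s itself is added so the scales cannot collapse to zero. A self-contained sketch of the symmetric form (the listing above uses 2 * param1 for the detection term; the class and parameter names here are illustrative):

import numpy as np
import chainer
import chainer.functions as F


class UncertaintyWeighting(chainer.Link):

    def __init__(self):
        super(UncertaintyWeighting, self).__init__()
        with self.init_scope():
            # learned log-variances, one per task
            self.s1 = chainer.Parameter(np.zeros(1, dtype=np.float32))
            self.s2 = chainer.Parameter(np.zeros(1, dtype=np.float32))

    def forward(self, loss1, loss2):
        return (loss1 * F.exp(-self.s1) + self.s1
                + loss2 * F.exp(-self.s2) + self.s2)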
Example #7
    def __call__(self, imgs, gt_mb_locs, gt_mb_labels, gt_mask):
        detection_result, segmentation_result = self.model(imgs)
        detection = detection_result is not None
        segmentation = segmentation_result is not None

        # the multi-task loss only applies when both heads produced output
        if not (segmentation and detection):
            self.multi_task_loss = False

        if detection:
            mb_locs, mb_confs = detection_result
        if segmentation:
            score = segmentation_result

        if not self.multi_task_loss:
            if detection:
                loc_loss, conf_loss = multibox_loss(mb_locs, mb_confs,
                                                    gt_mb_locs, gt_mb_labels,
                                                    self.k)
                loss_detection = loc_loss * self.alpha + conf_loss
            if segmentation:
                loss_segmentation = F.softmax_cross_entropy(score, gt_mask)

            if segmentation and detection:
                if self.use_dynamic_loss:
                    # dynamic weighting is not implemented in this snippet;
                    # taking this branch would leave `loss` undefined
                    pass
                else:
                    loss = 2 * self.loss_split * loss_detection + 2 * (
                        1 - self.loss_split) * loss_segmentation

                chainer.reporter.report(
                    {
                        'loss': loss,
                        'loss/loc': loc_loss,
                        'loss/conf': conf_loss,
                        'loss/mask': loss_segmentation,
                        'loss/split': self.loss_split
                    }, self)
            elif segmentation:
                loss = loss_segmentation
                chainer.reporter.report(
                    {
                        'loss': loss,
                        'loss/mask': loss_segmentation
                    }, self)

            elif detection:
                loss = loss_detection
                chainer.reporter.report(
                    {
                        'loss': loss,
                        'loss/loc': loc_loss,
                        'loss/conf': conf_loss
                    }, self)
        else:
            loss, report_values = self.multi_loss(
                (mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k),
                (score, gt_mask))
            # report_values = (loss, loss_pre, loc_loss, conf_loss,
            #                  segmentation_loss, param1, param2)
            chainer.reporter.report(
                {
                    'loss': report_values[0].data[0],
                    'loss/pre': report_values[1],
                    'loss/loc': report_values[2],
                    'loss/conf': report_values[3],
                    'loss/mask': report_values[4],
                    'param_det': report_values[5].data[0],
                    'param_seg': report_values[6].data[0]
                }, self)
        return loss
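When use_dynamic_loss is off, Example #7 combines the two losses with a fixed convex split; with loss_split = 0.5 the expression 2 * s * L_det + 2 * (1 - s) * L_seg reduces to the plain sum L_det + L_seg. The combination in isolation:

def combine_losses(loss_det, loss_seg, loss_split=0.5):
    # fixed convex combination; loss_split = 0.5 gives loss_det + loss_seg
    return 2 * loss_split * loss_det + 2 * (1 - loss_split) * loss_seg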
Example #8
    def _check_forward(self, mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k):
        if self.variable:
            mb_locs = chainer.Variable(mb_locs)
            mb_confs = chainer.Variable(mb_confs)
            gt_mb_locs = chainer.Variable(gt_mb_locs)
            gt_mb_labels = chainer.Variable(gt_mb_labels)

        loc_loss, conf_loss = multibox_loss(mb_locs, mb_confs, gt_mb_locs,
                                            gt_mb_labels, k)

        self.assertIsInstance(loc_loss, chainer.Variable)
        self.assertEqual(loc_loss.shape, ())
        self.assertEqual(loc_loss.dtype, mb_locs.dtype)

        self.assertIsInstance(conf_loss, chainer.Variable)
        self.assertEqual(conf_loss.shape, ())
        self.assertEqual(conf_loss.dtype, mb_confs.dtype)

        if self.variable:
            mb_locs = mb_locs.array
            mb_confs = mb_confs.array
            gt_mb_locs = gt_mb_locs.array
            gt_mb_labels = gt_mb_labels.array

        mb_locs = cuda.to_cpu(mb_locs)
        mb_confs = cuda.to_cpu(mb_confs)
        gt_mb_locs = cuda.to_cpu(gt_mb_locs)
        gt_mb_labels = cuda.to_cpu(gt_mb_labels)
        loc_loss = cuda.to_cpu(loc_loss.array)
        conf_loss = cuda.to_cpu(conf_loss.array)

        # reference implementation: accumulate per-anchor losses and apply
        # hard negative mining with ratio k
        n_positive_total = 0
        expect_loc_loss = 0
        expect_conf_loss = 0
        for i in six.moves.xrange(gt_mb_labels.shape[0]):
            n_positive = 0
            negatives = []
            for j in six.moves.xrange(gt_mb_labels.shape[1]):
                loc = F.huber_loss(mb_locs[np.newaxis, i, j],
                                   gt_mb_locs[np.newaxis, i, j], 1).array
                conf = F.softmax_cross_entropy(mb_confs[np.newaxis, i, j],
                                               gt_mb_labels[np.newaxis, i,
                                                            j]).array

                if gt_mb_labels[i, j] > 0:
                    n_positive += 1
                    expect_loc_loss += loc
                    expect_conf_loss += conf
                else:
                    negatives.append(conf)

            n_positive_total += n_positive
            if n_positive > 0:
                expect_conf_loss += sum(sorted(negatives)[-n_positive * k:])

        if n_positive_total == 0:
            expect_loc_loss = 0
            expect_conf_loss = 0
        else:
            expect_loc_loss /= n_positive_total
            expect_conf_loss /= n_positive_total

        np.testing.assert_almost_equal(loc_loss, expect_loc_loss, decimal=2)
        np.testing.assert_almost_equal(conf_loss, expect_conf_loss, decimal=2)
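The reference loop above reproduces multibox_loss's hard negative mining: for each image, only the k * n_positive highest-loss negative anchors enter the confidence loss, and both losses are normalized by the total positive count. The selection step in isolation:

def mine_hard_negatives(neg_conf_losses, n_positive, k):
    # keep only the k * n_positive largest negative confidence losses
    if n_positive == 0:
        return []
    return sorted(neg_conf_losses)[-n_positive * k:]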