Code Example #1
File: ssd.py  Project: DaikiOnodera/ReNomIMG
    def loss(self, x, y, neg_pos_ratio=3.0, negatives_for_hard=100.0):
        # Channel layout per box: 4 loc offsets, class confidences, then
        # 8 extra slots; y[:, -8, :] flags the matched (positive) boxes.
        batch_size = y.shape[0]
        num_boxes = y.shape[2]
        # Per-box confidence loss over the class scores.
        conf_loss = rm.sum(rm.softmax_cross_entropy(x[:, 4:-8, :],
                                                    y[:, 4:-8, :],
                                                    reduce_sum=False),
                           axis=1)
        # Per-box localization loss over the 4 box offsets.
        loc_loss = rm.sum(rm.smoothed_l1(x[:, :4, :],
                                         y[:, :4, :],
                                         reduce_sum=False),
                          axis=1)

        # Positive counts and positive-only losses, masked by y[:, -8, :].
        num_pos = np.sum(y[:, -8, :], axis=1)
        pos_loc_loss = rm.sum(loc_loss * y[:, -8, :], axis=1)
        pos_conf_loss = rm.sum(conf_loss * y[:, -8, :], axis=1)

        # Hard negative mining: keep at most neg_pos_ratio negatives per
        # positive, and fall back to a fixed number of negatives when the
        # whole batch has no positives.
        num_neg = np.minimum(neg_pos_ratio * num_pos, num_boxes - num_pos)
        has_min = np.any(num_neg > 0).astype('float')

        num_neg = np.concatenate(
            [num_neg, [(1 - has_min) * negatives_for_hard]])
        # Use the smallest positive entry so every image contributes the
        # same number of mined negatives.
        num_neg_batch = int(np.min(num_neg[num_neg > 0]))
        confs_start = 5  # 4 loc offsets + 0 (background label id) + 1
        confs_end = confs_start + self.num_class - 1

        # Score each unmatched box by its highest non-background
        # confidence and take the top num_neg_batch per image.
        max_confs = np.max(x[:, confs_start:confs_end, :].as_ndarray(), axis=1)
        indices = (max_confs *
                   (1 - y[:, -8, :])).argsort()[:, ::-1][:, :num_neg_batch]

        # Offset each image's box indices into the flattened batch so the
        # hardest negatives can be gathered in one fancy-indexing pass.
        batch_idx = np.expand_dims(np.arange(batch_size), 1)
        batch_idx = np.tile(batch_idx, (1, num_neg_batch))
        full_indices = (batch_idx.reshape(-1) * int(num_boxes) +
                        indices.reshape(-1))

        neg_conf_loss = conf_loss.reshape(-1)[full_indices]
        neg_conf_loss = neg_conf_loss.reshape((batch_size, num_neg_batch))
        neg_conf_loss = rm.sum(neg_conf_loss, axis=1)

        # Confidence loss is normalized by positives + mined negatives;
        # localization loss by positives (guarded against zero below).
        total_loss = neg_conf_loss + pos_conf_loss
        total_loss /= (num_pos + float(num_neg_batch))

        num_pos = np.where(np.not_equal(num_pos, 0), num_pos,
                           np.ones_like(num_pos))
        total_loss = total_loss + (pos_loc_loss / num_pos)
        loss = rm.sum(total_loss)
        return loss
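
The batch_idx / full_indices construction above is just a per-image top-k
gather on a flattened array. Here is a minimal, standalone NumPy sketch of
the same trick; the scores are toy values standing in for conf_loss, not
ReNom output:

    import numpy as np

    batch_size, num_boxes, num_neg_batch = 2, 5, 2
    scores = np.array([[0.1, 0.9, 0.3, 0.8, 0.2],
                       [0.7, 0.1, 0.6, 0.2, 0.4]])

    # Per-image indices of the top num_neg_batch scores, descending.
    indices = scores.argsort()[:, ::-1][:, :num_neg_batch]

    # Offset each image's indices into the flattened (batch*boxes,) array.
    batch_idx = np.tile(np.arange(batch_size)[:, None], (1, num_neg_batch))
    full_indices = batch_idx.reshape(-1) * num_boxes + indices.reshape(-1)

    picked = scores.reshape(-1)[full_indices].reshape(batch_size,
                                                      num_neg_batch)
    print(picked)  # [[0.9 0.8]
                   #  [0.7 0.6]]
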
Code Example #2
    def loss(self, x, y, neg_pos_ratio=3.0):
        # A box is positive when its background score (y[..., 5], the
        # first one-hot class entry) is zero.
        pos_samples = (y[:, :, 5] == 0)[..., None]
        N = np.sum(pos_samples)
        pos_Ns = np.sum(pos_samples, axis=1)
        # Budget of hard negatives per image: neg_pos_ratio per positive,
        # capped at the total number of boxes.
        neg_Ns = np.clip(neg_pos_ratio * pos_Ns, 0, y.shape[1])

        # Localization loss, restricted to positive boxes.
        loc_loss = rm.sum(
            rm.smoothed_l1(x[..., :4], y[..., 1:5], reduce_sum=False) *
            pos_samples)

        # Hard negative mining: score every box with a numerically stable
        # log-sum-exp of its class logits minus the background logit.
        np_x = x[..., 4:].as_ndarray()
        max_np_x = np.max(np_x)
        loss_c = np.log(
            np.sum(np.exp(np_x.reshape(-1, self.num_class) - max_np_x),
                   axis=1,
                   keepdims=True) + 1e-8) + max_np_x
        loss_c -= np_x[..., 0].reshape(-1, 1)
        loss_c = loss_c.reshape(len(x), -1)
        # Sink positives to the bottom of the ranking so mining selects
        # only true negatives.
        loss_c[pos_samples.astype(bool)[..., 0]] = -np.inf

        # Double argsort gives each box's rank in descending order of
        # loss_c; keep the top neg_Ns per image as hard negatives.
        sorted_index = np.argsort(-1 * loss_c, axis=1)
        index_rank = np.argsort(sorted_index, axis=1)
        neg_samples = index_rank < neg_Ns
        samples = (neg_samples[..., None] + pos_samples).astype(bool)
        # Confidence loss over the union of positives and mined negatives.
        conf_loss = rm.sum(
            rm.softmax_cross_entropy(x[..., 4:].transpose(0, 2, 1),
                                     y[..., 5:].transpose(0, 2, 1),
                                     reduce_sum=False).transpose(0, 2, 1) *
            samples)

        loss = conf_loss + loc_loss
        # Normalize by the average number of positives per image.
        return loss / (N / len(x))
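
The double argsort here is the standard rank trick: the argsort of an
argsort yields each element's rank, so index_rank < neg_Ns keeps the
top-neg_Ns boxes per image while leaving the mask in original box order,
ready to be multiplied into the elementwise loss. A small standalone demo
with toy numbers:

    import numpy as np

    loss_c = np.array([[0.2, 0.9, 0.5, 0.1],
                       [0.4, 0.3, 0.8, 0.6]])
    neg_Ns = np.array([[2], [1]])  # per-image hard-negative budget

    sorted_index = np.argsort(-loss_c, axis=1)     # positions, hardest first
    index_rank = np.argsort(sorted_index, axis=1)  # rank of each position
    neg_samples = index_rank < neg_Ns              # top-k mask per image
    print(neg_samples)
    # [[False  True  True False]
    #  [False False  True False]]
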
Code Example #3
File: test_ad_vs_nd.py  Project: vochicong/ReNom
    def func(node, x):
        # Reduce to a scalar so the gradient check has a single output.
        return rm.sum(rm.smoothed_l1(node, x, delta, reduce_sum=False))
Code Example #4
File: test_ad_vs_nd.py  Project: vochicong/ReNom
    def func(node, x):
        # reduce_sum defaults to True, so this already returns a scalar.
        return rm.smoothed_l1(node, x, delta)
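
For reference, rm.smoothed_l1 computes a smooth L1 (Huber) penalty:
quadratic for small residuals, linear beyond delta. A plain-NumPy sketch
of the usual formula (a stand-in, assuming the standard definition; check
your ReNom version's docs for the exact form):

    import numpy as np

    def smoothed_l1(x, y, delta=1.0):
        # Elementwise smooth L1, summed like reduce_sum=True.
        d = np.abs(x - y)
        per_elem = np.where(d < delta,
                            0.5 * d ** 2,
                            delta * d - 0.5 * delta ** 2)
        return per_elem.sum()

    x = np.array([0.2, 1.5, -2.0])
    y = np.zeros(3)
    print(smoothed_l1(x, y))  # 0.02 + 1.0 + 1.5 = 2.52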