def _loss(self, scores, labels):
    # [batch, B, 1] -> [batch, B]
    scores = tf.squeeze(scores, axis=-1)
    # Pairwise differences between every pair of scores / labels: [batch, B, B].
    diff_scores = scores[..., tf.newaxis] - scores[:, tf.newaxis]
    diff_labels = labels[..., tf.newaxis] - labels[:, tf.newaxis]
    # Rescale the signed label differences to soft targets in [0, 1].
    diff_labels = (1.0 + diff_labels / (self._k / 2.0)**2) / 2.0
    add_extra_tensor('diff_labels', diff_labels)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=diff_labels,
                                                   logits=diff_scores)
    return tf.reduce_mean(loss, axis=-1)
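
# Illustrative sketch (not part of the original code): the broadcasting above turns a
# per-bag score vector into a matrix of pairwise differences. Shapes and values below
# are made up for demonstration.
import numpy as np

scores_np = np.array([[0.2, 1.5, -0.3]])                          # [batch=1, B=3]
pairwise = scores_np[..., np.newaxis] - scores_np[:, np.newaxis]  # [1, 3, 3]
# pairwise[0, i, j] == scores_np[0, i] - scores_np[0, j]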
def _normalize_labels(self, class_hist, class_hist0, class_hist1,
                      neg_class_hist):
    rank = _pairwise_scores(class_hist0,
                            class_hist1,
                            neg_class_hist,
                            type='sum')
    # Foreground when the summed pairwise score equals (k // 2)**2.
    is_fg = tf.equal(rank, int(self._k / 2)**2)
    add_extra_tensor('is_fg', is_fg)
    add_extra_tensor('rank', rank)
    return rank, is_fg
    def _normalize_labels(self, class_hist, class_hist0, class_hist1,
                          neg_class_hist):
        # A match happens when at least min_match_frac*k objects
        # belong to the same foreground class.
        labels, fg, bg = _onehot_labels(class_hist, self._k, neg_class_hist)
        add_extra_tensor('class_hist_{}'.format(self._k), class_hist)

        if self._class_agnostic:
            return (tf.to_float(fg), tf.squeeze(fg, axis=-1))
        else:
            return (tf.to_float(labels), tf.squeeze(fg, axis=-1))
    def _loss(self, scores, labels):
        add_extra_tensor('pw_scores_k{}'.format(self._k), scores)
        add_extra_tensor('pw_labels_k{}'.format(self._k), labels)

        if self._loss_type == 'cross_entropy':
            loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                           logits=scores)
        elif self._loss_type == 'l2':
            diff = scores - labels
            loss = 0.5 * tf.multiply(diff, diff)
        else:
            raise ValueError('Unknown loss_type: {}'.format(self._loss_type))

        return tf.reduce_sum(loss, axis=-1)
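
# Illustrative sketch (not part of the original code): the two per-element losses used
# above, written out in NumPy for a single score/label pair. The cross-entropy line is
# the numerically stable form of sigmoid cross-entropy; values are made up.
import numpy as np

score, label = 0.8, 1.0
cross_entropy = max(score, 0) - score * label + np.log1p(np.exp(-abs(score)))
l2 = 0.5 * (score - label) ** 2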
def _clear_negative_classes(class_hist, neg_class_hist):
    # Zero out histogram entries of classes present in the negative bag:
    # logical-not the negative mask, tile it to class_hist's shape, then
    # multiply it into class_hist.
    if neg_class_hist is not None:
        # [MBS, num_classes + 1]
        neg_shape = shape_utils.combined_static_and_dynamic_shape(
            neg_class_hist)
        # [MBS*N, B, num_classes + 1]
        class_hist_shape = shape_utils.combined_static_and_dynamic_shape(
            class_hist)

        N = class_hist_shape[0] // neg_shape[0]
        B = class_hist_shape[1]
        add_extra_tensor('neg_class_hist_N{}'.format(N), neg_class_hist)
        add_extra_tensor('class_hist_N{}'.format(N), class_hist)
        # [MBS, 1, 1, num_classes + 1]
        neg_class_hist = neg_class_hist[:, tf.newaxis, tf.newaxis]
        # [MBS, N, B, num_classes + 1]
        neg_class_hist = tf.tile(neg_class_hist, [1, N, B, 1])
        neg_class_hist = tf.reshape(neg_class_hist, class_hist_shape)
        not_neg_class_hist = tf.logical_not(neg_class_hist)
        add_extra_tensor('not_neg_class_hist_tiled_N{}'.format(N),
                         not_neg_class_hist)

        not_neg_class_hist = tf.cast(not_neg_class_hist,
                                     dtype=class_hist.dtype)
        class_hist = tf.multiply(class_hist, not_neg_class_hist)
        add_extra_tensor('final_class_hist_N{}'.format(N), class_hist)
    return class_hist
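
# Illustrative sketch (not part of the original code): the mask-and-multiply pattern used
# above, with MBS=1, N=1, B=2 and three classes (background included). Values are made up.
import numpy as np

class_hist_np = np.array([[[1., 0., 2.],
                           [0., 3., 1.]]])             # [MBS*N, B, num_classes + 1]
neg_class_hist_np = np.array([[False, True, False]])   # [MBS, num_classes + 1]
mask = ~neg_class_hist_np[:, np.newaxis, :]            # broadcasts over N and B
cleared = class_hist_np * mask                         # counts of negative classes -> 0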
    def _subsample_inds(self, cross_co, instance_loss):
        cobj_energy = cross_co.get('energy')
        if self._is_training:
            if self._stratified_subsampling:
                ## RANDOM/HARD_EXAMPLE SAMPLING FROM STRATA
                return self._subsample_stratified(
                    instance_loss, cross_co.get('matched_class'),
                    cross_co.get('target_class'))
            else:
                ## RANDOM/HARD_EXAMPLE (optionally +BALANCED) SAMPLING FROM TOP-K
                positive_fraction = self._positive_balance_fraction
                return self._subsample_from_topk(cross_co.get('is_target'),
                                                 cobj_energy, instance_loss,
                                                 positive_fraction)
        else:
            # Evaluation: keep the indices of the highest-energy co-object proposals.
            ncobj = cobj_energy.shape[1]
            assert (ncobj is not None and ncobj >= self._ncobj_proposals)
            if self._ncobj_proposals == ncobj:
                return None
            _, topk_idx = tf.nn.top_k(cobj_energy,
                                      k=self._ncobj_proposals,
                                      sorted=False)
            util.add_extra_tensor('topk_idx_{}'.format(self._k), topk_idx)
            return topk_idx
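
# Illustrative sketch (not part of the original code): the evaluation branch above keeps
# only the indices of the highest-energy co-object proposals. TF 1.x style; shapes and
# values are made up.
import tensorflow as tf

cobj_energy_example = tf.constant([[0.1, 0.9, 0.4, 0.7]])     # [batch, ncobj]
_, topk_idx_example = tf.nn.top_k(cobj_energy_example, k=2, sorted=False)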
    def build(self,
              fea,
              neg_fea,
              matched_class0,
              neg_matched_class,
              reuse_vars=False,
              scope=None):
        '''
        Args:
          fea: feature tensor of size [MBS*K, 1, B, 1, d] of positive bags
          neg_fea: feature tensor of size [MBS, N*B, 1, 1, d] of negative bags
          matched_class0: [MBS*K, B, num_classes+1]
          neg_matched_class: [MBS*N, B, num_classes+1]
        '''
        with tf.variable_scope(scope,
                               'negative_attention',
                               reuse=reuse_vars,
                               values=[fea, neg_fea]):
            neg_fea_shape = shape_utils.combined_static_and_dynamic_shape(
                neg_fea)
            orig_neg_fea = neg_fea
            #[MBS*K, 1, B, 1, d] -> [MBS*K, B, d]
            orig_fea_shape = shape_utils.combined_static_and_dynamic_shape(fea)
            orig_fea = fea
            fea = tf.squeeze(fea, [1, 3])
            fea_shape = shape_utils.combined_static_and_dynamic_shape(fea)
            #[MBS*K, B, d] -> [MBS, K*B, d]
            fea = tf.reshape(fea, [neg_fea_shape[0], -1] + fea_shape[2:])
            add_extra_tensor('input_fea', fea)
            add_extra_tensor('input_neg_fea', neg_fea)
            #[MBS, K*B, 1, 1, d]
            fea = fea[:, :, tf.newaxis, tf.newaxis, :]

            if self._convline is not None:
                #[MBS, (K+N)*B, 1, 1, d]
                combined_fea = tf.concat([fea, neg_fea], axis=1)
                combined_fea = self._convline.build(combined_fea,
                                                    scope='combined_convline')
                fea = combined_fea[:, :-neg_fea_shape[1], ...]
                neg_fea = combined_fea[:, -neg_fea_shape[1]:, ...]
                add_extra_tensor('conv_fea', fea)
                add_extra_tensor('conv_neg_fea', neg_fea)

            fea = tf.squeeze(fea, [2, 3])
            neg_fea = tf.squeeze(neg_fea, [2, 3])

            gt_alphas = None
            loss = None
            if neg_matched_class is not None and matched_class0 is not None:
                # [MBS, N*B, num_class+1]
                neg_matched_class = tf.reshape(neg_matched_class,
                                               neg_fea_shape[:2] + [-1])
                neg_cls_shape = shape_utils.combined_static_and_dynamic_shape(
                    neg_matched_class)
                # [MBS, K*B, num_class+1] (k is 1 always...)
                matched_class0 = tf.reshape(
                    matched_class0, [neg_cls_shape[0], -1, neg_cls_shape[2]])
                # [MBS, K*B, N*B]
                gt_alphas = tf.matmul(matched_class0[..., 1:],
                                      neg_matched_class[..., 1:],
                                      transpose_b=True)

            #[MBS, N*B+1, d]
            neg_fea = tf.pad(neg_fea, [[0, 0], [1, 0], [0, 0]])
            if self._use_gt_labels and gt_alphas is not None:
                #[MBS, K*B, N*B]
                alphas = gt_alphas / (
                    tf.reduce_sum(gt_alphas, axis=-1, keep_dims=True) + 1e-7)
                #[MBS, K*B, N*B+1] TODO: not sure about this
                alphas = tf.pad(alphas, [[0, 0], [0, 0], [1, 0]])
            else:
                scores, pairs, joined_fea, _ = self._similarity_cal.build(
                    fea, neg_fea, None, None, 2, None, reuse_vars)
                #[MBS, K*B, N*B+1] scores
                scores = tf.reshape(
                    scores[..., 1],
                    [neg_fea_shape[0], -1, neg_fea_shape[1] + 1])
                add_extra_tensor('scores', scores)
                if self._add_loss and gt_alphas is not None:
                    gt_alphas = tf.cast(gt_alphas, tf.bool)
                    is_in_neg = tf.reduce_any(gt_alphas,
                                              axis=-1,
                                              keep_dims=True)
                    not_in_neg = tf.logical_not(is_in_neg)
                    labels = tf.to_float(
                        tf.concat([not_in_neg, gt_alphas], axis=-1))
                    labels = labels / tf.reduce_sum(
                        labels, axis=-1, keep_dims=True)
                    logits = scores
                    loss = tf.nn.softmax_cross_entropy_with_logits(
                        labels=labels, logits=logits)
                    loss = tf.reduce_mean(loss)
                #[MBS, K*B, N*B+1]
                alphas = tf.nn.softmax(scores, dim=2)
            add_extra_tensor('alphas', alphas)
            neg_fea_to_attend, pos_fea = None, None
            if self._concat_type == 'NEGATIVE_IN_ORIG':
                # orig_neg_fea is [MBS, N*B, 1, 1, d] -> [MBS, N*B, d]
                orig_neg_fea = tf.squeeze(orig_neg_fea, [2, 3])
                # [MBS, N*B+1, d]
                orig_neg_fea = tf.pad(orig_neg_fea, [[0, 0], [1, 0], [0, 0]])
                neg_fea_to_attend = orig_neg_fea
                pos_fea = orig_fea
            elif self._concat_type == 'NEGATIVE_IN_FEA':
                neg_fea_to_attend = neg_fea
                pos_fea = orig_fea
            elif self._concat_type == 'CONCAT_IN_FEA':
                neg_fea_to_attend = neg_fea
                pos_fea = tf.reshape(fea, orig_fea_shape[:4] + [-1])
            else:
                raise ValueError('concat type {} is not defined'.format(
                    self._concat_type))
            # [MBS, K*B, N*B+1] * [MBS, N*B+1, d] -> [MBS, K*B, d]
            attended_neg_feas = tf.matmul(alphas, neg_fea_to_attend)
            add_extra_tensor('attended_neg_feas', attended_neg_feas)
            attended_neg_feas = tf.reshape(attended_neg_feas,
                                           orig_fea_shape[:4] + [-1])
            fea01 = tf.concat((pos_fea, attended_neg_feas), axis=-1)
            return fea01, loss
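
# Illustrative sketch (not part of the original code): the core of the negative attention
# above -- a softmax over similarity scores (with an extra "attend to nothing" slot coming
# from the zero padding) followed by a weighted sum of negative features. Shapes are made up.
import numpy as np

def _softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

neg_fea_np = np.random.randn(1, 4, 8)                      # [MBS, N*B, d]
neg_fea_np = np.pad(neg_fea_np, [(0, 0), (1, 0), (0, 0)])  # [MBS, N*B+1, d], zero "null" row
scores_np = np.random.randn(1, 6, 5)                       # [MBS, K*B, N*B+1]
alphas_np = _softmax(scores_np, axis=2)
attended = alphas_np @ neg_fea_np                          # [MBS, K*B, d]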
    def _loss(self, scores, labels):
        def _target_vals(tensor):
            if self._bag_target_class is None:
                return tensor
            score_inds = tf.cast(self._bag_target_class[..., tf.newaxis],
                                 tf.int32) - 1
            tr_tensor = tf.transpose(tensor, [0, 2, 1])
            tr_tensor = util.batched_gather(score_inds, tr_tensor)
            return tf.transpose(tr_tensor, [0, 2, 1])

        add_extra_tensor('scores_b{}'.format(self._k), scores)
        add_extra_tensor('labels_b{}'.format(self._k), labels)

        scores = _target_vals(scores)
        labels = _target_vals(labels)

        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                       logits=scores)
        add_extra_tensor('scores_{}'.format(self._k), scores)
        add_extra_tensor('labels_{}'.format(self._k), labels)
        add_extra_tensor('loss_{}'.format(self._k), loss)

        if self._focal_loss:
            gamma = 2.0
            alpha = 1.0
            sigm = tf.nn.sigmoid(scores)
            weights = tf.where(tf.cast(labels, dtype=tf.bool), 1.0 - sigm,
                               sigm)

            weights = alpha * tf.pow(weights, gamma)
            loss = weights * loss
            add_extra_tensor('weights_{}'.format(self._k), weights)
            add_extra_tensor('sigm_{}'.format(self._k), sigm)

        return tf.reduce_sum(loss, axis=-1)
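
# Illustrative sketch (not part of the original code): the focal-loss weighting used above.
# Positives are weighted by (1 - sigmoid(score))**gamma and negatives by sigmoid(score)**gamma,
# so easy, confidently classified pairs contribute less to the loss. Values are made up.
import numpy as np

scores_f = np.array([3.0, -2.0, 0.1])
labels_f = np.array([1.0, 0.0, 1.0])
gamma = 2.0
sigm_f = 1.0 / (1.0 + np.exp(-scores_f))
weights_f = np.where(labels_f > 0, 1.0 - sigm_f, sigm_f) ** gamma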
    def _loss(self, scores, labels):
        add_extra_tensor('pw_scores_k{}'.format(self._k), scores)
        add_extra_tensor('pw_labels_k{}'.format(self._k), labels)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                       logits=scores)

        # Focal-loss style reweighting, applied unconditionally in this variant.
        gamma = 2.0
        alpha = 1.0
        sigm = tf.nn.sigmoid(scores)
        weights = tf.where(tf.cast(labels, dtype=tf.bool), 1.0 - sigm, sigm)

        weights = alpha * tf.pow(weights, gamma)
        loss = weights * loss
        add_extra_tensor('weights', weights)
        add_extra_tensor('sigm', sigm)
        add_extra_tensor('labels', labels)
        add_extra_tensor('loss_k{}'.format(self._k), loss)

        return tf.reduce_sum(loss, axis=-1)
    def _normalize_labels(self, class_hist, class_hist0, class_hist1,
                          neg_class_hist):

        add_extra_tensor('class_hist_k{}'.format(self._k), class_hist)
        add_extra_tensor('class_hist0_k{}'.format(self._k), class_hist0)
        add_extra_tensor('class_hist1_k{}'.format(self._k), class_hist1)
        # Negative class histograms are deliberately ignored in this variant.
        neg_class_hist = None
        energy = _pairwise_scores(class_hist0,
                                  class_hist1,
                                  neg_class_hist,
                                  type='max')
        labels = []
        for i in range(self.score_size):
            labs = tf.greater(energy, i)
            labels.append(labs)
        labels = tf.cast(tf.stack(labels, axis=-1), dtype=tf.float32)

        add_extra_tensor('labels_k{}'.format(self._k), labels)
        add_extra_tensor('labs_k{}'.format(self._k), labs)
        add_extra_tensor('energy_k{}'.format(self._k), energy)
        # The strictest threshold doubles as the foreground indicator.
        return labels, labs
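
# Illustrative sketch (not part of the original code): the label construction above turns an
# integer "energy" into ordinal multi-hot targets where bit i is on iff energy > i. Here
# score_size=3 and the energies are made up.
import numpy as np

energy_np = np.array([0, 2, 3])
ordinal = np.stack([energy_np > i for i in range(3)], axis=-1).astype(np.float32)
# energy 0 -> [0, 0, 0], energy 2 -> [1, 1, 0], energy 3 -> [1, 1, 1]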
    def _build(self,
               fea0,
               fea1,
               ind0,
               ind1,
               score_size,
               neg_fea,
               matched_class0,
               neg_matched_class,
               reuse_vars,
               scope,
               tile_fea1=True):
        fea0_shape = shape_utils.combined_static_and_dynamic_shape(fea0)
        assert (len(fea0_shape) == 3)
        m = fea0_shape[1]

        if fea1 is None:
            # shape: [N, M, 1, K]
            fea01 = fea0[:, :, tf.newaxis]
        else:
            fea1_shape = shape_utils.combined_static_and_dynamic_shape(fea1)

            if tile_fea1:
                assert (len(fea1_shape) == 3)
                l = fea1_shape[1]
                # shape = [N, M, L, K]
                fea1 = tf.tile(fea1[:, tf.newaxis], [1, m, 1, 1])
            else:
                assert (len(fea1_shape) == 4)
                l = fea1_shape[2]

            # shape = [N, M, L, K]
            fea0 = tf.tile(fea0[:, :, tf.newaxis], [1, 1, l, 1])
            # shape = [N, M, L, 2K]
            fea01 = tf.concat((fea0, fea1), axis=-1)

            ### Rearrange the order of the input features.
            ### This is required to have a symmetric function.
            if False:  # disabled; originally gated on `neg_fea is None`
                fea01_shape = shape_utils.combined_static_and_dynamic_shape(
                    fea01)
                rfea01 = tf.reshape(fea01, fea0_shape[:2] + [l, 2, -1])
                absum = tf.reduce_sum(tf.abs(rfea01), axis=-1)
                order = tf.argmax(absum, axis=-1)
                rf = tf.reshape(rfea01, [-1, 2, fea0_shape[-1]])
                oo = tf.reshape(order, [-1, 1])
                xfea0 = util.batched_gather(oo, rf)
                xfea1 = util.batched_gather(1 - oo, rf)
                xfea01 = tf.concat((xfea0, xfea1), axis=1)
                fea01 = tf.reshape(xfea01, fea01_shape)

        # shape = [N, 1, M, L, 2K] or [N, 1, M, 1, K]
        fea01 = fea01[:, tf.newaxis]

        if self._joining_convline is not None:
            fea01 = self._joining_convline.build(fea01, scope='join')
        self._joined_fea = tf.squeeze(fea01, 1)

        if self._stop_gradient:
            fea01 = tf.stop_gradient(fea01)

        #### ADD BACKGROUND FEATURE
        negative_loss = None
        if neg_fea is not None:
            if self._negative_attention is None:  #just averaged
                #neg_fea is [MBS, 1, 1, 1, d]
                #fea01 is [MBS*k_shot, 1, M, P, C] where P is 1 or L
                fea01_shape = shape_utils.combined_static_and_dynamic_shape(
                    fea01)
                neg_fea_shape = shape_utils.combined_static_and_dynamic_shape(
                    neg_fea)
                mbs = neg_fea_shape[0]
                k = fea01_shape[0] // mbs
                util.add_extra_tensor('fea01_before', fea01)
                # list of size MBS with [1,1,1,1,d] shape tensors
                util.add_extra_tensor('neg_fea_before', neg_fea)
                neg_fea_mbs = tf.split(neg_fea, mbs, axis=0)
                # list of size MBS with [k, 1, M, P, d] shape tensors
                util.add_extra_tensor('neg_fea_mbs', neg_fea_mbs)
                neg_fea_mbs_tiled = [
                    tf.tile(n, [k, 1, fea01_shape[2], fea01_shape[3], 1])
                    for n in neg_fea_mbs
                ]
                util.add_extra_tensor('neg_fea_mbs_tiled', neg_fea_mbs_tiled)
                # neg_fea will be [MBS*k, 1, M, P, d] tensor
                neg_fea = tf.concat(neg_fea_mbs_tiled, axis=0)
                util.add_extra_tensor('neg_fea', neg_fea)
                #fea01 becomes [MBS*k_shot, 1, M, P, C+d]
                fea01 = tf.concat((fea01, neg_fea), axis=-1)
                util.add_extra_tensor('fea01_after', fea01)
            else:
                #[MBS*k_shot, 1, M, 1, C]
                fea01, negative_loss = self._negative_attention.build(
                    fea01, neg_fea, matched_class0, neg_matched_class,
                    reuse_vars)
        ####

        # shape = [N, 1, M', M, 2K] or [N, 1, M, 1, K]
        if self._convline is not None:
            fea01 = self._convline.build(fea01, scope='convline')

        with slim.arg_scope(self._fc_hyperparameters):
            with slim.arg_scope([slim.fully_connected],
                                activation_fn=None,
                                normalizer_fn=None,
                                normalizer_params=None):
                scores = slim.fully_connected(fea01, score_size, scope='score')
                if self._sum_output:
                    with tf.variable_scope('score', reuse=True):
                        w = tf.get_variable('weights')
                        b = tf.get_variable('biases')
                        sw = tf.reduce_sum(w, axis=-1)
                        sb = tf.reduce_sum(b)
                        scores = (tf.tensordot(fea01, w[:, 0], [[4], [0]]) +
                                  sb)[..., tf.newaxis]

                scores = tf.squeeze(scores, 1)

                if self._target_score_inds is not None and score_size > 1:
                    score_inds = self._target_score_inds[..., tf.newaxis]
                    scores_trans = tf.transpose(scores, [0, 3, 1, 2])
                    scores_trans = util.batched_gather(score_inds,
                                                       scores_trans)
                    scores = tf.transpose(scores_trans, [0, 2, 3, 1])

                return scores, negative_loss
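
# Illustrative sketch (not part of the original code): selecting the score channel of each
# bag's target class, as the transpose/gather at the end of _build does. This uses plain
# NumPy take_along_axis in place of util.batched_gather; shapes are made up.
import numpy as np

scores_nd = np.random.randn(2, 3, 4, 5)                    # [N, M, L, score_size]
target_inds = np.array([1, 4])                             # one target-class index per batch item
selected = np.take_along_axis(
    scores_nd, target_inds[:, None, None, None], axis=-1)  # [N, M, L, 1]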