Example #1
    def build(self, predictions, targets, inputs=None):
        """ Prints the number of each kind of prediction """
        self.built = True
        pshape = predictions.get_shape()

        if len(pshape) == 1 or (len(pshape) == 2 and int(pshape[1]) == 1):
            self.name = self.name or "binary_prediction_counts"
            self.tensor = tf.unique_with_counts(predictions)
        else:
            self.name = self.name or "categorical_prediction_counts"
            self.tensor = tf.unique_with_counts(
                tf.argmax(predictions, dimension=1))
        self.tensor.m_name = self.name
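For reference, a minimal sketch of what tf.unique_with_counts returns for a 1-D tensor (TensorFlow 2.x eager execution assumed; the toy values are hypothetical):

import tensorflow as tf

# Toy 1-D predictions; tf.unique_with_counts requires a 1-D input.
predictions = tf.constant([1, 0, 1, 1, 2, 0])
y, idx, count = tf.unique_with_counts(predictions)
print(y.numpy())      # unique values: [1 0 2]
print(idx.numpy())    # index of each element into y: [0 1 0 0 2 1]
print(count.numpy())  # occurrences of each unique value: [3 2 1]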
Example #2
    def build(self, predictions, targets, inputs=None):
        """ Prints the number of each kind of prediction """
        self.built = True
        pshape = predictions.get_shape()
        self.inner_metric.build(predictions, targets, inputs)

        with tf.name_scope(self.name):
            if len(pshape) == 1 or (len(pshape) == 2 and int(pshape[1]) == 1):
                self.name = self.name or "binary_prediction_counts"
                y, idx, count = tf.unique_with_counts(tf.argmax(predictions))
                self.tensor = tf.Print(self.inner_metric.tensor, [y, count], name=self.inner_metric.name)
            else:
                self.name = self.name or "categorical_prediction_counts"
                y, idx, count = tf.unique_with_counts(tf.argmax(predictions, dimension=1))
                self.tensor = tf.Print(self.inner_metric.tensor, [y, count], name=self.inner_metric.name)
Example #3
def normalize_unique(x):
    print("normalize_unique: " + str(x))
    with tf.device('/cpu:0'):
        ___, idx, count = tf.unique_with_counts(x)
    counts = tf.gather(count, idx)
    print("counts: " + str(tf.cast(1 / counts, tf.float32)))
    return tf.cast(1 / counts, tf.float32)
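The tf.gather(count, idx) idiom above maps each element back to the frequency of its own value; a small sketch of the resulting inverse-frequency weights (TensorFlow 2.x eager execution assumed, toy values):

import tensorflow as tf

x = tf.constant([5, 3, 5, 5, 7])
_, idx, count = tf.unique_with_counts(x)
per_element_counts = tf.gather(count, idx)             # [3 1 3 3 1]
weights = tf.cast(1 / per_element_counts, tf.float32)  # [0.33 1. 0.33 0.33 1.]
print(weights.numpy())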
Example #4
def reduce_batch_weighted_counts(x, weights=None):
  """Performs batch-wise reduction to produce (possibly weighted) counts.

  Args:
    x: Input `Tensor`.
    weights: (Optional) Weights input `Tensor`.

  Returns:
    a named tuple of...
      The unique values in x
      The sum of the weights for each unique value in x if weights are provided,
        else None
  """
  if isinstance(x, tf.SparseTensor):
    x = x.values
  if weights is None:
    # TODO(b/112916494): Always do batch wise reduction once possible.

    return ReducedBatchWeightedCounts(tf.reshape(x, [-1]), None, None, None)
  # TODO(b/134075780): Revisit expected weights shape when input is sparse.
  x, weights = assert_same_shape(x, weights)
  weights = tf.reshape(weights, [-1])
  x = tf.reshape(x, [-1])
  unique_x_values, unique_idx, _ = tf.unique_with_counts(x, out_idx=tf.int64)
  summed_weights_per_x = tf.math.unsorted_segment_sum(
      weights, unique_idx, tf.size(input=unique_x_values))
  return ReducedBatchWeightedCounts(unique_x_values, summed_weights_per_x, None,
                                    None)
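The weighted-count step in isolation (a sketch assuming TensorFlow 2.x eager execution and toy values): unique_with_counts assigns each element a segment id, and unsorted_segment_sum then accumulates the weights per unique value.

import tensorflow as tf

x = tf.constant([3, 1, 3, 2, 1])
weights = tf.constant([0.5, 1.0, 0.5, 2.0, 1.0])
unique_x, idx, _ = tf.unique_with_counts(x, out_idx=tf.int64)
summed_weights = tf.math.unsorted_segment_sum(weights, idx, tf.size(unique_x))
print(unique_x.numpy(), summed_weights.numpy())  # [3 1 2] [1. 2. 2.]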
Example #5
    def call(self, inputs, mask=None):
        features, feature_graph_index = inputs
        feature_graph_index = tf.reshape(feature_graph_index, (-1,))
        _, _, count = tf.unique_with_counts(feature_graph_index)
        m = kb.dot(features, self.m_weight)
        if self.use_bias:
            m += self.m_bias

        self.h = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
        self.c = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
        q_star = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], 2 * self.n_hidden]))
        for i in range(self.T):
            self.h, c = self._lstm(q_star, self.c)
            e_i_t = tf.reduce_sum(
                input_tensor=m * repeat_with_index(self.h, feature_graph_index), axis=-1)
            exp = tf.exp(e_i_t)
            # print(exp.shape)
            seg_sum = tf.transpose(
                a=tf.math.segment_sum(
                    tf.transpose(a=exp, perm=[1, 0]),
                    feature_graph_index),
                perm=[1, 0])
            seg_sum = tf.expand_dims(seg_sum, axis=-1)
            # print(seg_sum.shape)
            a_i_t = exp / tf.squeeze(
                repeat_with_index(seg_sum, feature_graph_index))
            # print(a_i_t.shape)
            r_t = tf.transpose(a=tf.math.segment_sum(
                tf.transpose(a=tf.multiply(m, a_i_t[:, :, None]), perm=[1, 0, 2]),
                feature_graph_index), perm=[1, 0, 2])
            q_star = kb.concatenate([self.h, r_t], axis=-1)
        return q_star
Example #6
def get_center_loss(features, labels, alpha, num_classes):
    len_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, len_features],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    labels = tf.reshape(labels, [-1])

    ##############################################################
    # centers0=tf.unsorted_segment_mean(features,labels,num_classes)
    # EdgeWeights=tf.ones((num_classes,num_classes))-tf.eye(num_classes)
    # margin=tf.constant(1000,dtype="float32")
    # norm = lambda x: tf.reduce_sum(tf.square(x), 1)
    # center_pairwise_dist = tf.transpose(norm(tf.expand_dims(centers0, 2) - tf.transpose(centers0)))
    # loss_0= tf.reduce_sum(tf.multiply(tf.maximum(0.0, margin-center_pairwise_dist),EdgeWeights))
    ################################################################

    centers_batch = tf.gather(centers, labels)
    diff = centers_batch - features
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    loss = tf.nn.l2_loss(features - centers_batch)
    centers_update_op = tf.scatter_sub(centers, labels, diff)
    return loss, centers_update_op
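A rough usage sketch for the function above (TF1-style graph mode via tf.compat.v1 assumed; the shapes and feed values are hypothetical). The returned centers_update_op only takes effect when it is actually run, e.g. alongside the loss or under tf.control_dependencies of the train op:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

features = tf.placeholder(tf.float32, [None, 2])  # hypothetical 2-D embeddings
labels = tf.placeholder(tf.int64, [None])
center_loss, centers_update_op = get_center_loss(features, labels, alpha=0.5, num_classes=3)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Each step: evaluate the loss and apply the center update together.
    loss_val, _ = sess.run([center_loss, centers_update_op],
                           feed_dict={features: np.random.randn(4, 2),
                                      labels: [0, 1, 1, 2]})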
Example #7
def get_top_elements(list_of_elements, max_user_contribution):
  """Gets the top max_user_contribution words from the input list.

  Note that the returned set of top words will not necessarily be sorted.

  Args:
    list_of_elements: A tensor containing a list of elements.
    max_user_contribution: The maximum number of elements to keep.

  Returns:
    A tensor of a list of strings.
    If the total number of unique words is less than or equal to
    max_user_contribution, returns the set of unique words.
  """
  words, _, counts = tf.unique_with_counts(list_of_elements)
  if tf.size(words) > max_user_contribution:
    # This logic is influenced by the focus on global heavy hitters and
    # thus implements clipping by chopping the tail of the distribution
    # of the words as present on a single client. Another option could
    # be to pick max_words_per_user random words out of the unique
    # words present locally.
    top_indices = tf.argsort(
        counts, axis=-1, direction='DESCENDING')[:max_user_contribution]
    top_words = tf.gather(words, top_indices)
    return top_words
  return words
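A small eager-mode sketch of the function above (TensorFlow 2.x assumed; the word list is hypothetical):

import tensorflow as tf

words = tf.constant(['the', 'cat', 'the', 'dog', 'the', 'cat'])
print(get_top_elements(words, max_user_contribution=2).numpy())
# -> [b'the' b'cat'], the two locally most frequent words (order not guaranteed)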
Example #8
def get_center_loss(layer_feature,
                    layer,
                    labels_one_layer,
                    ini_feat,
                    alpha=0.5,
                    num_classes=21):

    feature_shape = layer_feature.get_shape()[1]

    # centers = tf.get_variable('centers_' + layer, dtype=tf.float32,
    #                             initializer = ini_feat, trainable=False)
    centers = tf.get_variable('centers_' + layer, [num_classes, feature_shape],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)

    centers_batch = tf.gather(centers, labels_one_layer)
    loss = tf.nn.l2_loss(layer_feature - centers_batch)
    loss = tf.div(loss, tf.cast(feature_shape, tf.float32))

    diff = centers_batch - layer_feature
    unique_label, unique_idx, unique_count = tf.unique_with_counts(
        labels_one_layer)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    centers_update_op = tf.scatter_sub(centers, labels_one_layer, diff)

    return loss, centers_update_op
Example #9
def center_loss_v2(features, labels, *args, **kargs):
    config = args[0]
    alpha = config.alpha
    num_classes = config.num_classes
    with tf.variable_scope(config.scope + "_center_loss"):
        len_features = features.get_shape()[1]

        centers = tf.get_variable(
            'centers', [num_classes, len_features],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer(),
            trainable=False)

        centers_batch = tf.gather(centers, labels)

        loss = tf.nn.l2_loss(features - centers_batch)

        diff = centers_batch - features

        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])

        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff

        centers = tf.scatter_sub(centers, labels, diff)

        return loss, centers
Example #10
def module(reuse_variables, labels, features, inferences):
    num_c = FLAGS.num_classes
    dim_f = FLAGS.feature_dim
    with tf.variable_scope("memory", reuse=reuse_variables):
        keys = tf.get_variable('keys',
                               shape=[num_c, dim_f],
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(0.0),
                               trainable=False)
        values = tf.get_variable('values',
                                 shape=[num_c, num_c],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(
                                     1.0 / float(num_c)),
                                 trainable=False)

    diff_key = tf.gather(keys, labels) - features
    diff_value = tf.gather(values, labels) - inferences

    y, idx, count = tf.unique_with_counts(labels)
    count_n = tf.expand_dims(tf.gather(count, idx), 1)

    diff_key = diff(diff_key, count_n, FLAGS.eta)
    diff_value = diff(diff_value, count_n, FLAGS.eta)

    keys = tf.scatter_sub(keys, labels, diff_key)
    values = normalize(tf.scatter_sub(values, labels, diff_value))

    return keys, values
Example #11
    def get_center_loss(self, features, labels, alpha, num_classes):

        len_features = features.get_shape()[1]
        print(len_features, '-len feature')
        centers = tf.get_variable('centers', [num_classes, len_features],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
        labels = tf.reshape(labels, [-1])
        print(labels, '   labels')

        centers_batch = tf.gather(centers, labels)
        loss = tf.nn.l2_loss(features - centers_batch)

        diff = centers_batch - features

        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])

        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff

        centers_update_op = tf.scatter_sub(centers, labels, diff)

        return loss, centers, centers_update_op
Example #12
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
       https://blog.csdn.net/u014380165/article/details/76946339
    """
    label = tf.argmax(label, axis=1)
    label = tf.cast(label, dtype=tf.int64)
    nrof_features = features.get_shape()[1]
    # during training we keep the fully-connected center features of all classes; every batch first reads the previously saved centers
    centers = tf.get_variable('centers', [nrof_classes, nrof_features],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)  # fetch the class-center features for the current batch
    # diff = (1 - alfa) * (centers_batch - features)  # difference between the current class centers and the features, used for the gradient update of Cj; here the facenet author applies a 1 - alfa factor, which is odd and differs from the original paper

    # difference between the features of the current mini-batch and their corresponding centers
    diff = centers_batch - features

    # count how many times each class appears in the mini-batch; see Eq. (4) of the original paper
    unique_label, unique_idx, unique_count = tf.unique_with_counts(label)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = tf.div(diff, tf.cast((1 + appear_times), tf.float32))
    diff = alfa * diff

    centers = tf.scatter_sub(
        centers, label, diff
    )  # update Cj (step 6 in the paper); TensorFlow keeps the centers variable for the next batch's center loss
    loss = tf.reduce_mean(tf.square(features -
                                    centers_batch))  # the current center loss, corresponding to Lc
    return loss, centers
Example #13
def sample_node_with_src(src_nodes, n, share_sample=False):
  """
  for each src node, sample "n" nodes with the same type

  Args:
    src_nodes: A 1-d `Tensor` of `int64`
    n: A scalar value of int
  Returns:
    A 2-D Tensor whose first dim equals the size of src_nodes and whose
    second dim equals n.
  """
  types = base._LIB_OP.get_node_type(src_nodes)
  y, idx, count = tf.unique_with_counts(types, out_idx=tf.int32)

  if share_sample:
    count = tf.ones_like(count, dtype=tf.int32)
    out_idx = idx
  else:
    out_idx = base._LIB_OP.inflate_idx(idx)

  rows = tf.shape(y)[0]

  ta = tf.TensorArray(dtype=tf.int64, size=rows, infer_shape=False)
  init_state = (0, (y, count, n, ta))
  condition = lambda i, _: i < rows
  _, (_, _, _, ta_final) = tf.while_loop(condition, _iter_body, init_state)
  tensor_final = ta_final.concat()

  return tf.gather(params=tensor_final, indices=out_idx, axis=0)
Example #14
def find_knn(test_x, train_x, train_y, k):
    """

    :param test_x: a sample to test
    :param train_x: the data for training
    :param train_y: the training target
    :param k: the number of nearest neighbours
    :return: the prediction target
    """

    # compute the distances between train and test data points
    distances = Q1.distanceFunc(test_x, train_x)
    neg_distance = -distances
    # take top k element
    _, indices = tf.nn.top_k(neg_distance, k=k)

    # build a N2 dim vector, with targets for the test data points
    shape = test_x.shape[0]
    prediction_y = tf.zeros([shape], tf.int32)

    # find the nearest neighbor of each point
    for i in range(shape):
        k_neighbors = tf.gather(train_y, indices[i, :])

        # find the most possible neighbor
        values, _, counts = tf.unique_with_counts(
            tf.reshape(k_neighbors, shape=[-1]))
        _, max_count_idx = tf.nn.top_k(counts, k=1)
        prediction = tf.gather(values, max_count_idx)

        # add the dense to the prediction set
        sparse_test_target = tf.SparseTensor([[i]], prediction, [shape])
        prediction_y = tf.add(prediction_y,
                              tf.sparse_tensor_to_dense(sparse_test_target))
    return prediction_y
Example #15
def pred_loss_ensemble(last_layer, out_W, labels):
    with tf.variable_scope('ensemble'):
        out_W = tf.expand_dims(out_W, axis=0)  #[1,100,4]
        out_W = tf.tile(
            out_W,
            [last_layer.get_shape().as_list()[0], 1, 1])  # (None, 100, 4)
        logits = tf.matmul(last_layer, out_W)  # (None, ens, 4)
        print("ensemble logits shape", logits.get_shape().as_list())

        labels = tf.expand_dims(labels, axis=1)  #[None,1,4]
        ensemble_size = logits.get_shape().as_list()[1]
        labels = tf.tile(labels, [1, ensemble_size, 1])  # (None, ens, 4)
        print("ensemble labels shape", labels.get_shape().as_list())
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                labels=labels)
        loss_per_sample = tf.reduce_mean(cross_entropy, axis=1)
        loss_ent = tf.reduce_mean(loss_per_sample)

        ensemble_predict = tf.argmax(logits, axis=2)  # (None, ens)
        batch_predictions = []
        for i in range(ensemble_predict.get_shape().as_list()
                       [0]):  # per sample, has to be 1-D.
            elements, idx, count = tf.unique_with_counts(ensemble_predict[i])
            most_ind = tf.argmax(count)
            most_label = elements[most_ind]
            batch_predictions.append(most_label)

        predict = tf.convert_to_tensor(batch_predictions)
        print("ensemble predict shape", predict.get_shape().as_list())
        return predict, loss_ent
Example #16
def _has_enough_pixels_of_each_object_in_first_frame(label,
                                                     decoder_output_stride):
    """Checks if for each object (incl. background) enough pixels are visible.

  During test time, we will usually not see a reference frame in which only
  very few pixels of one object are visible. These cases can be problematic
  during training, especially if more than the 1-nearest neighbor is used.
  That's why this function can be used to detect and filter these cases.

  Args:
    label: Label tensor of shape [num_frames, height, width, 1].
    decoder_output_stride: Integer, the stride of the decoder output.

  Returns:
    Boolean, whether the labels have enough pixels of each object in the first
      frame.
  """
    h, w = train_utils.resolve_shape(label)[1:3]
    h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
    w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
    label_downscaled = tf.squeeze(tf.image.resize_nearest_neighbor(
        label[0, tf.newaxis], [h_sub, w_sub], align_corners=True),
                                  axis=0)
    _, _, counts = tf.unique_with_counts(tf.reshape(label_downscaled, [-1]))
    has_enough_pixels_per_object = tf.reduce_all(
        tf.greater_equal(counts, MIN_LABEL_COUNT))
    return has_enough_pixels_per_object
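The pixel-count check in isolation (eager sketch with toy values; MIN_LABEL_COUNT is assumed to be 3 here):

import tensorflow as tf

flat_labels = tf.constant([0, 0, 0, 1, 1, 1, 1, 2, 2])  # hypothetical downscaled label map, flattened
_, _, counts = tf.unique_with_counts(flat_labels)
print(tf.reduce_all(tf.greater_equal(counts, 3)).numpy())  # False: the object with label 2 has only 2 pixels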
Example #17
def get_center_loss(features, labels, alpha, num_classes):
    """
    Arguments:
        features: Tensor, shape [batch_size, feature_length].
        labels: Tensor, shape [batch_size]. Not the one-hot label.
        alpha: center update learning rate
        num_classes: how many classes.

    Return:
        loss: Tensor,
        centers: Tensor
        centers_update_op:
    """
    len_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, len_features],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    loss = tf.nn.l2_loss(features - centers_batch)
    diff = centers_batch - features
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff
    centers_update_op = tf.scatter_sub(centers, labels, diff)
    # need to update after every epoch, the key is to update the center of the classes.

    return loss, centers, centers_update_op
Example #18
def get_center_loss(Features, Labels, alpha, num_classes, scope, reuse):
    with tf.variable_scope(scope, reuse=reuse):
        len_features = Features.get_shape()[1]
        centers = tf.get_variable('centers', [num_classes, len_features],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)

        Labels = tf.reshape(Labels, [-1])

        centers_batch = tf.gather(centers, Labels)
        numerator = tf.norm(Features - centers_batch, axis=-1)
        f = tf.expand_dims(Features, axis=1)
        f = tf.tile(f, [1, centers.shape[0], 1])
        denominator = tf.norm(f - centers, axis=-1)
        denominator = 1e-8 + tf.reduce_sum(denominator, axis=-1) - numerator
        loss_weight = (num_classes - 1) * numerator / denominator

        diff = centers_batch - Features

        unique_label, unique_idx, unique_count = tf.unique_with_counts(Labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])

        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff
        centers_update_op = tf.scatter_sub(centers, Labels, diff)

    return loss_weight, centers, centers_update_op
Example #19
    def call(self, y_true, y_pred):
        embedding = y_pred[:, :self.feature_dim]
        labels = tf.argmax(y_true, axis=1)
        centers_batch = tf.gather(self.centers, labels)
        # loss = tf.reduce_mean(tf.square(embedding - centers_batch))
        loss = tf.reduce_mean(tf.square(embedding - centers_batch), axis=-1)

        # Update centers
        # diff = (1 - self.alpha) * (centers_batch - embedding)
        diff = centers_batch - embedding
        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])

        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = self.alpha * diff
        # print(centers_batch.shape, self.centers.shape, labels.shape, diff.shape)
        self.centers.assign(
            tf.tensor_scatter_nd_sub(self.centers, tf.expand_dims(labels, 1),
                                     diff))
        # centers_batch = tf.gather(self.centers, labels)
        if self.logits_loss:
            return self.logits_loss(
                y_true, y_pred[:, self.feature_dim:]) + loss * self.factor
        else:
            return loss * self.factor
Example #20
def _reduce_vocabulary_inputs(x, weights, labels=None):
  """Reduces vocabulary inputs.

  Args:
    x: Input `Tensor` for vocabulary analyzer.
    weights: Weights `Tensor` for vocabulary analyzer.
    labels: (optional) Binary Labels `Tensor` for vocabulary analyzer.

  Returns:
    A tuple of 4 `Tensor`s:
      * unique values
      * total weights sum for unique values
      * sum of positive weights for unique values when labels is provided,
        otherwise, None.
      * counts of unique values when labels is provided, otherwise, None.
  """
  unique = tf.unique_with_counts(x, out_idx=tf.int64)

  summed_weights = tf.unsorted_segment_sum(weights, unique.idx,
                                           tf.size(unique.y))
  if labels is None:
    summed_positive_weights = None
    counts = None
  else:
    less_assert = tf.Assert(tf.less_equal(tf.reduce_max(labels), 1), [labels])
    greater_assert = tf.Assert(tf.greater_equal(
        tf.reduce_min(labels), 0), [labels])
    with tf.control_dependencies([less_assert, greater_assert]):
      labels = tf.identity(labels)
    positive_weights = (
        tf.cast(labels, tf.float32) * tf.cast(weights, tf.float32))
    summed_positive_weights = tf.unsorted_segment_sum(
        positive_weights, unique.idx, tf.size(unique.y))
    counts = unique.count

  return (unique.y, summed_weights, summed_positive_weights, counts)
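As used above, tf.unique_with_counts also returns its results as a namedtuple, so the fields can be accessed by name (eager sketch, toy values):

import tensorflow as tf

unique = tf.unique_with_counts(tf.constant([4, 4, 7]), out_idx=tf.int64)
print(unique.y.numpy(), unique.idx.numpy(), unique.count.numpy())  # [4 7] [0 0 1] [2 1]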
Example #21
        def _get_Lesion_ID(_meta_ID_csv):
            def _get_ID(_raw_id_data):
                _is_empty_idx = _find_empty_strings(_raw_id_data)
                return _replace_tensor_values(_raw_id_data, 'NA',
                                              _is_empty_idx)

            _full_id_list = _get_ID(_meta_ID_csv[1:, 3])
            _unique_ids, _id_locations, _repeat_counts = tf.unique_with_counts(
                _full_id_list)
            # find the indices of all images with no provided lesion ID
            _NA_id_idx = tf.constant(
                tf.where(tf.strings.regex_full_match(_unique_ids, 'NA')))
            # edit repeat counts so that all unlabeled IDs are assumed to be unique
            _repeat_counts = _replace_tensor_values(
                tf.cast(_repeat_counts, dtype=tf.int32),
                tf.cast(1, dtype=tf.int32), tf.cast(_NA_id_idx,
                                                    dtype=tf.int32))
            # find the lesion ID index for all lesions with repeated images, i.e. non-unique images
            _repeat_ids = tf.where(
                tf.math.greater(_repeat_counts, tf.constant([1])))
            #Find the index of all non repeated lesion IDs
            _repeat_idx = tf.where(
                tf.math.equal(tf.cast(_id_locations, dtype=tf.int32),
                              tf.cast(_repeat_ids, dtype=tf.int32)))
            # set all indices of repeated IDs to zeros, then use tf.where to make a boolean; _repeated = False gives non-repeated IDs
            _sf_repeated = _replace_tensor_values(
                tf.cast(tf.zeros(tf.size(_full_id_list)), dtype=tf.int32),
                tf.cast(1, dtype=tf.int32),
                tf.expand_dims(tf.cast(tf.squeeze(_repeat_idx[:, 1]),
                                       dtype=tf.int32),
                               axis=1))
            return _full_id_list, _sf_repeated, _unique_ids, _id_locations
Example #22
        def center_loss_imp(features, labels, alpha, num_class):
            num_features = features.get_shape()[1]
            centers = tf.get_variable('centers', [num_class, num_features],
                                      dtype=tf.float32,
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

            labels = tf.reshape(labels, [-1])
            centers_batch = tf.gather(centers, labels)

            center_loss = tf.nn.l2_loss(features - centers_batch,
                                        name="center_loss")  ##center_loss
            # center_loss = tf.div(tf.nn.l2_loss(features - centers_batch), int(num_features), name="center_loss")##center_loss
            #update centers
            diff = centers_batch - features
            unique_label, unique_idx, unique_count = tf.unique_with_counts(
                labels)
            appear_times = tf.gather(unique_count, unique_idx)
            appear_times = tf.reshape(appear_times, [-1, 1])

            diff = diff / tf.cast((appear_times + 1), tf.float32)
            diff = alpha * diff
            centers = tf.scatter_sub(centers, labels, diff)

            return center_loss, centers
Example #23
def predict_1d_knn_target(knnTarget, rowIndex):
    # Apply majority vote to 'rowIndex'th row (K-length 1D vector)
    row = tf.gather_nd(knnTarget, rowIndex)
    y, idx, count = tf.unique_with_counts(row)
    majorCount, majorIndex = tf.nn.top_k(count, k=1)
    predRowTarget = tf.gather(y, majorIndex)
    return predRowTarget
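A quick eager-mode sketch of the majority vote above (hypothetical K-NN labels):

import tensorflow as tf

knn_targets = tf.constant([[2, 0, 2], [1, 1, 0]])  # one row of K neighbour labels per test point
print(predict_1d_knn_target(knn_targets, rowIndex=[0]).numpy())  # majority label of row 0 -> [2]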
Example #24
def center_loss_v2(config, features, labels, centers=None, **kargs):
	alpha = config.alpha
	num_classes = config.num_classes
	with tf.variable_scope(config.scope+"_center_loss"):
		print("==center loss==")
		len_features = features.get_shape()[1]
		if not centers:
			centers = tf.get_variable('centers', 
							[num_classes, len_features], 
							dtype=tf.float32,
							initializer=tf.contrib.layers.xavier_initializer(),
							trainable=False)
			print("==add center parameters==")
	 
		centers_batch = tf.gather(centers, labels)

		loss = tf.nn.l2_loss(features - centers_batch)
	 
		diff = centers_batch - features
	 
		unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
		appear_times = tf.gather(unique_count, unique_idx)
		appear_times = tf.reshape(appear_times, [-1, 1])
	 
		diff = diff / tf.cast((1 + appear_times), tf.float32)
		diff = alpha * diff

		centers_update_op = tf.scatter_sub(centers, labels, diff)

		tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, centers_update_op)
		
		return loss, centers
Example #25
def get_git_loss(embeddings, labels, num_classes):
    centers = tf.get_variable(name='centers',
                              shape=[num_classes,
                                     embeddings.get_shape()[1]],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    loss_c = tf.reduce_mean(tf.square(embeddings - centers_batch))

    diffs = (embeddings[:, tf.newaxis] - centers_batch[tf.newaxis, :])
    diffs_shape = tf.shape(diffs)
    print(diffs.get_shape())

    mask = 1 - tf.eye(diffs_shape[0], diffs_shape[1], dtype=diffs.dtype)
    diffs = diffs * mask[:, :, tf.newaxis]

    loss_g = tf.reduce_mean(tf.divide(1, 1 + tf.square(diffs)))

    diff = centers_batch - embeddings
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = tf.divide(diff, tf.cast(1 + appear_times, dtype=tf.float32))
    diff = 0.5 * diff
    centers_update_op = tf.scatter_sub(centers, labels, diff)

    loss = lambda_c * loss_c + lambda_g * loss_g

    return loss, centers_update_op
Example #26
def angular_center_loss(features, label, classes):

    label = tf.argmax(label, axis=1)
    label = tf.cast(label, dtype=tf.int64)
    feature_dim = features.get_shape()[1]
    centers = tf.get_variable("angular_centers", [classes, feature_dim],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    label = tf.reshape(label, [-1])

    centers_batch = tf.gather(centers, label)
    features_norm = tf.nn.l2_normalize(features, dim=1)
    centers_batch_norm = tf.nn.l2_normalize(centers_batch, dim=1)

    unique_label, unique_idx, unique_count = tf.unique_with_counts(label)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    update = tf.div(features_norm, tf.cast(appear_times, tf.float32))
    # updated_centers_batch = tf.add(centers_batch_norm, update)
    # updated_centers_batch_norm = tf.nn.l2_normalize(updated_centers_batch)

    centers = tf.scatter_add(centers, label, update)
    centers = tf.nn.l2_normalize(centers, dim=1)
    loss = -tf.reduce_sum(tf.multiply(features_norm, centers_batch_norm))

    return loss, centers
Example #27
def closest_class_prediction(pairwise_distances, labels):
    """
    Helper function to gather predictions for top-1 accuracy calculation

    :param pairwise_distances: nxn matrix with cosine distances within a batch
    :param labels: nx1 ids of identities
    :return:
    """

    max_values = tf.reduce_max(pairwise_distances)
    diag_replacer = tf.tile(
        tf.reduce_max(pairwise_distances)[None, ...],
        [tf.shape(pairwise_distances)[0]])

    # The distance of embedding to itself will be 0, so we're replacing it with the max value
    replaced_diag = tf.linalg.set_diag(pairwise_distances, diag_replacer)

    indecies_min = tf.arg_min(replaced_diag, 1)

    predictions_raw = tf.gather(labels, indecies_min)

    # Filter classes with one instance only
    classes, _, counts = tf.unique_with_counts(labels)
    classes_not_for_acuracy = classes[tf.equal(counts, 1)]

    _, indecies_to_keep = tf.setdiff1d(labels, classes_not_for_acuracy)

    labels_selected = tf.gather(labels, indecies_to_keep)

    predictions = tf.gather(predictions_raw, indecies_to_keep)

    return predictions, labels_selected
Example #28
def get_center_loss(features, labels, alpha=0.1, num_classes=3):

    # dimensionality of the embedding
    len_features = features.get_shape()[1]

    # initialize the centers
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)

    centers_batch = tf.gather(centers, labels)

    # compute the loss
    loss = tf.nn.l2_loss(features - centers_batch)

    # update the center of each class
    diff = centers_batch - features

    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    centers_update_op = tf.scatter_sub(centers, labels, diff)

    return loss, centers, centers_update_op
Example #29
def center_loss(features, label, alpha, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    # get the length of the feature vector
    nrof_features = features.get_shape()[1]

    # create the shared variable centers
    with tf.variable_scope('center', reuse=True):
        centers = tf.get_variable('centers')
    label = tf.reshape(label, [-1])

    # gather the center for each label; note that labels may repeat, since one label can cover several samples
    centers_batch = tf.gather(centers, label)

    # distance from each feature to its center, scaled by a factor; alpha is the update rate of the centers (the larger it is, the slower the update)
    diff = centers_batch - features

    # count how many times each class appears in a batch; see the update formula in the paper
    unique_label, unique_idx, unique_count = tf.unique_with_counts(label)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    # update the centers: subtract the corresponding diff from the centers indexed by label; a repeated label is subtracted multiple times
    centers = tf.scatter_sub(centers, label, diff)

    # center loss: l2_loss squares and sums the values and divides by 2, without taking a square root
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers
Example #30
def _has_enough_pixels_of_each_object_in_first_frame(
    label, decoder_output_stride):
  """Checks if for each object (incl. background) enough pixels are visible.

  During test time, we will usually not see a reference frame in which only
  very few pixels of one object are visible. These cases can be problematic
  during training, especially if more than the 1-nearest neighbor is used.
  That's why this function can be used to detect and filter these cases.

  Args:
    label: Label tensor of shape [num_frames, height, width, 1].
    decoder_output_stride: Integer, the stride of the decoder output.

  Returns:
    Boolean, whether the labels have enough pixels of each object in the first
      frame.
  """
  h, w = train_utils.resolve_shape(label)[1:3]
  h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
  w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
  label_downscaled = tf.squeeze(
      tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
                                       align_corners=True), axis=0)
  _, _, counts = tf.unique_with_counts(
      tf.reshape(label_downscaled, [-1]))
  has_enough_pixels_per_object = tf.reduce_all(
      tf.greater_equal(counts, MIN_LABEL_COUNT))
  return has_enough_pixels_per_object
Example #31
def cal_loss(features,labels,nrof_classes,centrloss_alpha=0.6,centerloss_lambda=0.01,w_init=tf.constant_initializer(0)):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)

        Args:

        features: embedding features
        labels: batch labels
        nrof_classes: number of classes
        centrloss_alpha: center loss hyperparameter
        centerloss_lambda: center loss hyperparameter
        w_init: initializer for the centers
    """
    #get feature length
    nrof_features = features.get_shape()[1]
    #create centers
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,initializer=w_init, trainable=False)
    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)
    loss = tf.nn.l2_loss(features - centers_batch)
    #ready to update centers
    diff =(centers_batch - features)
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    
    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = centrloss_alpha * diff
    centers = tf.scatter_sub(centers, labels, diff)
    return centerloss_lambda*loss, centers
Example #32
def sampling_typed_IS_rs(nodes_nbrs, nbr_segment, edge_features, num_sample):
    unique_nbrs = tf.unique_with_counts(nbr_segment)
    p = 1.0 / tf.cast(tf.gather(unique_nbrs.count, unique_nbrs.idx),
                      tf.float32)

    num_nbrs = tf.size(unique_nbrs.y)

    q = tf.gather(
        tf.ones(num_nbrs) / tf.cast(num_nbrs, dtype=tf.float32),
        unique_nbrs.idx)

    samples = tf.unique(
        tf.cast(tf.multinomial(tf.log([q]), num_sample)[0], tf.int32)).y

    infos = tf.sparse_to_dense(tf.reshape(tf.contrib.framework.sort(samples),
                                          [-1, 1]),
                               output_shape=tf.shape(unique_nbrs.idx),
                               sparse_values=tf.ones_like(samples,
                                                          dtype=tf.int32))

    partitions = tf.gather(infos, unique_nbrs.idx)

    samples_to_gather = tf.cast(
        tf.dynamic_partition(tf.range(tf.size(partitions)), partitions, 2)[1],
        tf.int32)

    sampled_p = tf.gather(p, samples_to_gather)
    sampled_q = tf.gather(tf.gather(q, unique_nbrs.idx), samples_to_gather)

    sampled_unique_nodes = tf.unique_with_counts(
        tf.gather(nbr_segment, samples_to_gather))

    weight1 = tf.cast(tf.gather(sampled_unique_nodes.count,
                                sampled_unique_nodes.idx),
                      dtype=tf.float32)

    weight = sampled_p / (sampled_q * weight1)
    num_sampled_edges = tf.size(samples_to_gather)
    num_sampled_nbrs = tf.size(samples)
    sampled_nbrs = tf.gather(nodes_nbrs, samples_to_gather)
    sampled_segments = tf.cast(tf.gather(nbr_segment, samples_to_gather),
                               tf.int32)
    sampled_features = tf.gather(edge_features, samples_to_gather)
    return [
        weight, num_sampled_edges, num_sampled_nbrs, sampled_nbrs,
        sampled_segments, sampled_features
    ]
Example #33
def entropy(labels):
    _, _, counts = tf.unique_with_counts(labels)
    probs = counts / K.sum(counts)
    class_ent = -1*probs*K.log(probs)
    ent = K.sum(class_ent)
    z = tf.divide(labels,labels) * tf.cast(ent,dtype = tf.float32)
    print(z.shape)
    return ent
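The frequency-to-entropy computation above, written directly against TensorFlow ops (eager sketch, toy labels):

import tensorflow as tf

labels = tf.constant([0., 0., 1., 1., 1., 1.])
_, _, counts = tf.unique_with_counts(labels)
probs = tf.cast(counts, tf.float32) / tf.cast(tf.reduce_sum(counts), tf.float32)
ent = -tf.reduce_sum(probs * tf.math.log(probs))
print(ent.numpy())  # ~0.637 nats for a 1/3 vs 2/3 class split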
Example #34
def majority_vote(atuple):
    predictions_per_token, tokens_to_keep = atuple
    x = tf.boolean_mask(predictions_per_token, tokens_to_keep)
    y, idx, count = tf.unique_with_counts(
        x)  # y contains unique elements (classes) in x
    index_of_class_with_max_tokens = tf.argmax(count)
    label = tf.gather(y, index_of_class_with_max_tokens)
    return tf.cast(label, tf.int32)
Example #35
    def testInt32(self):
        x = np.random.randint(2, high=10, size=7000)
        with self.test_session() as sess:
            y, idx, count = tf.unique_with_counts(x)
            tf_y, tf_idx, tf_count = sess.run([y, idx, count])

        self.assertEqual(len(x), len(tf_idx))
        self.assertEqual(len(tf_y), len(np.unique(x)))
        for i in range(len(x)):
            self.assertEqual(x[i], tf_y[tf_idx[i]])
        for value, count in zip(tf_y, tf_count):
            self.assertEqual(count, np.sum(x == value))
Example #36
    def testString(self):
        indx = np.random.randint(65, high=122, size=7000)
        x = [chr(i) for i in indx]

        with self.test_session() as sess:
            y, idx, count = tf.unique_with_counts(x)
            tf_y, tf_idx, tf_count = sess.run([y, idx, count])

        self.assertEqual(len(x), len(tf_idx))
        self.assertEqual(len(tf_y), len(np.unique(x)))
        for i in range(len(x)):
            self.assertEqual(x[i], tf_y[tf_idx[i]].decode("ascii"))
        for value, count in zip(tf_y, tf_count):
            v = [1 if x[i] == value.decode("ascii") else 0 for i in range(7000)]
            self.assertEqual(count, sum(v))
Example #37
def encode_annos(labels, bboxes, anchors, num_classes):
  """Encode annotations for losses computations.
  All the output tensors have a fixed shape (no dynamic dimensions).

  Args:
    labels: 1-D with shape `[num_bounding_boxes]`.
    bboxes: 2-D with shape `[num_bounding_boxes, 4]`. Format [ymin, xmin, ymax, xmax]
    anchors: 4-D tensor with shape `[num_anchors, 4]`. Format [cx, cy, w, h]

  Returns:
    input_mask: 2-D with shape `[num_anchors, 1]`, indicates which anchors are used to compute the loss.
    labels_input: 2-D with shape `[num_anchors, num_classes]`, one-hot encoding for every anchor.
    box_delta_input: 2-D with shape `[num_anchors, 4]`. Format [dcx, dcy, dw, dh]
    box_input: 2-D with shape '[num_anchors, 4]'. Format [ymin, xmin, ymax, xmax]
  """
  with tf.name_scope("Encode_annotations") as scope:
    num_anchors = config.ANCHORS
    # num_bboxes = tf.shape(bboxes)[0]

    # Cal iou, find the target anchor
    with tf.name_scope("Matching") as subscope:
      ious = batch_iou_fast(xywh_to_yxyx(anchors), bboxes)
      anchor_indices = tf.reshape(tf.arg_max(ious, dimension=1), shape=[-1, 1])  # target anchor indices
      # anchor_indices = tf.Print(anchor_indices, [anchor_indices], "anchor_indices", summarize=100)

      # discard duplicate # unique_idx wrong
      anchor_indices, idx, count = tf.unique_with_counts(tf.reshape(anchor_indices, shape=[-1]))
      ori_idx = tf.cumsum(tf.pad(count, [[1, 0]]))[:-1]
      anchor_indices = tf.reshape(anchor_indices, shape=[-1, 1])
      bboxes = tf.gather(bboxes, tf.unique(ori_idx)[0])
      labels = tf.gather(labels, tf.unique(ori_idx)[0])
      ious = tf.gather(ious, tf.unique(ori_idx)[0])
      num_bboxes = tf.shape(anchor_indices)[0]

      # TODO(shizehao):deal with duplicate
      # with tf.name_scope("Deal_with_duplicate"):
      #   dup_anchor_indices, indices_in_a, dup_anchor_indices_with_dup = find_dup(tf.reshape(anchor_indices, shape=[-1]))
      #
      #   # reset duplicated corresponding anchor
      #   conflicted_ious = tf.gather(ious, indices_in_a)
      #   top_k_anchor_indices = tf.nn.top_k(conflicted_ious, k=20).indices  # shape = [num_conflicted_bboxes, 20]
      #   dup_group_idx = tf.where(tf.equal(dup_anchor_indices_with_dup, tf.reshape(dup_anchor_indices, shape=[-1, 1])))
      #   seg_group = tf.unstack(dup_group_idx, axis=1)[0]


      with tf.name_scope("Deal_with_noneoverlap"):
        # find the none-overlap bbox
        bbox_indices = tf.reshape(tf.range(num_bboxes), shape=[-1, 1])
        # bbox_indices = tf.Print(bbox_indices, [bbox_indices], "bbox_indices", summarize=100)

        # anchor_indices = tf.Print(anchor_indices, [anchor_indices], "anchor_indices", summarize=100)
        iou_indices = tf.concat([bbox_indices, tf.cast(anchor_indices, dtype=tf.int32)], axis=1)
        # iou_indices = tf.Print(iou_indices, [iou_indices], "iou_indices", summarize=100)

        target_iou = tf.gather_nd(ious, iou_indices)
        # target_iou = tf.Print(target_iou,[target_iou],"target_iou",summarize=100)

        none_overlap_bbox_indices = tf.where(target_iou <= 0)  # 1-D
        # none_overlap_bbox_indices = tf.Print(none_overlap_bbox_indices, [none_overlap_bbox_indices], "none_overlap_bbox_indices", summarize=100)

        # find it's corresponding anchor
        target_bbox = tf.gather_nd(bboxes, none_overlap_bbox_indices)
        # target_bbox = tf.Print(target_bbox, [target_bbox], "target_bbox", summarize=100)

        closest_anchor_indices = arg_closest_anchor(target_bbox, xywh_to_yxyx(anchors))  # 1-D
        # closest_anchor_indices = tf.Print(closest_anchor_indices, [closest_anchor_indices, tf.gather(anchors, closest_anchor_indices)], "closest_anchor_indices", summarize=100)

      with tf.name_scope("Update_anchor_indices"):
        anchor_indices = tf.reshape(anchor_indices, shape=[-1])
        anchor_indices = update_tensor(anchor_indices, none_overlap_bbox_indices, closest_anchor_indices)
        anchor_indices = tf.reshape(anchor_indices, shape=[-1, 1])


    with tf.name_scope("Delta") as subscope:
      target_anchors = tf.gather_nd(anchors, anchor_indices)
      bboxes = yxyx_to_xywh(bboxes)
      delta = batch_delta(bboxes, target_anchors)



    with tf.name_scope("Scattering") as subscope:
      # bbox
      box_input = tf.scatter_nd(anchor_indices,
                                bboxes,
                                shape=[num_anchors, 4]
                                )

      # label
      labels_input = tf.scatter_nd(anchor_indices,
                                   tf.one_hot(labels, num_classes),
                                   shape=[num_anchors, num_classes]
                                   )

      # delta
      box_delta_input = tf.scatter_nd(anchor_indices,
                                      delta,
                                      shape=[num_anchors, 4]
                                      )





      # anchor mask
      # unique_indices, _ = tf.unique(tf.reshape(anchor_indices, shape=[-1]))
      # unique_indices = tf.Print(unique_indices, [unique_indices], summarize=100)
      # num_bboxes = tf.Print(num_bboxes, [num_bboxes])
      input_mask = tf.scatter_nd(anchor_indices,
                                 tf.ones([num_bboxes]),
                                 shape=[num_anchors])
      input_mask = tf.reshape(input_mask, shape=[-1, 1])

  return input_mask, labels_input, box_delta_input, box_input
Example #38
def discriminative_loss_single(
        prediction,
        correct_label,
        feature_dim,
        label_shape,
        delta_v,
        delta_d,
        param_var,
        param_dist,
        param_reg):
    """
    Instance segmentation loss from Eq. (1) of the paper.
    :param prediction: inference of network
    :param correct_label: instance label
    :param feature_dim: feature dimension of prediction
    :param label_shape: shape of label
    :param delta_v: cut off variance distance
    :param delta_d: cut off cluster distance
    :param param_var: weight for intra cluster variance
    :param param_dist: weight for inter cluster distances
    :param param_reg: weight regularization
    """

    # flatten the pixels into one row
    correct_label = tf.reshape(
        correct_label, [
            label_shape[1] * label_shape[0]])
    reshaped_pred = tf.reshape(
        prediction, [
            label_shape[1] * label_shape[0], feature_dim])

    # count the number of instances
    unique_labels, unique_id, counts = tf.unique_with_counts(correct_label)
    counts = tf.cast(counts, tf.float32)
    num_instances = tf.size(unique_labels)

    # compute the mean pixel-embedding vector per instance
    segmented_sum = tf.unsorted_segment_sum(
        reshaped_pred, unique_id, num_instances)
    mu = tf.div(segmented_sum, tf.reshape(counts, (-1, 1)))
    mu_expand = tf.gather(mu, unique_id)

    # compute the variance term of the loss, l_var
    distance = tf.norm(tf.subtract(mu_expand, reshaped_pred), axis=1)
    distance = tf.subtract(distance, delta_v)
    distance = tf.clip_by_value(distance, 0., distance)
    distance = tf.square(distance)

    l_var = tf.unsorted_segment_sum(distance, unique_id, num_instances)
    l_var = tf.div(l_var, counts)
    l_var = tf.reduce_sum(l_var)
    l_var = tf.divide(l_var, tf.cast(num_instances, tf.float32))

    # compute the distance term of the loss, l_dist
    mu_interleaved_rep = tf.tile(mu, [num_instances, 1])
    mu_band_rep = tf.tile(mu, [1, num_instances])
    mu_band_rep = tf.reshape(
        mu_band_rep,
        (num_instances *
         num_instances,
         feature_dim))

    mu_diff = tf.subtract(mu_band_rep, mu_interleaved_rep)

    # remove the zero rows (self-differences) using the mask
    intermediate_tensor = tf.reduce_sum(tf.abs(mu_diff), axis=1)
    zero_vector = tf.zeros(1, dtype=tf.float32)
    bool_mask = tf.not_equal(intermediate_tensor, zero_vector)
    mu_diff_bool = tf.boolean_mask(mu_diff, bool_mask)

    mu_norm = tf.norm(mu_diff_bool, axis=1)
    mu_norm = tf.subtract(2. * delta_d, mu_norm)
    mu_norm = tf.clip_by_value(mu_norm, 0., mu_norm)
    mu_norm = tf.square(mu_norm)

    l_dist = tf.reduce_mean(mu_norm)

    # regularization term from the original Discriminative Loss paper
    l_reg = tf.reduce_mean(tf.norm(mu, axis=1))

    # combine the losses with the weights from the original Discriminative Loss paper
    param_scale = 1.
    l_var = param_var * l_var
    l_dist = param_dist * l_dist
    l_reg = param_reg * l_reg

    loss = param_scale * (l_var + l_dist + l_reg)

    return loss, l_var, l_dist, l_reg
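The per-instance mean-embedding step of the loss above in isolation (eager sketch, toy values):

import tensorflow as tf

labels = tf.constant([0, 0, 5, 5, 5])                     # hypothetical flattened instance labels
embeddings = tf.constant([[1.], [3.], [2.], [4.], [6.]])  # one embedding vector per pixel
unique_labels, unique_id, counts = tf.unique_with_counts(labels)
num_instances = tf.size(unique_labels)
segmented_sum = tf.math.unsorted_segment_sum(embeddings, unique_id, num_instances)
mu = segmented_sum / tf.reshape(tf.cast(counts, tf.float32), (-1, 1))  # per-instance means: [[2.], [4.]]
print(mu.numpy())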
Example #39
def main():
    # trainData, validData, testData, trainTarget, validTarget, testTarget = data_segmentation("data.npy", "target.npy", 0)
    trainData, validData, testData, trainTarget, validTarget, testTarget = data_segmentation("data.npy", "target.npy", 1)
    print("shape of training data")
    print(trainData.shape)
    print(trainTarget.shape)
    print("shape of testing data")
    print(testData.shape)
    print("shape of validation data")
    print(validData.shape)

    X = tf.placeholder(tf.float32, name = 'input_x')
    Z = tf.placeholder(tf.float32, name = 'input_z')
    adjusted_train_target = trainTarget + 1

    dist = calculate_euclidean_distance(X, Z)

    best_k = 1
    best_validation_accuracy = 0
    for k in [1, 5, 10, 25, 50, 100, 200]:
        sess = tf.InteractiveSession()
        r = calculate_responsibilities(dist, k=k, nn_weight=1)
        prediction = tf.multiply(r, adjusted_train_target)
        prediction = sess.run(prediction, feed_dict={X: trainData, Z: validData})

        classifications = []
        if k == 10:
            last_image_indices = []
            image_desired = 89

        for i, pred in enumerate(prediction):
            unique_count = tf.unique_with_counts(pred)
            y, idx, count = sess.run(unique_count, feed_dict={X: trainData, Z: validData})
            if k == 10 and i == image_desired:
                last_image_indices = idx

            top_two_count = np.argpartition(-1 * count, (0, 1))
            if y[top_two_count[0]] == 0:
                highest_class = y[top_two_count[1]]
            else:
                highest_class = y[top_two_count[0]]
            classifications.append(highest_class - 1)

        validation_accuracy = accuracy_score(validTarget, classifications)
        print("Validation accuracy for k={:d} is {:f}".format(k, validation_accuracy))
        if validation_accuracy > best_validation_accuracy:
            best_k = k
            best_validation_accuracy = validation_accuracy

        if k == 10:
            neighbour_indices = [i for i, x in enumerate(last_image_indices) if x != 0]

            images = []
            for i in neighbour_indices:
                images.append(trainData[i].reshape(32, 32))
            images.append(validData[image_desired].reshape(32, 32))
            images = np.array(images)

            images = np.concatenate((images), axis=1)
            plt.title("Ten neighbours as calculated by k-NN algorithm followed by validation image")
            plt.imshow(images, cmap='gray')
            plt.show()

    print("The best k is {:d} with a corresponding validation accuracy of {:f}".format(best_k, best_validation_accuracy))
Example #40
    dtype=tf.float32,
    validate_shape=False # This shape will evolve, so we need to remove any TensorFlow optim here
)
word_dict = tf.Variable(
    initial_value=[], 
    name='word_dict', 
    dtype=tf.string,
    validate_shape=False,
    trainable=False
)
textfile = tf.placeholder(tf.string)

# Update word dict
splitted_textfile = tf.string_split(textfile, " ")
tmp_word_dict = tf.concat([word_dict, splitted_textfile.values], 0)
tmp_word_dict, word_idx, word_count = tf.unique_with_counts(tmp_word_dict)
assign_word_dict = tf.assign(word_dict, tmp_word_dict, validate_shape=False)
with tf.control_dependencies([assign_word_dict]):
    word_dict_value = word_dict.read_value()
    missing_nb_dim = tf.shape(word_dict_value)[0] - tf.shape(embed)[0]
    missing_nb_dim = tf.Print(missing_nb_dim, data=[missing_nb_dim, word_dict_value], message="missing_nb_dim, word_dict:", summarize=10)

# Update embed
def update_embed_func():
    new_columns = tf.random_normal([missing_nb_dim, 1], mean=-1, stddev=4)
    new_embed = tf.concat([embed, new_columns], 0)
    assign_op = tf.assign(embed, new_embed, validate_shape=False)
    return assign_op

should_update_embed = tf.less(0, missing_nb_dim)
assign_to_embed = tf.cond(should_update_embed, update_embed_func, lambda: embed)
Example #41
    def compute_loss(self, input_tensor, binary_label, instance_label, name):
        """
        Compute the LaneNet loss.
        :param input_tensor:
        :param binary_label:
        :param instance_label:
        :param name:
        :return:
        """
        with tf.variable_scope(name):
            # forward pass to get the logits
            inference_ret = self._build_model(input_tensor=input_tensor, name='inference')
            # compute the binary segmentation loss
            decode_logits = inference_ret['logits']
            binary_label_plain = tf.reshape(
                binary_label,
                shape=[binary_label.get_shape().as_list()[0] *
                       binary_label.get_shape().as_list()[1] *
                       binary_label.get_shape().as_list()[2]])
            # apply class weights
            unique_labels, unique_id, counts = tf.unique_with_counts(binary_label_plain)
            counts = tf.cast(counts, tf.float32)
            inverse_weights = tf.divide(1.0,
                                        tf.log(tf.add(tf.divide(tf.constant(1.0), counts),
                                                      tf.constant(1.02))))
            inverse_weights = tf.gather(inverse_weights, binary_label)
            binary_segmenatation_loss = tf.losses.sparse_softmax_cross_entropy(
                labels=binary_label, logits=decode_logits, weights=inverse_weights)
            binary_segmenatation_loss = tf.reduce_mean(binary_segmenatation_loss)

            # discriminative loss branch
            decode_deconv = inference_ret['deconv']
            # pixel embedding
            pix_embedding = self.conv2d(inputdata=decode_deconv, out_channel=4, kernel_size=1,
                                        use_bias=False, name='pix_embedding_conv')
            pix_embedding = self.relu(inputdata=pix_embedding, name='pix_embedding_relu')
            # compute the discriminative loss
            image_shape = (pix_embedding.get_shape().as_list()[1], pix_embedding.get_shape().as_list()[2])
            disc_loss, l_var, l_dist, l_reg = \
                lanenet_discriminative_loss.discriminative_loss(
                    pix_embedding, instance_label, 4, image_shape, 0.5, 3.0, 1.0, 1.0, 0.001)

            # combine the losses
            l2_reg_loss = tf.constant(0.0, tf.float32)
            for vv in tf.trainable_variables():
                if 'bn' in vv.name:
                    continue
                else:
                    l2_reg_loss = tf.add(l2_reg_loss, tf.nn.l2_loss(vv))
            l2_reg_loss *= 0.001
            total_loss = 0.5 * binary_segmenatation_loss + 0.5 * disc_loss + l2_reg_loss

            ret = {
                'total_loss': total_loss,
                'binary_seg_logits': decode_logits,
                'instance_seg_logits': pix_embedding,
                'binary_seg_loss': binary_segmenatation_loss,
                'discriminative_loss': disc_loss
            }

            return ret
Example #42
def island_loss(features, label, alpha, nrof_classes, nrof_features, lamda1=10):
    """Center loss based on the paper "Island Loss for Learning Discriminative Features in Facial Expression Recognition"
       (https://github.com/SeriaZheng/EmoNet/blob/master/loss_function/loss_paper/Island_loss.pdf)
    """
    # create the shared variable centers
    with tf.variable_scope('center', reuse=True):
        centers = tf.get_variable('centers')
    label = tf.reshape(label, [-1])

    # gather the center for each label; note that labels may repeat, since one label can cover several samples
    centers_batch = tf.gather(centers, label)

    # distance from each feature to its center, scaled by a factor; diff1 is the center-loss update term
    diff1 = centers_batch - features

    # count how many times each class appears in the batch; see the update formula in the paper
    unique_label, unique_idx, unique_count = tf.unique_with_counts(label)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff1 = diff1 / tf.cast((1 + appear_times), tf.float32)
    diff1 = alpha * diff1

    # diff2 is the island-loss center update term
    diff2 = tf.get_variable('diff2', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    for i in range(nrof_classes):
        for j in range(nrof_classes):
            if i!=j:
                diff2 = tf.scatter_add(diff2, i,
                                       (tf.gather(centers, i) / tf.sqrt(
                                           tf.reduce_sum(tf.square(tf.gather(centers, i)))) * tf.sqrt(
                                           tf.reduce_sum(tf.square(tf.gather(centers, j)))))
                                       - tf.multiply(
                                           (tf.reduce_sum(
                                               tf.multiply(tf.gather(centers, i), tf.gather(centers, j))) / tf.sqrt(
                                               tf.reduce_sum(tf.square(tf.gather(centers, i)))) *
                                            tf.pow(tf.sqrt(tf.reduce_sum(tf.square(tf.gather(centers, j)))), 3)),
                                           tf.gather(centers, j)))
    diff2 = diff2 * lamda1 / (nrof_classes - 1)
    diff2 = alpha * diff2

    # center loss: l2_loss squares and sums the values and divides by 2, without taking a square root
    loss1 = tf.nn.l2_loss(features - centers_batch)

    # island loss
    loss2 = tf.zeros(1)
    for i in range(nrof_classes):
        for j in range(nrof_classes):
            if i!=j:
                loss2 = tf.add(tf.add(tf.reduce_sum(tf.multiply(tf.gather(centers, i), tf.gather(centers, j))) / (
                        tf.sqrt(tf.reduce_sum(tf.square(tf.gather(centers, i)))) *
                        tf.sqrt(tf.reduce_sum(tf.square(tf.gather(centers, j))))), tf.ones(1)), loss2)
    loss2 = lamda1 * loss2

    loss = tf.add(loss1,loss2)

    # update the centers: subtract diff1 from the centers indexed by label; a repeated label is subtracted multiple times (diff1 and centers differ in shape)
    centers = tf.scatter_sub(centers, label, diff1)
    # diff2 has the same shape as centers, so it can be subtracted directly
    centers = tf.subtract(centers, diff2)

    return loss, centers