Code Example #1
File: metrics.py Project: yutoc/tflearn
    def build(self, predictions, targets, inputs=None):
        """ Prints the number of each kind of prediction """
        self.built = True
        pshape = predictions.get_shape()
        self.inner_metric.build(predictions, targets, inputs)

        with tf.name_scope(self.name):
            if len(pshape) == 1 or (len(pshape) == 2 and int(pshape[1]) == 1):
                self.name = self.name or "binary_prediction_counts"
                y, idx, count = tf.unique_with_counts(tf.argmax(predictions))
                self.tensor = tf.Print(self.inner_metric.tensor, [y, count],
                                       name=self.inner_metric.name)
            else:
                self.name = self.name or "categorical_prediction_counts"
                y, idx, count = tf.unique_with_counts(
                    tf.argmax(predictions, dimension=1))
                self.tensor = tf.Print(self.inner_metric.tensor, [y, count],
                                       name=self.inner_metric.name)
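For reference, here is a minimal, self-contained sketch (TF1 session style, with made-up values) of what tf.unique_with_counts returns, since all of the examples on this page build on it:

import tensorflow as tf

preds = tf.constant([1, 0, 1, 1, 2])
# y: unique values, idx: index of each element into y, count: occurrences of each unique value
y, idx, count = tf.unique_with_counts(preds)
with tf.Session() as sess:
    print(sess.run([y, idx, count]))  # [1, 0, 2], [0, 1, 0, 0, 2], [3, 1, 1]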
Code Example #2
    def __init__(self, K=5, dist_threshold=14):
        '''
        Why this dist_threshold value?
        See notebook: notebooks/experiments_with_classification.ipynb
        :param K: number of nearest neighbours used for the vote
        :param dist_threshold: maximum L1 distance for a neighbour to be counted as valid
        '''

        # current training data
        self.X_train = None
        self.y_train = None
        self.idx_to_lbl = None
        self.lbl_to_idx = None
        self.y_train_idx = None

        # main params
        self.dist_threshold_value = dist_threshold
        self.K = K

        # placeholders
        self.xtr = tf.placeholder(tf.float32, [None, EMBEDDING_SIZE],
                                  name='X_train')
        self.ytr = tf.placeholder(tf.float32, [None], name='y_train')
        self.xte = tf.placeholder(tf.float32, [EMBEDDING_SIZE], name='x_test')
        self.dist_threshold = tf.placeholder(tf.float32,
                                             shape=(),
                                             name="dist_threshold")

        ############ build model ############

        # model
        distance = tf.reduce_sum(tf.abs(tf.subtract(self.xtr, self.xte)),
                                 axis=1)
        values, indices = tf.nn.top_k(tf.negative(distance),
                                      k=self.K,
                                      sorted=False)
        nn_dist = tf.negative(values)
        self.valid_nn_num = tf.reduce_sum(
            tf.cast(nn_dist < self.dist_threshold, tf.float32))
        nn = []
        for i in range(self.K):
            nn.append(self.ytr[indices[i]])  # label of the i-th nearest neighbour

        # stack the K neighbour labels into a single 1-D tensor
        nearest_neighbors = tf.stack(nn)
        # unique_with_counts returns the unique neighbour labels and how many times
        # each occurs; the argmax over the counts below gives the majority vote
        self.y, idx, self.count = tf.unique_with_counts(nearest_neighbors)
        self.pred = tf.slice(self.y,
                             begin=[tf.argmax(self.count, 0)],
                             size=tf.constant([1], dtype=tf.int64))[0]
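A hedged usage sketch of the classifier above. The class name KNNClassifier, the value of EMBEDDING_SIZE and the random data are assumptions for illustration only; the attribute names (xtr, ytr, xte, dist_threshold, pred, valid_nn_num) come from the snippet:

import numpy as np
import tensorflow as tf

EMBEDDING_SIZE = 128                           # assumed to match the module-level constant
clf = KNNClassifier(K=5, dist_threshold=14)    # hypothetical name for the class defined above

x_train = np.random.rand(100, EMBEDDING_SIZE).astype(np.float32)
y_train = np.random.randint(0, 3, size=100).astype(np.float32)
x_test = np.random.rand(EMBEDDING_SIZE).astype(np.float32)

with tf.Session() as sess:
    label, n_valid = sess.run(
        [clf.pred, clf.valid_nn_num],
        feed_dict={clf.xtr: x_train, clf.ytr: y_train,
                   clf.xte: x_test, clf.dist_threshold: 14.0})
    print("predicted label:", label, "valid neighbours:", n_valid)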
Code Example #3
File: base.py Project: YuTpa/meta-blocks
    def stratify_by_cluster(size, clusters, parallel_iterations=8):
        """Allocate `size` samples across the clusters in `clusters` as evenly as the cluster counts allow."""
        unique_clusters, _, cluster_counts = tf.unique_with_counts(clusters)
        num_unique_clusters = tf.size(unique_clusters)

        def cond_fn(size_left, _cluster_counts_left, _cluster_sizes):
            return tf.greater(size_left, 0)

        def body_fn(size_left, cluster_counts_left, cluster_sizes):
            # Determine available clusters.
            cluster_mask = tf.greater(cluster_counts_left, 0)
            available_clusters = tf.where(cluster_mask)[:, 0]
            # Uniformly select clusters from available.
            indices = tf.random.uniform(dtype=tf.int32,
                                        shape=(size_left, ),
                                        maxval=tf.size(available_clusters))
            cluster_indices = tf.gather(available_clusters, indices, axis=0)
            cluster_sizes = tf.tensor_scatter_nd_add(
                cluster_sizes,
                indices=tf.expand_dims(cluster_indices, -1),
                updates=tf.ones_like(cluster_indices, dtype=tf.int32),
            )
            # Truncate cluster sizes as necessary.
            cluster_sizes = tf.minimum(cluster_sizes, cluster_counts)
            cluster_counts_left = cluster_counts - cluster_sizes
            size_left = size - tf.reduce_sum(cluster_sizes)
            return size_left, cluster_counts_left, cluster_sizes

        # Ideal stratification.
        min_size = tf.math.floordiv(size, num_unique_clusters)
        cluster_sizes_init = tf.tile([min_size], [num_unique_clusters])
        cluster_sizes_init = tf.minimum(cluster_sizes_init, cluster_counts)
        cluster_counts_left_init = cluster_counts - cluster_sizes_init
        size_left_init = size - tf.reduce_sum(cluster_sizes_init)

        # Keep sampling uniformly from available clusters to add up to size.
        _, _, cluster_sizes = tf.while_loop(
            cond=cond_fn,
            body=body_fn,
            loop_vars=[
                size_left_init, cluster_counts_left_init, cluster_sizes_init
            ],
            back_prop=False,
            parallel_iterations=parallel_iterations,
            name="stratified-sampling",
        )

        return cluster_sizes, unique_clusters
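A hedged usage sketch, assuming the method can be called as a plain function (the enclosing class and any decorator are not shown in the snippet) and using made-up cluster ids:

import tensorflow as tf

clusters = tf.constant([0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=tf.int32)
# Ask for 5 samples spread across the clusters present in `clusters`.
cluster_sizes, unique_clusters = stratify_by_cluster(size=5, clusters=clusters)

with tf.Session() as sess:
    sizes, uniq = sess.run([cluster_sizes, unique_clusters])
    # sizes[i] samples should be drawn from cluster uniq[i]; the sizes sum to 5
    # and never exceed the number of available samples in each cluster.
    print(uniq, sizes)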
Code Example #4
def center_loss(labels, features, alpha=ALPHA, num_classes=NUM_CLASSES):
    """
    获取center loss及更新样本的center
    :param labels: Tensor,表征样本label,非one-hot编码,shape应为(batch_size,).
    :param features: Tensor,表征样本特征,最后一个fc层的输出,shape应该为(batch_size, num_classes).
    :param alpha: 0-1之间的数字,控制样本类别中心的学习率,细节参考原文.
    :param num_classes: 整数,表明总共有多少个类别,网络分类输出有多少个神经元这里就取多少.
    :return: Tensor, center-loss, shape因为(batch_size,)
    """
    # Get the feature dimensionality, e.g. 256
    len_features = features.get_shape()[1]
    # Create a Variable of shape [num_classes, len_features] that stores the class centers
    # for the whole dataset; trainable=False because the centers are not updated by gradients
    centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    # Flatten labels to 1-D (a no-op if labels are already 1-D)
    labels = tf.reshape(labels, [-1])

    # Look up the center corresponding to each sample in the mini-batch by its label
    centers_batch = tf.gather(centers, labels)

    # Difference between the mini-batch features and their corresponding centers
    diff = centers_batch - features

    # Count how many times each class appears in the mini-batch; see Eq. (4) of the original paper
    unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    # Update the centers
    centers_update_op = tf.scatter_sub(centers, labels, diff)

    # Use tf.control_dependencies so the centers are updated before the loss is evaluated
    with tf.control_dependencies([centers_update_op]):
        # Compute the center loss
        c_loss = tf.nn.l2_loss(features - centers_batch)

    return c_loss
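A hedged sketch of how center_loss is typically combined with a softmax loss during training. The tensors logits, labels and features, and the weight LAMBDA, are assumptions for illustration, not part of the snippet:

# labels: int class ids (batch_size,), logits: (batch_size, num_classes),
# features: (batch_size, len_features) from the layer feeding the classifier
softmax_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
c_loss = center_loss(labels, features, alpha=0.5, num_classes=10)
LAMBDA = 0.01  # assumed weight of the center-loss term
total_loss = softmax_loss + LAMBDA * c_loss
train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)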
Code Example #5
xtr = tf.placeholder("float", [None, 784])  # training images; 784 = flattened MNIST, assumed
ytr = tf.placeholder("float", [None, 10])
xte = tf.placeholder("float", [784])

# Negative Euclidean distance, so that tf.nn.top_k (which picks the largest
# values) returns the K nearest neighbours
distance = tf.negative(
    tf.sqrt(
        tf.reduce_sum(tf.square(tf.subtract(xtr, xte)), axis=1)))
# Prediction: get the K minimum-distance neighbours
values, indices = tf.nn.top_k(distance, k=K, sorted=False)

nearest_neighbors = []
for i in range(K):
    nearest_neighbors.append(tf.argmax(ytr[indices[i]], 0))

neighbors_tensor = tf.stack(nearest_neighbors)
y, idx, count = tf.unique_with_counts(neighbors_tensor)
pred = tf.slice(y,
                begin=[tf.argmax(count, 0)],
                size=tf.constant([1], dtype=tf.int64))[0]

accuracy = 0.

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # loop over test data
    for i in range(len(Xte)):
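        # Hedged sketch of the loop body (not shown above): Xtr, Ytr, Xte, Yte are
        # assumed MNIST train/test arrays and np is numpy; pred yields the
        # majority-vote class id of the K nearest neighbours.
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, ytr: Ytr, xte: Xte[i, :]})
        if nn_index == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)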
Code Example #6
File: models.py Project: tranhungnghiep/AnalyzeKGE
    def loss_def(self):
        """
        Compute loss
        - Forward compute score, with dropout, batchnorm, ...
        - Cross-entropy loss, with regularization loss, label smoothing, score scaling, ...
        :return:
        """
        print("h shape:")
        print(self.h.get_shape().as_list())  # (batch_size + batch_size * neg_ratio,)
        print("t shape:")
        print(self.t.get_shape().as_list())
        print("r shape:")
        print(self.r.get_shape().as_list())
        print("y shape:")
        print(self.y.get_shape().as_list())

        # Define score function for all positive triples and negative triples
        score = self.compute_score(self.h, self.t, self.r)

        # Define loss op to minimize with train_op in Config.
        # loss for one mini batch. Note: mean
        if self.config.loss_mode == 'cross-entropy':
            # directly binary cross-entropy, using tf function
            self.loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=score))
        elif self.config.loss_mode == 'softplus':
            # rewrite the loss with softplus, changing labels from 1/0 to 1/-1
            # softplus(-y * score) is equivalent to the binary cross-entropy above:
            # it pushes score > 0 for y == 1 and score < 0 for y == -1
            self.y = self.y * 2 - 1
            self.loss_op = tf.reduce_mean(tf.nn.softplus(- self.y * score))
        elif self.config.loss_mode == 'softmax-cross-entropy':
            # this is directly softmax cross-entropy, using tf function
            # softmax sum pos/neg class exponential and push up/down together
            # need to normalize labels to distribution, and specify full distribution axes
            y_distribution = self.y / tf.reduce_sum(self.y, axis=1, keepdims=True)
            self.loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_distribution, logits=score, axis=1))

        # Embedding L2 regularization loss, computed only on the embeddings active in the batch
        # to reduce computation. Note: mean, kept separate for easier tuning of lmbda.
        # Adaptive reg weight: count entity/relation frequencies in the batch, take the mean
        # over unique entities/relations, and weight each one by its relative frequency.
        if self.config.lmbda_ent > 0:
            # FOR ADAPTIVE REG WEIGHT:
            unique, idx, count = tf.unique_with_counts(tf.concat([self.h, self.t], axis=0))
            weight = tf.cast(count / tf.reduce_sum(count), tf.float32)
            # FOR N3 REG:
            if self.config.reg_n3:
                self.loss_op += self.config.lmbda_ent * tf.reduce_mean(weight * tf.reduce_mean(tf.abs(tf.gather(self.ent_embs, unique)) ** 3, axis=[1, 2]))
            else:
                self.loss_op += self.config.lmbda_ent * tf.reduce_mean(weight * tf.reduce_mean(tf.gather(self.ent_embs, unique) ** 2, axis=[1, 2]))

        if self.config.lmbda_rel > 0:
            # FOR ADAPTIVE REG WEIGHT:
            unique, idx, count = tf.unique_with_counts(self.r)
            weight = tf.cast(count / tf.reduce_sum(count), tf.float32)
            # FOR N3 REG:
            if self.config.reg_n3:
                self.loss_op += self.config.lmbda_rel * tf.reduce_mean(weight * tf.reduce_mean(tf.abs(tf.gather(self.rel_embs, unique)) ** 3, axis=[1, 2]))
            else:
                self.loss_op += self.config.lmbda_rel * tf.reduce_mean(weight * tf.reduce_mean(tf.gather(self.rel_embs, unique) ** 2, axis=[1, 2]))

        # combinator params l2 reg loss manually. Note: mean.
        if self.config.lmbda_params > 0:
            self.loss_op += self.config.lmbda_params * tf.reduce_mean(self.wv ** 2)
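A standalone, hedged sketch of the frequency-weighted regularizer used above. The names ent_ids and ent_embs, and all shapes, are illustrative assumptions (in the snippet the embeddings are gathered and reduced over axes [1, 2], i.e. they are 3-D per entity):

import tensorflow as tf

ent_ids = tf.constant([3, 3, 7, 1, 3, 7])                    # entity ids appearing in a batch
ent_embs = tf.get_variable("ent_embs", shape=[10, 4, 2])     # [num_entities, dim1, dim2], assumed

unique, _, count = tf.unique_with_counts(ent_ids)
weight = tf.cast(count / tf.reduce_sum(count), tf.float32)   # relative frequency of each unique entity
# L2 regularizer over the unique active embeddings, weighted by batch frequency
reg = tf.reduce_mean(weight * tf.reduce_mean(tf.gather(ent_embs, unique) ** 2, axis=[1, 2]))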
Code Example #7
x_train_ph = tf.placeholder(tf.float32, shape=x_train.shape)
y_train_ph = tf.placeholder(tf.float32, shape=y_train.shape)
x_test_ph = tf.placeholder(tf.float32, shape=x_test.shape[1:])
# Compute L1 distances as negatives so that tf.nn.top_k (descending order) picks the nearest entries
distances = tf.negative(
    tf.reduce_sum(tf.reduce_sum(tf.abs(tf.subtract(x_train_ph, x_test_ph)),
                                axis=1),
                  axis=1))
# Find the top k_max + 1 entries after descending sort
top_k_values, top_k_indices = tf.nn.top_k(distances, k=k_max + 1, sorted=True)
top_k_max_labels = tf.gather(y_train_ph, top_k_indices)
predictions = []
# Compute predictions for each k in [1, k_max]
for k in range(1, k_max + 1):
    top_k_labels = tf.slice(top_k_max_labels, begin=[0], size=[k])
    unique_classes, ids, top_k_labels_counts = tf.unique_with_counts(
        top_k_labels)
    prediction = tf.gather(unique_classes, tf.argmax(top_k_labels_counts))
    predictions.append(prediction)
predictions = tf.stack(predictions)

# Start TensorFlow Session
correct_predictions_nums = np.zeros(k_max)
with tf.Session() as session:
    for i in tqdm(range(0, n)):
        predicted_values = session.run(predictions,
                                       feed_dict={
                                           x_test_ph: x_test[i],
                                           x_train_ph: x_train,
                                           y_train_ph: y_train
                                       })
        for k in range(0, k_max):
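            # Hedged sketch of the inner-loop body (not shown above); y_test is
            # assumed to hold the true label of test sample i.
            if predicted_values[k] == y_test[i]:
                correct_predictions_nums[k] += 1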
Code Example #8
    def add_single_image_info(self, image_id, eval_dict):
        groundtruth_boxes = eval_dict[
            standard_fields.InputDataFields.groundtruth_boxes]
        groundtruth_classes = eval_dict[
            standard_fields.InputDataFields.groundtruth_classes]
        detection_boxes = eval_dict[
            standard_fields.DetectionResultFields.detection_boxes]
        detection_scores = eval_dict[
            standard_fields.DetectionResultFields.detection_scores]
        detection_classes = eval_dict[
            standard_fields.DetectionResultFields.detection_classes]

        groundtruth_has_rotation = groundtruth_classes > 1
        groundtruth_boxes_with_rotation = groundtruth_boxes[
            groundtruth_has_rotation]

        #keep only detections above the score threshold and not of the 'dot' class, so they have a meaningful rotation value
        detection_within_score = detection_scores > self._score_threshold
        detection_class_has_rotation = detection_classes > 1
        detection_has_rotation_and_score = tf.logical_and(
            detection_within_score, detection_class_has_rotation)
        detection_boxes_within_score = detection_boxes[
            detection_has_rotation_and_score]
        detection_classes_within_score = detection_classes[
            detection_has_rotation_and_score]

        gt_boxlist = box_list.BoxList(
            tf.convert_to_tensor(groundtruth_boxes_with_rotation))
        det_boxlist = box_list.BoxList(
            tf.convert_to_tensor(detection_boxes_within_score))

        detection_y_rotation_angles = eval_dict[
            additional_fields.DetectionResultFields.y_rotation_angles]
        groundtruth_y_rotation_angles = eval_dict[
            additional_fields.GroundtruthResultFields.y_rotation_angles]
        detection_y_rotation_angles_within_score = detection_y_rotation_angles[
            detection_has_rotation_and_score]

        for iou_threshold, assigner in self._iou_thresholds_and_assigners:
            cls_targets, cls_weights, reg_targets, reg_weights, match = assigner.assign(
                det_boxlist, gt_boxlist)

            fg_detections = match >= 0
            fg_detection_boxes = detection_boxes_within_score[fg_detections, :]
            fg_matches = match[fg_detections]

            fg_matches_argsort = tf.argsort(fg_matches)
            fg_matches_sorted = tf.gather(fg_matches, fg_matches_argsort)

            gt_match_indices, fg_match_sorted_indices_with_repeats, fg_match_sorted_indices_counts = tf.unique_with_counts(
                fg_matches_sorted)
            # An exclusive cumulative sum of the per-index counts gives, for each unique
            # ground-truth index, the position of its first matched detection in sorted order.
            fg_match_sorted_indices_no_repeats = tf.cumsum(
                tf.pad(fg_match_sorted_indices_counts, [[1, 0]]))[:-1]

            fg_match_indices_no_repeats = tf.gather(
                fg_matches_argsort, fg_match_sorted_indices_no_repeats)

            def get_matches_and_angle_difference(fg_match_idx_tensor,
                                                 gt_match_idx_tensor):
                if debug_get_matching_boxes:
                    gt_matching_detection_boxes = tf.gather(
                        groundtruth_boxes_with_rotation,
                        gt_match_idx_tensor,
                        axis=0)
                    fg_matching_detection_boxes = tf.gather(
                        fg_detection_boxes, fg_match_idx_tensor, axis=0)
                    pass

                fg_matching_detection_y_rot_angles = tf.gather(
                    detection_y_rotation_angles_within_score,
                    fg_match_idx_tensor,
                    axis=0)

                groundtruth_y_rotation_angles_matches = tf.gather(
                    groundtruth_y_rotation_angles, gt_match_idx_tensor, axis=0)
                groundtruth_has_y_rot = tf.math.logical_not(
                    tf.math.equal(groundtruth_y_rotation_angles_matches, 0))
                groundtruth_existant_y_rot_angle = groundtruth_y_rotation_angles_matches[
                    groundtruth_has_y_rot]

                detection_existant_y_rot_angle = fg_matching_detection_y_rot_angles[
                    groundtruth_has_y_rot]

                angle_diff = detection_existant_y_rot_angle - groundtruth_existant_y_rot_angle
                angle_diff_unwrapped = tf.math.atan2(tf.math.sin(angle_diff),
                                                     tf.math.cos(angle_diff))
                angle_diff_abs = tf.math.abs(angle_diff_unwrapped)

                n_angle_matches = len(angle_diff)

                return n_angle_matches, angle_diff_abs

            num_angle_matches, abs_angle_differences = get_matches_and_angle_difference(
                fg_match_indices_no_repeats, gt_match_indices)
            angle_diff_sum_square = tf.reduce_sum(
                tf.math.square(abs_angle_differences * 180 / np.pi))
            match_angle_diff_histogram = tf.histogram_fixed_width(
                abs_angle_differences * 180 / np.pi,
                self._histogram_range,
                nbins=self._num_angle_bins,
                dtype=tf.dtypes.int32)

            self.total_num_angle_matches[iou_threshold] += num_angle_matches
            self.total_angle_diff_sum_squared[
                iou_threshold] += angle_diff_sum_square
            self.angle_histograms[iou_threshold] += match_angle_diff_histogram
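A minimal, hedged sketch of the deduplication trick used above, on made-up match indices: after sorting the matches, tf.unique_with_counts plus an exclusive cumulative sum of the counts gives the position of the first detection matched to each ground-truth box.

import tensorflow as tf

fg_matches_sorted = tf.constant([0, 0, 2, 2, 2, 5])
gt_indices, _, counts = tf.unique_with_counts(fg_matches_sorted)   # gt_indices=[0, 2, 5], counts=[2, 3, 1]
first_positions = tf.cumsum(tf.pad(counts, [[1, 0]]))[:-1]         # -> [0, 2, 5]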