Example #1
def loss_function(true, pred):
    # weights_list and original_loss_function come from the enclosing scope
    # per-sample index of the true class
    class_selectors = tf.cast(K.argmax(true, axis=-1), tf.int32)
    # one binary selector per class, 1.0 where that class is the true one
    class_selectors = [
        K.equal(i, class_selectors) for i in range(len(weights_list))
    ]
    class_selectors = [K.cast(x, K.floatx()) for x in class_selectors]
    # scale each selector by its class weight, then sum into one multiplier
    weights = [sel * w for sel, w in zip(class_selectors, weights_list)]
    weight_multiplier = weights[0]
    for i in range(1, len(weights)):
        weight_multiplier = weight_multiplier + weights[i]
    # weight the base loss per sample
    loss = original_loss_function(true, pred)
    loss = loss * weight_multiplier
    return loss
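The listing drops the factory that normally wraps this closure, so weights_list and original_loss_function are free variables above. A minimal sketch of how it is usually assembled (the weighted_loss name and the imports are assumptions, not part of the original):

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.losses import categorical_crossentropy

# hypothetical factory around the closure above
def weighted_loss(original_loss_function, weights_list):
    def loss_function(true, pred):
        class_selectors = tf.cast(K.argmax(true, axis=-1), tf.int32)
        # one 0/1 selector per class, scaled by that class's weight
        selectors = [K.cast(K.equal(i, class_selectors), K.floatx())
                     for i in range(len(weights_list))]
        weight_multiplier = sum(s * w for s, w in zip(selectors, weights_list))
        return original_loss_function(true, pred) * weight_multiplier
    return loss_function

# e.g. penalize mistakes on class 2 twice as hard in a 3-class problem
loss = weighted_loss(categorical_crossentropy, [1.0, 1.0, 2.0])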
Example #2
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    # labels use one extra class (index nb_classes) as the ignore label
    y_true = K.one_hot(tf.cast(K.flatten(y_true), tf.int32), nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    correct = legal_labels & K.equal(K.argmax(y_true, axis=-1),
                                     K.argmax(y_pred, axis=-1))
    return K.sum(tf.cast(correct, K.floatx())) / K.sum(
        tf.cast(legal_labels, K.floatx()))
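A hedged usage sketch for the metric above: the labels are assumed to reserve the extra index nb_classes for "ignore" pixels, and model / some_masked_loss are stand-ins, not names from the original:

# usage sketch; `model` is an already-built segmentation network and
# `some_masked_loss` is a hypothetical loss that also skips the extra label
model.compile(optimizer='adam',
              loss=some_masked_loss,
              metrics=[sparse_accuracy_ignoring_last_label])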
Example #3
def yolo5_boxes_and_scores(feats, anchors, num_classes, input_shape,
                           image_shape, scale_x_y):
    '''Process Conv layer output'''
    box_xy, box_wh, box_confidence, box_class_probs = yolo5_decode(
        feats, anchors, num_classes, input_shape, scale_x_y=scale_x_y)
    boxes = yolo3_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    boxes = K.reshape(boxes, [-1, 4])
    # with a single class, the objectness confidence alone is used as the score
    box_scores = tf.cond(
        K.equal(K.constant(value=num_classes, dtype='int32'), 1),
        lambda: box_confidence, lambda: box_confidence * box_class_probs)
    box_scores = K.reshape(box_scores, [-1, num_classes])
    return boxes, box_scores
Example #4
def acc(y_true, y_pred):
    # both are of shape ( _, Ty, VOCAB_SIZE )
    targ = K.argmax(y_true, axis=-1)
    pred = K.argmax(y_pred, axis=-1)
    correct = K.cast(K.equal(targ, pred),
                     dtype='float32')  # cast bool tensor to float

    # 0 is padding; don't count those. The mask marks non-pad positions.
    mask = K.cast(K.greater(targ, 0),
                  dtype='float32')  # cast bool tensor to float
    n_correct = K.sum(mask * correct)
    n_total = K.sum(mask)
    return n_correct / n_total
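A quick numeric check of the masking (assuming, as the comment says, that index 0 is the pad token):

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = K.one_hot(K.constant([[2, 1, 0]], dtype='int32'), 4)  # last step is pad
y_pred = K.one_hot(K.constant([[2, 2, 0]], dtype='int32'), 4)
print(K.eval(acc(y_true, y_pred)))  # 0.5: one of the two non-pad steps is right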
Example #5
def viterbi_accuracy(y_true, y_pred):
    # defined inside a CRF layer: self.transitions, self.sparse_target and
    # crf_decode come from the enclosing scope
    # padded timesteps score below -1e10, so the mask keeps only real
    # timesteps (and K.sum(mask) cannot be zero)
    mask = K.cast(
        K.all(K.greater(y_pred, -1e10), axis=2), K.floatx())
    shape = tf.shape(y_pred)
    sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
    y_pred, _ = crf_decode(y_pred, self.transitions, sequence_lengths)
    if self.sparse_target:
        y_true = K.argmax(y_true, 2)
    y_pred = K.cast(y_pred, 'int32')
    y_true = K.cast(y_true, 'int32')
    corrects = K.cast(K.equal(y_true, y_pred), K.floatx())
    return K.sum(corrects * mask) / K.sum(mask)
Example #6
def plate_acc(y_true, y_pred):
    '''
    Fraction of plates that were classified completely correctly.
    If the ground truth is ABC 123, the prediction ABC 123 scores 1
    while ABD 123 scores 0. Averaging these, (1 + 0) / 2 gives 0.5
    accuracy (half of the plates were classified entirely correctly).
    '''
    y_true = K.reshape(y_true, shape=(-1, 7, 37))
    y_pred = K.reshape(y_pred, shape=(-1, 7, 37))
    et = K.equal(K.argmax(y_true), K.argmax(y_pred))
    return K.mean(K.cast(K.all(et, axis=-1, keepdims=False), dtype='float32'))
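A sanity check under the apparent encoding, 7 characters drawn from a 37-symbol alphabet; that layout is inferred from the reshape above, not stated in the source:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

chars = np.random.randint(0, 37, size=(2, 7))          # two random plates
y_true = tf.reshape(tf.one_hot(chars, 37), (2, -1))
y_pred = tf.identity(y_true)                           # perfect prediction
print(K.eval(plate_acc(y_true, y_pred)))               # -> 1.0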
Example #7
def create_weight_mask(y_true, y_pred, weights):
    # weights is an nb_cl x nb_cl matrix: weights[true, pred] is the penalty
    # for predicting class `pred` when the truth is `true`
    # (requires: from itertools import product)
    nb_cl = len(weights)
    weights = tf.cast(weights, dtype=tf.float32)
    final_mask = K.zeros_like(y_pred[:, 0])
    y_pred_max = K.max(y_pred, axis=1)
    y_pred_max = K.reshape(y_pred_max, (K.shape(y_pred)[0], 1))
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    y_pred_max_mat = tf.cast(y_pred_max_mat, dtype=tf.float32)
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += weights[c_t, c_p] * y_true[:, c_t] * y_pred_max_mat[:, c_p]

    return final_mask
Example #8
def iou(y_true, y_pred, label=0):
    """
    Return the Intersection over Union (IoU) for a given label.
    Args:
        y_true: the expected y values as a one-hot
        y_pred: the predicted y values as a one-hot or softmax output
        label: the label to return the IoU for
    Returns:
        the IoU for the given label
    """
    # extract the label values using the argmax operator then
    # calculate equality of the predictions and truths to the label
    y_true = K.cast(K.equal(K.argmax(y_true), label), K.floatx())
    y_pred = K.cast(K.equal(K.argmax(y_pred), label), K.floatx())
    #y_pred = K.cast(K.greater(y_pred, 0.5), K.floatx())
    # calculate the |intersection| (AND) of the labels
    intersection = K.sum(y_true * y_pred)
    # calculate the |union| (OR) of the labels
    union = K.sum(y_true) + K.sum(y_pred) - intersection
    # avoid divide by zero - if the union is zero, return 1
    # otherwise, return the intersection over union
    return K.switch(K.equal(union, 0), 1.0, intersection / union)
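Since the function scores one label at a time, a mean-IoU metric is usually assembled around it. A small sketch; the mean_iou name and the label list are assumptions:

def mean_iou(y_true, y_pred, labels=(0, 1, 2)):
    # average the per-label IoU over the labels of interest
    total = 0.0
    for label in labels:
        total = total + iou(y_true, y_pred, label)
    return total / len(labels)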
Example #9
    def _focal(y_true, y_pred):
        """ Compute the focal loss given the target tensor and the predicted tensor.

        As defined in https://arxiv.org/abs/1708.02002

        Args
            y_true: Tensor of target data from the generator with shape (B, N, num_classes).
            y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).

        Returns
            The focal loss of y_pred w.r.t. y_true.
        """
        # alpha and gamma come from the enclosing scope
        labels = y_true[:, :, :-1]
        # last channel: -1 for ignore, 0 for background, 1 for object
        anchor_state = y_true[:, :, -1]
        classification = y_pred

        # filter out "ignore" anchors
        indices = tf.where(K.not_equal(anchor_state, -1))
        labels = tf.gather_nd(labels, indices)
        classification = tf.gather_nd(classification, indices)

        # compute the focal loss
        alpha_factor = K.ones_like(labels) * alpha
        alpha_factor = tf.where(K.equal(labels, 1), alpha_factor,
                                1 - alpha_factor)
        focal_weight = tf.where(K.equal(labels, 1), 1 - classification,
                                classification)
        focal_weight = alpha_factor * focal_weight**gamma

        cls_loss = focal_weight * K.binary_crossentropy(labels, classification)

        # compute the normalizer: the number of positive anchors
        normalizer = tf.where(K.equal(anchor_state, 1))
        normalizer = K.cast(K.shape(normalizer)[0], K.floatx())
        normalizer = K.maximum(K.cast_to_floatx(1.0), normalizer)

        return K.sum(cls_loss) / normalizer
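For intuition about the weighting: with the paper's defaults alpha = 0.25 and gamma = 2, an object anchor predicted at p = 0.9 gets focal weight 0.25 * (1 - 0.9)^2 = 0.0025, while a hard one at p = 0.1 gets 0.25 * (1 - 0.1)^2 = 0.2025, about 80x more, so easy positives are strongly down-weighted. Note that alpha and gamma are free variables here; the factory that binds them is not shown in the listing.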
Example #10
    def my_accu(y_true, y_pred):
        # e_len and n_cls come from the enclosing scope; y_pred packs the
        # anchor / positive / negative activations of several layers side by side
        c1_len = 8 * 32
        conv1_anc = y_pred[:, :c1_len]
        conv1_pos = y_pred[:, c1_len:(c1_len * 2)]
        conv1_neg = y_pred[:, (c1_len * 2):(c1_len * 3)]

        s_len = c1_len * 3

        c2_len = 128 * 32
        conv2_anc = y_pred[:, s_len:(s_len + c2_len)]
        conv2_pos = y_pred[:, (s_len + c2_len):(s_len + (c2_len * 2))]
        conv2_neg = y_pred[:, (s_len + (c2_len * 2)):(s_len + (c2_len * 3))]

        s_len = s_len + (c2_len * 3)

        embed_anc = y_pred[:, s_len:(s_len + e_len)]
        embed_pos = y_pred[:, (s_len + e_len):(s_len + (e_len * 2))]
        embed_neg = y_pred[:, (s_len + (e_len * 2)):(s_len + (e_len * 3))]

        s_len = s_len + (e_len * 3)

        out_anc = y_pred[:, s_len:(s_len + n_cls)]
        out_pos = y_pred[:, (s_len + n_cls):(s_len + (n_cls * 2))]
        out_neg = y_pred[:, (s_len + (n_cls * 2)):(s_len + (n_cls * 3))]

        tru_anc = y_true[:, :n_cls]
        tru_pos = y_true[:, n_cls:(n_cls * 2)]
        tru_neg = y_true[:, (n_cls * 2):(n_cls * 3)]

        accu_anc = K.cast(K.equal(K.argmax(tru_anc), K.argmax(out_anc)),
                          K.floatx())
        accu_pos = K.cast(K.equal(K.argmax(tru_pos), K.argmax(out_pos)),
                          K.floatx())
        accu_neg = K.cast(K.equal(K.argmax(tru_neg), K.argmax(out_neg)),
                          K.floatx())

        return (accu_anc + accu_pos + accu_neg) / 3
Example #11
    def sparse_accuracy(self, y_true, y_pred):
        """Reports per-frame (token-level) accuracy during training,
        excluding the effect of the mask. Here y_true must be integer
        labels, not one-hot.
        """
        # derive the mask and convert its dtype
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # re-establish the shape and dtype of y_true
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # take the per-step argmax as a rough measure of training progress
        y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
        isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
        return K.sum(isequal * mask) / K.sum(mask)
Example #12
    def decoder(self, inputs):
        decoder_inputs, encoder_encodings, encoder_masks = inputs
        if K.dtype(decoder_inputs) != 'int32':
            decoder_inputs = K.cast(decoder_inputs, 'int32')

        decoder_masks = K.equal(decoder_inputs, 0)
        # Embeddings
        embeddings = K.gather(self.embeddings, decoder_inputs)
        embeddings *= self._model_dim**0.5  # Scale
        # Position Encodings
        position_encodings = self.DecoderPositionEncoding(embeddings)
        # Embeddings + position encodings
        encodings = embeddings + position_encodings
        # Dropout
        encodings = K.dropout(encodings, self._dropout_rate)

        for i in range(self._decoder_stack):
            # Masked-Multi-head-Attention
            masked_attention = self.DecoderMultiHeadAttetions0[i]
            masked_attention_input = [
                encodings, encodings, encodings, decoder_masks
            ]
            masked_attention_out = masked_attention(masked_attention_input)
            # Add & Norm
            masked_attention_out += encodings
            masked_attention_out = self.DecoderLayerNorms0[i](
                masked_attention_out)

            # Multi-head-Attention
            attention = self.DecoderMultiHeadAttetions1[i]
            attention_input = [
                masked_attention_out, encoder_encodings, encoder_encodings,
                encoder_masks
            ]
            attention_out = attention(attention_input)
            # Add & Norm
            attention_out += masked_attention_out
            attention_out = self.DecoderLayerNorms1[i](attention_out)

            # Feed-Forward
            ff = self.DecoderPositionWiseFeedForwards[i]
            ff_out = ff(attention_out)
            # Add & Norm
            ff_out += attention_out
            encodings = self.DecoderLayerNorms2[i](ff_out)

        # the pre-softmax projection shares its weights with the embeddings
        linear_projection = K.dot(encodings, K.transpose(self.embeddings))
        outputs = K.softmax(linear_projection)
        return outputs
Example #13
def rpn_regr_loss(y_true, y_pred):
    # smooth L1 loss on the regression targets of positive (cls == 1) anchors
    sigma = 9.0
    cls = y_true[0, :, 0]
    regr = y_true[0, :, 1:3]
    regr_keep = tf.where(K.equal(cls, 1))[:, 0]
    regr_true = tf.gather(regr, regr_keep)
    regr_pred = tf.gather(y_pred[0], regr_keep)
    diff = tf.abs(regr_true - regr_pred)
    less_one = tf.cast(tf.less(diff, 1.0 / sigma), 'float32')
    loss = less_one * 0.5 * diff**2 * sigma + tf.abs(1 - less_one) * (
        diff - 0.5 / sigma)
    loss = K.sum(loss, axis=1)
    return K.switch(tf.size(loss) > 0, K.mean(loss), K.constant(0.0))
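Written out, with d = |regr_true - regr_pred|, this is the smooth-L1 (Huber-style) loss: 0.5 * sigma * d^2 when d < 1/sigma, and d - 0.5/sigma otherwise. The two branches meet at d = 1/sigma with value 0.5/sigma, so the loss is continuous there; the code's sigma = 9.0 plays the role of sigma^2 (i.e. sigma = 3) in the usual Faster R-CNN notation.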
Example #14
    def metric(y_true, y_pred):
        """
        Parameters
        ----------
        y_true : keras tensor
            True values to predict
        y_pred : keras tensor
            Prediction made by the model.
            It is assumed that this keras tensor includes extra columns to
            store the abstaining classes. nb_classes comes from the
            enclosing scope and is the index of the abstention class.
        """
        # matches in the original classes
        true_pred = K.sum(K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), 'int64'))

        # total abstentions
        total_abs = K.sum(K.cast(K.equal(K.argmax(y_pred, axis=-1), nb_classes), 'int64'))

        # total number of predictions (the comparison is always true)
        total_pred = K.sum(K.cast(K.equal(K.argmax(y_pred, axis=-1), K.argmax(y_pred, axis=-1)), 'int64'))

        # guard against divide by zero: accuracy over the non-abstained
        # samples, or 1 when the model abstained on everything
        condition = K.greater(total_pred, total_abs)
        abs_acc = K.switch(condition, true_pred / (total_pred - total_abs), total_pred / total_pred)
        return abs_acc
Example #15
def word_accuracy(y_true, y_pred):
    """
    Our custom metric for comparing the baseline against our model.
    Returns 1 if every typed key in the word is correct, else 0.
    Note: this differs from Keras's accuracy metric. If a word has
    length 3 and one key fails, Keras's accuracy returns 0.6667,
    but we want it to return 0.
    """
    # if the word matches exactly, the sum of |y_true - y_pred| is 0
    def is_correct_word(x):
        return K.sum(x)
    absolute = K.abs(y_true - y_pred)
    count = K.map_fn(is_correct_word, absolute)
    return K.equal(count, 0)
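A toy check; note the metric returns one boolean per word, so cast it with K.cast(..., K.floatx()) if you average it yourself:

from tensorflow.keras import backend as K

y_true = K.constant([[1., 0., 1.], [1., 0., 1.]])
y_pred = K.constant([[1., 0., 1.], [1., 1., 1.]])  # 2nd word has one wrong key
print(K.eval(word_accuracy(y_true, y_pred)))       # [ True  False ]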
Example #16
def Jaccard(y_true, y_pred):
    # _IS_TF_2 is a module-level flag distinguishing TF 2.x from TF 1.x
    nb_classes = K.int_shape(y_pred)[-1]
    iou = []
    pred_pixels = K.argmax(y_pred, axis=-1)
    for i in range(nb_classes):
        true_labels = K.equal(y_true[:, :, 0], i)
        pred_labels = K.equal(pred_pixels, i)
        inter = tf.compat.v1.to_int32(true_labels & pred_labels)
        union = tf.compat.v1.to_int32(true_labels | pred_labels)
        # only batches that actually contain class i count towards its IoU
        legal_batches = K.sum(tf.compat.v1.to_int32(true_labels), axis=1) > 0
        ious = K.sum(inter, axis=1) / K.sum(union, axis=1)
        if _IS_TF_2:
            iou.append(K.mean(ious[legal_batches]))
        else:
            iou.append(K.mean(tf.gather(ious, indices=tf.where(
                legal_batches))))  # average IoU over those batches
    iou = tf.stack(iou)
    # drop classes whose IoU is NaN (absent from every batch)
    legal_labels = ~tf.math.is_nan(iou) if _IS_TF_2 else ~tf.debugging.is_nan(
        iou)
    iou = iou[legal_labels] if _IS_TF_2 else tf.gather(
        iou, indices=tf.where(legal_labels))
    return K.mean(iou)
Example #17
    def _p_cluster_loss(self, y_true, y_pred):
        event_filter = y_true[:, 0]  # ∈ n
        p_cluster = K.reshape(y_true[:, 1], (-1, 1))  # ∈ nx1
        # loss ∈ n
        loss = keras.losses.sparse_categorical_crossentropy(p_cluster, y_pred)

        # compose _p_cluster_match: a mask over the matched clusters of p
        y_pred_sparse = K.cast(K.argmax(y_pred), y_true.dtype)  # ∈ n
        self._p_cluster_pred = y_pred_sparse  # ∈ n
        self._p_cluster_match = K.cast(K.equal(y_true[:, 1], y_pred_sparse),
                                       'float32')  # [float] ∈ n

        # return (n*n) ∈ n
        return event_filter * loss
Example #18
    def fast_accuracy(self, y_true, y_pred):
        mask = self.mask
        if len(K.int_shape(y_true)) == 3:
            y_true = K.argmax(y_true, axis=-1)
        y_pred = K.argmax(y_pred, -1)
        y_true = K.cast(y_true, y_pred.dtype)
        # take the per-label argmax as a rough measure of training progress
        isequal = K.equal(y_true, y_pred)
        isequal = K.cast(isequal, y_pred.dtype)
        if mask is None:
            return K.mean(isequal)
        else:
            mask = K.cast(mask, y_pred.dtype)
            return K.sum(isequal * mask) / K.sum(mask)
Example #19
def rec_acc(y_true, y_pred):
    # y_true: (batch_size, maxlen, 1)
    # y_pred: (batch_size, maxlen, 1)

    # (batch_size, maxlen, 1)
    is_particle_wise_equal = K.equal(y_true, K.round(y_pred))

    # (batch_size, maxlen)
    is_particle_wise_equal = K.squeeze(is_particle_wise_equal, axis=-1)

    # (batch_size, )
    is_jet_wise_correct = K.all(is_particle_wise_equal, axis=1)

    return K.mean(is_jet_wise_correct)
Example #20
def reshape_accuracy(real, pred):
    # Use in training metrics.
    pred = K.cast(K.argmax(pred, axis=-1), K.floatx())
    real = K.cast(K.squeeze(real, axis=-1), K.floatx())

    # element-wise accuracy tensor; Keras averages it over the batch
    accuracy_tensor = K.cast(K.equal(real, pred),
                             K.floatx())  # shape = (N, max_length)

    return accuracy_tensor
Example #21
    def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
        # requires: from typing import List (Tensor is the framework's tensor type)
        emb_c, emb_r = inputs
        bs = K.shape(emb_c)[0]
        embeddings = K.concatenate([emb_c, emb_r], 0)
        dot_product = K.dot(embeddings, K.transpose(embeddings))
        square_norm = K.batch_dot(embeddings, embeddings, axes=1)
        # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
        distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
        # keep only the context-vs-response block
        distances = distances[0:bs, bs:bs + bs]
        distances = K.clip(distances, 0.0, None)
        # epsilon trick around sqrt; see the note below
        mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
        distances = distances + mask * 1e-16
        distances = K.sqrt(distances)
        distances = distances * (1.0 - mask)
        return distances
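The detour through mask and 1e-16 is not cosmetic: the derivative of sqrt(x) is 1/(2*sqrt(x)), which blows up at x = 0, so pairs of identical embeddings would otherwise produce NaN gradients. Adding a tiny epsilon only where the squared distance is exactly zero keeps the gradient finite, and multiplying by (1 - mask) afterwards restores an exact 0 in the forward pass.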
Example #22
def lm_acc(y_true, y_pred):
    """
    Computes accuracy while excluding the pad positions.
    :param y_true: ground-truth labels
    :param y_pred: predicted values
    :return: accuracy with the pad positions excluded
    """
    y_pred_class = tf.cast(K.argmax(y_pred, axis=-1), tf.float32)
    y_true = tf.cast(y_true, tf.float32)
    matches = tf.cast(K.equal(y_true, y_pred_class), tf.float32)
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    matches *= mask
    accuracy = K.sum(matches) / K.maximum(K.sum(mask), 1)
    return accuracy
Example #23
    def new_get_updates(self, loss, params):
        self.slow_params = [
            K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params
        ]
        update_iter = [K.update_add(self.lookahead_iterations, 1)]

        def just_copy_func():
            copy_slow_params = [
                K.update(p, q) for p, q in zip(self.slow_params, params)
            ]
            return tf.group(*copy_slow_params)

        def update_func():
            update_params = [
                K.update(q, p * self.slow_ratio + q * self.fast_ratio)
                for p, q in zip(self.slow_params, params)
            ]
            with tf.control_dependencies(update_params):
                reset_slow_params = [
                    K.update(p, q) for p, q in zip(self.slow_params, params)
                ]
            return tf.group(*(reset_slow_params + update_iter))

        def just_iter_func():
            return tf.group(*update_iter)

        # copy params to self.slow_params at iteration 0
        copy_switch = K.equal(self.lookahead_iterations, 0)
        copy_params = [K.switch(copy_switch, just_copy_func, tf.no_op())]
        with tf.control_dependencies(copy_params):
            # do the 'slow weights update' every 'k' iterations
            update_switch = K.equal(self.lookahead_iterations % self.k, 0)
            with tf.control_dependencies(self.orig_get_updates(loss, params)):
                self.updates = [
                    K.switch(update_switch, update_func, just_iter_func)
                ]
                return self.updates
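This implements the Lookahead scheme (Zhang et al., 2019): the current weights are copied into self.slow_params at iteration 0, the wrapped optimizer updates the fast weights every step, and every self.k iterations the fast weights are blended with the slow copy (self.slow_ratio / self.fast_ratio interpolation) and the result becomes the new slow copy.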
Example #24
    def updated_get_updates(self, loss, params):
        self.accumulate_gradient_accumulators = [
            K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params
        ]
        if ema_decay > 0:
            self.params_for_ema_tracking = params
            self.params_ema = [
                K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params
            ]
        updates_accumulated_iterations = K.update_add(
            self.accumulated_iterations, 1)
        new_grads = orig_get_gradients(loss, params)
        if not accumulate_sum_or_mean:
            new_grads = [
                g / K.cast(self.update_params_frequency, K.dtype(g))
                for g in new_grads
            ]
        self.updated_grads = [
            K.update_add(p, g)
            for p, g in zip(self.accumulate_gradient_accumulators, new_grads)
        ]

        def update_function():
            with tensorflow.control_dependencies(orig_get_updates(
                    loss, params)):
                reset_grads = [
                    K.update(p, K.zeros(K.int_shape(p), dtype=K.dtype(p)))
                    for p in self.accumulate_gradient_accumulators
                ]
                if ema_decay > 0:
                    reset_grads += [K.update_add(self.total_iterations, 1)]
                    reset_grads += [
                        K.update(e_p, (e_p * ema_decay) + (1 - ema_decay) * p)
                        for e_p, p in zip(self.params_ema, params)
                    ]
            return tensorflow.group(*(reset_grads +
                                      [updates_accumulated_iterations]))

        def just_store_function():
            return tensorflow.group(*[updates_accumulated_iterations])

        update_switch = K.equal(
            (updates_accumulated_iterations) % self.update_params_frequency, 0)

        with tensorflow.control_dependencies(self.updated_grads):
            self.updates = [
                K.switch(update_switch, update_function, just_store_function)
            ]
            return self.updates
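The effect is gradient accumulation: each step the new gradients (divided by update_params_frequency unless accumulate_sum_or_mean is set) are added into per-parameter accumulators, and the wrapped optimizer's real update plus an accumulator reset runs only once every self.update_params_frequency iterations, so the effective batch size is the physical batch size times that frequency. With ema_decay > 0 an exponential moving average of the parameters is maintained as well. The free variables (ema_decay, accumulate_sum_or_mean, orig_get_gradients, orig_get_updates) come from the enclosing scope.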
Example #25
        def custom_loss(y_true, y_pred, loss_weights=loss_weights):  # Verified
            # SAMPLE_KEEP_RATIO and the default loss_weights come from the
            # enclosing scope
            zero_index = K.zeros_like(y_true[:, 0])
            ones_index = K.ones_like(y_true[:, 0])

            # Classifier
            labels = y_true[:, 0]
            class_preds = y_pred[:, 0]
            bi_crossentropy_loss = -labels * K.log(class_preds) - (
                1 - labels) * K.log(1 - class_preds)

            classify_valid_index = tf.where(K.less(y_true[:, 0], 0),
                                            zero_index, ones_index)
            classify_keep_num = K.cast(tf.reduce_sum(classify_valid_index) *
                                       SAMPLE_KEEP_RATIO,
                                       dtype=tf.int32)
            # online hard example mining: for classification, keep only the
            # hardest SAMPLE_KEEP_RATIO (e.g. 70%) of the valid samples

            classify_loss_sum = bi_crossentropy_loss * classify_valid_index
            classify_loss_sum_filtered, _ = tf.nn.top_k(classify_loss_sum,
                                                        k=classify_keep_num)
            classify_loss = K.mean(classify_loss_sum_filtered)

            # Bounding box regressor
            rois = y_true[:, 1:5]
            roi_preds = y_pred[:, 1:5]
            # roi_raw_mean_square_error = K.sum(K.square(rois - roi_preds), axis = 1) # mse
            roi_raw_smooth_l1_loss = K.mean(
                tf.where(
                    K.abs(rois - roi_preds) < 1,
                    0.5 * K.square(rois - roi_preds),
                    K.abs(rois - roi_preds) - 0.5))  # L1 Smooth Loss

            roi_valid_index = tf.where(K.equal(K.abs(y_true[:, 0]), 1),
                                       ones_index, zero_index)
            roi_keep_num = K.cast(tf.reduce_sum(roi_valid_index),
                                  dtype=tf.int32)

            # roi_valid_mean_square_error = roi_raw_mean_square_error * roi_valid_index
            # roi_filtered_mean_square_error, _ = tf.nn.top_k(roi_valid_mean_square_error, k = roi_keep_num)
            # roi_loss = K.mean(roi_filtered_mean_square_error)
            roi_valid_smooth_l1_loss = roi_raw_smooth_l1_loss * roi_valid_index
            roi_filtered_smooth_l1_loss, _ = tf.nn.top_k(
                roi_valid_smooth_l1_loss, k=roi_keep_num)
            roi_loss = K.mean(roi_filtered_smooth_l1_loss)

            loss = classify_loss * loss_weights[0] + roi_loss * loss_weights[1]

            return loss
Example #26
    def cindex_lowerbound(y_true, y_pred):
        y = y_true[:, 0]
        e = y_true[:, 1]
        ydiff = y[tf.newaxis, :] - y[:, tf.newaxis]
        yij = K.cast(
            K.greater(ydiff, 0),
            K.floatx()) + K.cast(K.equal(ydiff, 0), K.floatx()) * K.cast(
                e[:, tf.newaxis] != e[tf.newaxis, :], K.floatx())  # yi > yj
        is_valid_pair = yij * e[:, tf.newaxis]

        ypdiff = tf.transpose(
            y_pred) - y_pred  # y_pred[tf.newaxis,:] - y_pred[:,tf.newaxis]
        ypij = (1 + K.log(K.sigmoid(ypdiff))) / K.log(tf.constant(2.0))
        cidx_lb = (K.sum(ypij * is_valid_pair)) / K.sum(is_valid_pair)
        return cidx_lb
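The ypij term is the standard log-sigmoid lower bound on the 0/1 step function: 1 + log2(sigmoid(x)) equals 0 at x = 0, approaches 1 as x grows, and is negative for x < 0, so it lower-bounds the indicator [x > 0] everywhere. Averaging it over the valid (comparable, uncensored) pairs therefore gives a differentiable lower bound on the concordance index.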
Example #27
def sparse_categorical_accuracy(y_true, y_pred):
    """
    Description:
        Same as categorical_accuracy, but useful when the predictions are for
        sparse targets.
    Args:
        y_true (tensor): ground truth class indices (sparse labels)
        y_pred (tensor): predicted class probabilities
    Returns:
        sparse_categorical_accuracy (float)
    """

    return K.mean(
        K.equal(K.max(y_true, axis=-1),
                K.cast(K.argmax(y_pred, axis=-1), K.floatx())))
Example #28
    def accuracy(self, y_true, y_pred):
        mask = self.mask
        if len(K.int_shape(y_true)) == 3:
            y_true = K.argmax(y_true, axis=-1)

        y_pred, _ = tfa.text.crf_decode(y_pred, self.transitions,
                                        self.sequence_lengths)
        y_true = K.cast(y_true, y_pred.dtype)
        is_equal = K.equal(y_true, y_pred)
        is_equal = K.cast(is_equal, y_pred.dtype)
        if mask is None:
            return K.mean(is_equal)
        else:
            mask = K.cast(mask, y_pred.dtype)
            return K.sum(is_equal * mask) / K.sum(mask)
Example #29
def lm_acc(y_true, y_pred):
    """
    Accuracy function.
    :param y_true: targets (bs, n_seq)
    :param y_pred: predictions (bs, n_seq, n_vocab)
    """
    # check which predictions match the targets
    y_pred_class = tf.cast(K.argmax(y_pred, axis=-1), tf.float32)
    matches = tf.cast(K.equal(y_true, y_pred_class), tf.float32)
    # mask out the pad (0) positions
    mask = tf.cast(tf.math.not_equal(y_true, 0), dtype=matches.dtype)
    matches *= mask
    # compute the accuracy
    accuracy = K.sum(matches) / K.maximum(K.sum(mask), 1)
    return accuracy
Example #30
def iou(y_true, y_pred):
    y_true_digit = K.flatten(K.argmax(y_true, axis=-1))
    y_pred_digit = K.flatten(K.argmax(y_pred, axis=-1))
    # collect every class present in either the truth or the prediction
    classes_true = tf.unique(y_true_digit)[0]
    classes_pred = tf.unique(y_pred_digit)[0]
    classes = K.concatenate([classes_true, classes_pred])
    classes = tf.unique(classes)[0]
    classes = classes[classes != 50]  # drop label 50 (presumably the void class)

    iou = tf.constant(0, dtype=tf.float32)
    for i_class in classes:
        value = K.zeros_like(y_true_digit) + K.cast(i_class, tf.int64)
        mask_true = K.cast(K.equal(y_true_digit, value), K.floatx())
        mask_pred = K.cast(K.equal(y_pred_digit, value), K.floatx())

        tp = K.sum(K.minimum(mask_true, mask_pred))
        fp = K.sum(K.minimum(1 - mask_true, mask_pred))
        fn = K.sum(K.minimum(mask_true, 1 - mask_pred))

        iou_class = (tp + 1e-12) / (tp + fp + fn + 1e-12)
        iou += iou_class
    num_classes = K.cast(len(classes), tf.float32)
    iou = iou / num_classes
    return iou