Example #1
def accuracy_length(y_true, y_pred):
    """Compute the length control signal accuracy by matching the position of the EOS token."""

    EOS_token = 2

    mask_PAD = K.all(K.equal(y_true, 0), axis=-1)  # Shape = (N, max_length)
    mask_PAD = 1 - K.cast(mask_PAD, K.floatx())
    mask_PAD = tf.squeeze(mask_PAD)  # 0.0 where the position is PAD.

    y_pred = K.cast(K.argmax(y_pred, axis=-1), K.floatx())
    y_pred = y_pred * mask_PAD  # Shape = (N, max_length)

    filter_EOS = K.all(K.equal(y_true, EOS_token), axis=-1)
    filter_EOS = K.cast(filter_EOS, K.floatx())
    filter_EOS = tf.squeeze(filter_EOS)  # Shape = (N, max_length), 1.0 where the true token is EOS.

    # EOS predicted exactly at the position of the true EOS
    y_expected = K.equal(y_pred * filter_EOS, float(EOS_token))
    y_expected = K.cast(y_expected, K.floatx())  # Shape = (N, max_length)
    y_expected = K.sum(y_expected, axis=-1)  # Shape = (N, )
    y_expected = K.cast(K.equal(y_expected, 1.0), K.floatx())

    # Exactly one EOS predicted anywhere in the sequence
    y_result = K.cast(K.equal(y_pred, float(EOS_token)), K.floatx())  # Shape = (N, max_length)
    y_result = K.sum(y_result, axis=-1)  # Shape = (N, )
    y_result = K.cast(K.equal(y_result, 1.0), K.floatx())

    accuracy = y_expected * y_result  # Shape = (N, )
    accuracy = K.sum(accuracy) / K.sum(filter_EOS)

    return accuracy
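All of the snippets on this page reference the Keras backend as K and TensorFlow as tf, and several use TF 1.x-era APIs (tf.count_nonzero, crf_decode). A minimal sketch of the assumed imports and of calling accuracy_length directly on toy tensors (the shapes and the EOS/PAD conventions here are assumptions, not from the source):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

# Toy check: batch of 2 sequences of length 4, vocabulary of 5 tokens,
# EOS = 2, PAD = 0.
y_true = K.constant([[[1], [2], [0], [0]],
                     [[3], [1], [2], [0]]])
y_pred = K.constant(np.random.rand(2, 4, 5).astype('float32'))
print(K.eval(accuracy_length(y_true, y_pred)))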
Example #2
def competition_coef(y_true, y_pred, smooth=1):
    """Mean of the tumor+kidney Dice and the tumor-only Dice; `smooth` avoids division by zero."""
    y_pred = K.argmax(y_pred, axis=-1)
    y_true = K.argmax(y_true, axis=-1)
    # Compute tumor+kidney Dice (classes > 0)
    tk_pd = K.greater(y_pred, 0)
    tk_gt = K.greater(y_true, 0)
    intersection = K.all(K.stack([tk_gt, tk_pd], axis=3), axis=3)
    tk_dice = (2 * K.sum(K.cast(intersection, K.floatx())) + smooth) / (
            K.sum(K.cast(tk_pd, K.floatx())) + K.sum(K.cast(tk_gt, K.floatx())) + smooth
    )
    # Compute tumor Dice (classes > 1)
    tu_pd = K.greater(y_pred, 1)
    tu_gt = K.greater(y_true, 1)
    intersection = K.all(K.stack([tu_pd, tu_gt], axis=3), axis=3)
    tu_dice = (2 * K.sum(K.cast(intersection, K.floatx())) + smooth) / (
            K.sum(K.cast(tu_pd, K.floatx())) + K.sum(K.cast(tu_gt, K.floatx())) + smooth
    )
    return (tk_dice + tu_dice) / 2.0
Example #3
	def basic_loss(self, y_true, y_pred, go_backwards=False):
		"""y_true must be integer labels (not one-hot).
		"""
		# Derive the mask and convert its dtype
		mask = K.all(K.greater(y_pred, -1e6), axis=2)
		mask = K.cast(mask, K.floatx())
		# Re-assert the shape and dtype of y_true
		y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
		y_true = K.cast(y_true, 'int32')
		# Sequence-reversal handling
		if self.hidden_dim is None:
			if go_backwards:  # whether to reverse the sequence
				y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
				trans = K.transpose(self.trans)
			else:
				trans = self.trans
			history = K.gather(trans, y_true)
		else:
			if go_backwards:  # whether to reverse the sequence
				y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
				r_trans, l_trans = self.l_trans, self.r_trans
			else:
				l_trans, r_trans = self.l_trans, self.r_trans
			history = K.gather(l_trans, y_true)
			history = tf.einsum('bnd,kd->bnk', history, r_trans)
		# Compute the loss
		history = K.concatenate([y_pred[:, :1], history[:, :-1]], 1)
		y_pred = (y_pred + history) / 2
		loss = K.sparse_categorical_crossentropy(
			y_true, y_pred, from_logits=True
		)
		return K.sum(loss * mask) / K.sum(mask)
Example #4
	def basic_accuracy(self, y_true, y_pred, go_backwards=False):
		"""Per-frame accuracy shown during training, excluding the effect of the mask.
		Here y_true must be integer labels (not one-hot).
		"""
		# Derive the mask and convert its dtype
		mask = K.all(K.greater(y_pred, -1e6), axis=2)
		mask = K.cast(mask, K.floatx())
		# Re-assert the shape and dtype of y_true
		y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
		y_true = K.cast(y_true, 'int32')
		# Sequence-reversal handling
		if self.hidden_dim is None:
			if go_backwards:  # whether to reverse the sequence
				y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
				trans = K.transpose(self.trans)
			else:
				trans = self.trans
			history = K.gather(trans, y_true)
		else:
			if go_backwards:  # whether to reverse the sequence
				y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
				r_trans, l_trans = self.l_trans, self.r_trans
			else:
				l_trans, r_trans = self.l_trans, self.r_trans
			history = K.gather(l_trans, y_true)
			history = tf.einsum('bnd,kd->bnk', history, r_trans)
		# Compute per-label accuracy
		history = K.concatenate([y_pred[:, :1], history[:, :-1]], 1)
		y_pred = (y_pred + history) / 2
		y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
		isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
		return K.sum(isequal * mask) / K.sum(mask)
Example #5
 def dense_loss(self, y_true, y_pred):
     """y_true must be one-hot.
     """
     # Derive the mask and convert its dtype
     # [B, T, 1]: checks whether each timestep T is padding
     mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
     mask = K.cast(mask, K.floatx())
     # Compute the target score
     y_true, y_pred = y_true * mask, y_pred * mask
     target_score = self.target_score(y_true, y_pred)
     # Compute log Z recursively
     init_states = [y_pred[:, 0]]
     # [B, T, output_dim] [B, T, 1] -> [B, T, output_dim+1]
     # Concatenated so the mask is carried into the RNN
     y_pred = K.concatenate([y_pred, mask], axis=2)
     input_length = K.int_shape(y_pred[:, 1:])[1]
     # K.rnn splits y_pred[:, 1:] along the time axis and iterates step by step
     log_norm, _, _ = K.rnn(
         self.log_norm_step,
         y_pred[:, 1:],
         init_states,
         input_length=input_length
     )  # log Z vector at the final step
     log_norm = tf.reduce_logsumexp(log_norm, 1)  # logsumexp gives a scalar per sample
     # Loss = -log p = log Z - target score
     return log_norm - target_score
Example #6
 def _compute_valid_seed_region(self):
     positions = K.concatenate([
         K.expand_dims(K.tile(K.expand_dims(K.arange(self.height), axis=1),
                              [1, self.width]),
                       axis=-1),
         K.expand_dims(K.tile(K.expand_dims(K.arange(self.width), axis=0),
                              [self.height, 1]),
                       axis=-1),
     ],
                               axis=-1)
     half_block_size = self.block_size // 2
     valid_seed_region = K.switch(
         K.all(
             K.stack(
                 [
                     positions[:, :, 0] >= half_block_size,
                     positions[:, :, 1] >= half_block_size,
                     positions[:, :, 0] < self.height - half_block_size,
                     positions[:, :, 1] < self.width - half_block_size,
                 ],
                 axis=-1,
             ),
             axis=-1,
         ),
         self.ones,
         self.zeros,
     )
     return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
Example #7
    def custom_loss(y_true, y_pred):

        y_true_label_1 = y_true[:, :num_classes]
        y_true_label_2 = y_true[:, num_classes:num_classes * 2]
        y_pred_label_1 = y_pred[:, :num_classes]
        y_pred_label_2 = y_pred[:, num_classes:num_classes * 2]

        y_pred_embedding_1 = y_pred[:,
                                    num_classes * 2:num_classes * 2 + emb_size]
        y_pred_embedding_2 = y_pred[:, num_classes * 2 + emb_size:]

        class_loss_1 = categorical_crossentropy(y_true_label_1, y_pred_label_1)
        class_loss_2 = categorical_crossentropy(y_true_label_2, y_pred_label_2)
        embedding_loss = cosine_similarity(y_pred_embedding_1,
                                           y_pred_embedding_2)

        are_labels_equal = K.all(K.equal(y_true_label_1, y_true_label_2),
                                 axis=1)

        a = tf.where(are_labels_equal,
                     tf.fill([tf.shape(are_labels_equal)[0]], 1.0),
                     tf.fill([tf.shape(are_labels_equal)[0]], -1.0))

        result = class_loss_1 + class_loss_2 + tf.math.multiply(
            a, embedding_loss)

        return tf.math.reduce_mean(result)
Example #8
    def masked_categorical_crossentropy(y_true, y_pred):

        mask = K.all(K.equal(y_true, mask_value), axis=-1)
        mask = 1 - K.cast(mask, K.floatx())

        loss = K.categorical_crossentropy(y_true, y_pred) * mask

        return K.sum(loss) / K.sum(mask)
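masked_categorical_crossentropy closes over a mask_value defined in an enclosing scope. One common way to wire that up is a factory function; a sketch under that assumption (build_masked_loss is a hypothetical name, not from the source):

from tensorflow.keras import backend as K

def build_masked_loss(mask_value=0.0):
    def masked_categorical_crossentropy(y_true, y_pred):
        # Positions whose entire one-hot row equals mask_value are ignored
        mask = K.all(K.equal(y_true, mask_value), axis=-1)
        mask = 1 - K.cast(mask, K.floatx())
        loss = K.categorical_crossentropy(y_true, y_pred) * mask
        return K.sum(loss) / K.sum(mask)
    return masked_categorical_crossentropy

# model.compile(optimizer='adam', loss=build_masked_loss(mask_value=0.0))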
Example #9
def accperclass(y_true, y_pred, c):
    ''' Returns the accuracy per class for the class c'''
    z = K.equal(
        K.reshape(K.cast(K.argmax(y_pred), 'float32'), K.shape(y_true)),
        y_true)
    e = K.equal(y_true, c)
    return (K.sum(K.cast(K.all(K.stack([z, e], axis=0), axis=0), 'int32')) /
            K.maximum(1, K.sum(K.cast(e, 'int32'))))
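Because accperclass takes an extra class argument c, it cannot be handed to Model.compile as-is; a hedged sketch of a per-class wrapper (make_accperclass is a hypothetical helper, not from the source):

def make_accperclass(c):
    def fn(y_true, y_pred):
        return accperclass(y_true, y_pred, c)
    fn.__name__ = 'acc_class_%d' % int(c)  # Keras displays metrics by __name__
    return fn

# model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
#               metrics=[make_accperclass(3.0)])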
Example #10
def joint_histogram(tensor, bins):
    _uniq_obj = np.zeros((bins, bins, 2, 1))
    for i in range(bins):
        for j in range(bins):
            _uniq_obj[i, j, :, 0] = np.array([i, j])
    _uniq_obj = K.constant(_uniq_obj)
    _cond = K.all(K.equal(tensor, _uniq_obj), axis=2)
    return tf.count_nonzero(_cond, axis=2)
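Given the shape of _uniq_obj, joint_histogram appears to expect a stacked pair of integer-binned signals of shape (2, N); a toy sketch under that assumption (TF 1.x, since tf.count_nonzero is a TF1 API):

from tensorflow.keras import backend as K

a = K.constant([0, 1, 1, 2])      # bin indices of signal A
b = K.constant([0, 1, 2, 2])      # bin indices of signal B
pair = K.stack([a, b], axis=0)    # shape (2, 4)
print(K.eval(joint_histogram(pair, bins=3)))
# Expected counts: 1 each at (0,0), (1,1), (1,2), (2,2)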
Example #11
 def full_number_accuracy(y_true, y_pred):
     """Exact-match accuracy: a sample counts only if every position matches."""
     y_true_argmax = K.argmax(y_true)
     y_pred_argmax = K.argmax(y_pred)
     tfd = K.equal(y_true_argmax, y_pred_argmax)  # per-position match
     tfn = K.all(tfd, axis=1)  # all positions in the sequence must match
     tfc = K.cast(tfn, dtype='float32')
     tfm = K.mean(tfc)
     return tfm
Example #12
	def compute_mask(self, inputs, mask=None):
		if self.conditional:
			masks = [K.expand_dims(m, 0) for m in mask if m is not None]
			if len(masks) == 0:
				return None
			else:
				return K.all(K.concatenate(masks, axis=0), axis=0)
		else:
			return mask
Example #13
def soft_acc_multi_output(y_true, y_pred):
    return K.mean(
        K.all(
            K.equal(
                K.cast(K.round(y_true), "int32"), K.cast(K.round(y_pred), "int32"),
            ),
            axis=1,
        )
    )
Example #14
 def compute_mask(self, inputs, mask=None):
     if self.conditional:
         masks = mask if mask is not None else []
         masks = [m[None] for m in masks if m is not None]
         if len(masks) == 0:
             return None
         else:
             return K.all(K.concatenate(masks, axis=0), axis=0)
     else:
         return mask
Example #15
    def pl_categorical_accuracy(self, y_true, y_pred):
        # Samples whose every entry equals the `unlabeled` marker are unlabeled
        index = y_true == self.unlabeled
        index = K.all(index, axis=1)

        acc = keras.metrics.categorical_accuracy(y_true, y_pred)
        # Average accuracy over the labeled samples only
        return tf.reduce_sum(tf.where(index, 0.0, acc), axis=-1) / tf.cast(
            tf.reduce_sum(tf.where(index, 0, 1), axis=-1), tf.float32)
Example #16
def masked_perplexity_loss(y_true, y_pred, PAD_token=0):
    """Construct customer masked perplexity loss."""
    mask = K.all(K.equal(y_true, PAD_token),
                 axis=-1)  # Label padding as zero in y_true
    mask = 1 - K.cast(mask, K.floatx())
    nomask = K.sum(mask)
    loss = K.sparse_categorical_crossentropy(
        y_true,
        y_pred) * mask  # Multiply categorical_crossentropy with the mask
    return tf.exp(K.sum(loss) / nomask)
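Since the perplexity here is the exponential of the mean per-token cross-entropy over non-PAD positions, a quick sanity check (toy tensors, assumed shapes) is that a uniform prediction over V tokens yields perplexity V:

import numpy as np
from tensorflow.keras import backend as K

V = 8
y_true = K.constant([[[1], [3], [0]]])  # last position is PAD
y_pred = K.constant(np.full((1, 3, V), 1.0 / V, dtype='float32'))
print(K.eval(masked_perplexity_loss(y_true, y_pred)))  # ~8.0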
Example #17
 def viterbi_accuracy(y_true, y_pred):
     # -1e10 to avoid zero at sum(mask)
     mask = K.cast(K.all(K.greater(y_pred, -1e10), axis=2), K.floatx())
     shape = tf.shape(y_pred)
     sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
     y_pred, _ = crf_decode(y_pred, self.transitions, sequence_lengths)
     if self.sparse_target:
         y_true = K.argmax(y_true, 2)
     y_pred = K.cast(y_pred, 'int32')
     y_true = K.cast(y_true, 'int32')
     corrects = K.cast(K.equal(y_true, y_pred), K.floatx())
     return K.sum(corrects * mask) / K.sum(mask)
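crf_decode is not defined in the snippet; in TF 1.x it would typically come from tf.contrib.crf (an assumption based on the API shown):

from tensorflow.contrib.crf import crf_decode  # TF 1.x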
Example #18
        def acc_igm1(y_true, y_pred):
            """
            custom accuracy function which only considers known labels (not -1)
            """
            known = K.equal(
                K.max(y_true, axis=-1),
                1)  # if max value == 0, it means -1 (unknown) label.
            correct = K.equal(K.argmax(y_true, axis=-1),
                              K.argmax(y_pred, axis=-1))
            true = K.all(K.stack([correct, known], axis=0), axis=0)

            return K.sum(K.cast(true, 'int32')) / K.sum(K.cast(known, 'int32'))
Example #19
    def sentence_accuracy(y_true, y_pred):
        y_true_class = K.argmax(y_true, axis=-1)
        y_pred_class = K.argmax(y_pred, axis=-1)

        ignore_mask = K.cast(K.equal(y_true_class, to_ignore), 'int32')
        matches = K.cast(K.equal(y_true_class, y_pred_class), 'int32')
        matches = K.any(K.stack([matches, ignore_mask], axis=0),
                        axis=0)  # logical OR
        accuracy = K.sum(K.cast(K.all(matches, axis=1), 'int32')) / K.sum(
            K.cast(K.any(matches, axis=1), 'int32'))

        return accuracy
Example #20
def plate_acc(y_true, y_pred):
    '''
    Fraction of plates classified entirely correctly.
    If the ground truth is ABC 123,
    then prediction ABC 123 scores 1
    and ABD 123 scores 0.
    Averaging these results, (1 + 0) / 2, gives 0.5 accuracy
    (half of the plates were completely correctly classified).
    '''
    y_true = K.reshape(y_true, shape=(-1, 7, 37))
    y_pred = K.reshape(y_pred, shape=(-1, 7, 37))
    et = K.equal(K.argmax(y_true), K.argmax(y_pred))
    return K.mean(K.cast(K.all(et, axis=-1, keepdims=False), dtype='float32'))
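A toy check of the all-seven-positions criterion (hypothetical tensors; 7 and 37 correspond to character slots and alphabet size):

import numpy as np
from tensorflow.keras import backend as K

y_true = np.random.rand(2, 7 * 37).astype('float32')
y_pred = y_true.copy()  # identical predictions -> every plate fully correct
print(K.eval(plate_acc(K.constant(y_true), K.constant(y_pred))))  # 1.0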
Example #21
	def sparse_accuracy(self, y_true, y_pred):
		"""Per-frame accuracy shown during training, excluding the effect of the mask.
		Here y_true must be integer labels (not one-hot).
		"""
		# Derive the mask and convert its dtype
		mask = K.all(K.greater(y_pred, -1e6), axis=2)
		mask = K.cast(mask, K.floatx())
		# Re-assert the shape and dtype of y_true
		y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
		y_true = K.cast(y_true, 'int32')
		# Take the per-label argmax as a rough measure of training progress
		y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
		isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
		return K.sum(isequal * mask) / K.sum(mask)
Example #22
def rec_acc(y_true, y_pred):
    # y_true: (batch_size, maxlen, 1)
    # y_pred: (batch_size, maxlen, 1)

    # (batch_size, maxlen, 1)
    is_particle_wise_equal = K.equal(y_true, K.round(y_pred))

    # (batch_size, maxlen)
    is_particle_wise_equal = K.squeeze(is_particle_wise_equal, axis=-1)

    # (batch_size, )
    is_jet_wise_correct = K.all(is_particle_wise_equal, axis=1)

    return K.mean(is_jet_wise_correct)
Example #23
def precision(y_true, y_pred):
    """Precision for foreground pixels.

    Calculates pixelwise precision TP/(TP + FP).

    """
    # count true positives: K.stack + K.all acts as a pixelwise logical AND
    truth = K.round(K.clip(y_true, K.epsilon(), 1))
    pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))
    true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2),
                            dtype='float64'))
    pred_pos_ct = K.sum(pred_pos) + K.epsilon()
    precision = true_pos/pred_pos_ct

    return precision
Example #24
    def pl_loss_test(self, y_true, y_pred):
        # Pseudo-labels: one-hot of the current predictions
        pl = K.one_hot(K.argmax(y_pred), self.out_size)
        pl = tf.cast(pl, y_true.dtype)

        index = y_true == self.unlabeled

        # Replace unlabeled targets with the pseudo-labels
        y_pl = tf.where(index, pl, y_true)

        index = K.all(index, axis=1)

        loss = keras.losses.categorical_crossentropy(y_pl, y_pred)
        labeled_loss = tf.reduce_sum(tf.where(index, 0, loss))
        unlabeled_loss = tf.reduce_sum(tf.where(index, loss, 0))

        return labeled_loss + self.alpha * unlabeled_loss
Example #25
def recall(y_true, y_pred):
    """Recall for foreground pixels.

    Calculates pixelwise recall TP/(TP + FN).

    """
    # count true positives: K.stack + K.all acts as a pixelwise logical AND
    truth = K.round(K.clip(y_true, K.epsilon(), 1))
    pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))
    true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2),
                            dtype='float64'))
    # K.epsilon() guards against division by zero; a Python `if truth_ct == 0`
    # test would never fire on a symbolic tensor.
    truth_ct = K.sum(truth) + K.epsilon()
    recall = true_pos/truth_ct

    return recall
Example #26
 def _compute_valid_seed_region(self):
     positions = K.arange(self.seq_len)
     half_block_size = self.block_size // 2
     valid_seed_region = K.switch(
         K.all(
             K.stack(
                 [
                     positions >= half_block_size,
                     positions < self.seq_len - half_block_size,
                 ],
                 axis=-1,
             ),
             axis=-1,
         ),
         self.ones,
         self.zeros,
     )
     return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
Example #27
    def pl_loss(self, y_true, y_pred):
        '''
        Custom loss function for pseudo-labeling.

        In general, this is only well-defined when y is 1D and one-hot encoded.

        unlabeled = value assigned to the unlabeled samples
        num_classes = number of classes of Y
        alpha = alpha term of the conditional cross-entropy
        '''
        # Pseudo-labels: one-hot of the current predictions
        pl = K.one_hot(K.argmax(y_pred), self.out_size)
        pl = tf.cast(pl, y_true.dtype)

        # Calculate whether each sample in the batch is labeled or unlabeled
        index = y_true == self.unlabeled

        # Replace unlabeled targets with the pseudo-labels
        y_pl = tf.where(index, pl, y_true)

        # Set the coefficient for each sample based on whether it is labeled or unlabeled
        index = K.all(index, axis=1)
        coef_arr = tf.where(index, self.alpha, 1.0)

        # Compute the loss
        loss = keras.losses.categorical_crossentropy(y_pl, y_pred)
        return tf.reduce_sum(coef_arr * loss)
Example #28
def discretize_with_histogram(tensor, bins):
    """Discretize `tensor` into `bins` levels; returns (discretized tensor, histogram)."""
    _min = K.min(tensor)
    _max = K.max(tensor)
    _len_shape = len(tensor.shape)
    _bins = K.cast(tf.range(bins), dtype=K.floatx())
    _range = tf.linspace(_min, _max, bins + 1)
    for _ in range(_len_shape):
        _bins = K.expand_dims(_bins, axis=-1)
        _range = K.expand_dims(_range, axis=-1)
    _cond1 = K.greater_equal(tensor, _range[:-1])
    _cond2 = K.less(tensor, _range[1:])
    _cond3 = K.less_equal(tensor, _range[1:])
    _cond4 = K.concatenate((_cond2[:-1], _cond3[-1:]), axis=0)
    _all_cond = K.cast(K.all(K.stack((_cond1, _cond4), axis=0), axis=0),
                       dtype=K.floatx())
    _axis = tuple([i + 1 for i in range(_len_shape)])
    _discrete = K.sum(_all_cond * _bins, axis=0)
    _histogram = tf.count_nonzero(_all_cond, axis=_axis)
    return _discrete, _histogram
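A toy run (evaluated with K.eval; note tf.count_nonzero is a TF 1.x API):

from tensorflow.keras import backend as K

x = K.constant([0.0, 0.1, 0.4, 0.5, 0.9, 1.0])
discrete, hist = discretize_with_histogram(x, bins=2)
print(K.eval(discrete))  # [0. 0. 0. 1. 1. 1.]
print(K.eval(hist))      # [3 3]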
Example #29
 def get_updates(self, loss, params):
     # Only for initialization
     self.optimizer.get_updates(loss, params)
     # Common (dense) updates
     dense_params = [p for p in params if p not in self.embeddings]
     self.updates = self.optimizer.get_updates(loss, dense_params)
     # Sparse updates
     sparse_params = self.embeddings
     sparse_grads = self.get_gradients(loss, sparse_params)
     sparse_flags = [
         K.all(K.not_equal(g, 0), axis=-1, keepdims=True)
         for g in sparse_grads
     ]
     original_lr = self.optimizer.lr
     for f, p in zip(sparse_flags, sparse_params):
         self.optimizer.lr = original_lr * K.cast(f, 'float32')
         # Update only the embeddings whose gradients are nonzero
         # (zero gradients most likely mean those words were not sampled).
         self.updates.extend(self.optimizer.get_updates(loss, [p]))
     self.optimizer.lr = original_lr
     return self.updates
Example #30
def one_hot_it(label, label_values):
    """
    Convert a segmentation image label array to one-hot format
    by replacing each pixel value with a vector of length num_classes

    # Arguments
        label: The 2D array segmentation image label
        label_values: A list of colour values, one per class

    # Returns
        An array with the same width and height as the input, but
        with a depth size of num_classes
    """
    semantic_map = []
    for colour in label_values:
        # Mark the pixels whose colour matches this class exactly
        equality = K.equal(label, colour)
        class_map = K.all(equality, axis=-1)
        semantic_map.append(class_map)
    semantic_map = K.stack(semantic_map, axis=-1)
    return semantic_map
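A sketch of applying one_hot_it to a 2x2 colour-coded mask (the palette below is a hypothetical example):

import numpy as np
from tensorflow.keras import backend as K

label_values = [[0, 0, 0], [255, 0, 0], [0, 255, 0]]  # background, class 1, class 2
label = K.constant(np.array([[[255, 0, 0], [0, 0, 0]],
                             [[0, 255, 0], [255, 0, 0]]]), dtype='int32')
one_hot = one_hot_it(label, label_values)
print(K.eval(K.cast(one_hot, 'int32')))  # shape (2, 2, 3)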