Example no. 1
def eigen_loss(y_true, y_pred):
    y_true = tf.Print(y_true, [y_true], message='y_true', summarize=30)
    y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=30)

    y_true_clipped = K.clip(y_true, K.epsilon(), None)
    y_pred_clipped = K.clip(y_pred, K.epsilon(), None)

    first_log = K.log(y_pred_clipped + 1.)
    second_log = K.log(y_true_clipped + 1.)
    w_x = K.variable(np.array([[-1., 0., 1.],
                                [-1., 0., 1.],
                                [-1., 0., 1.]]).reshape(3, 3, 1, 1))

    grad_x_pred = K.conv2d(first_log, w_x, padding='same')
    grad_x_true = K.conv2d(second_log, w_x, padding='same')

    w_y = K.variable(np.array([[-1., -1., -1.],
                                [0., 0., 0.],
                                [1., 1., 1.]]).reshape(3, 3, 1, 1))

    grad_y_pred = K.conv2d(first_log, w_y, padding='same')
    grad_y_true = K.conv2d(second_log, w_y, padding='same')
    diff_x = grad_x_pred - grad_x_true
    diff_y = grad_y_pred - grad_y_true

    log_term = K.mean(K.square((first_log - second_log)), axis=-1)
    sc_inv_term = K.square(K.mean((first_log - second_log),axis=-1))
    grad_loss = K.mean(K.square(diff_x) + K.square(diff_y), axis=-1)

    return log_term - (0.5 * sc_inv_term) + grad_loss
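Usage sketch (not from the source): a custom loss like eigen_loss above can be passed by reference to model.compile; the depth-regression architecture and input shape below are illustrative assumptions.
# Hypothetical usage sketch for eigen_loss: the model and input shape are
# illustrative assumptions, not taken from the original source.
from keras.models import Sequential
from keras.layers import Conv2D

depth_model = Sequential([
    Conv2D(32, 3, padding='same', activation='relu', input_shape=(128, 128, 1)),
    Conv2D(1, 1, padding='same', activation='relu'),  # single-channel predicted depth map
])
depth_model.compile(optimizer='adam', loss=eigen_loss)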
Example no. 2
    def sensitivity(y_true, y_pred):
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pos = K.round(K.clip(y_true, 0, 1))
        tp = K.sum(y_pos * y_pred_pos)
        pos = K.sum(y_pos)

        return tp / (pos + K.epsilon())
Example no. 3
def root_mean_squared_logarithmic_loss(y_true, y_pred):
    y_true = tf.Print(y_true, [y_true], message='y_true', summarize=30)
    y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=30)
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)

    return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1)+0.00001)
Example no. 4
    def call(self, inputs, **kwargs):
        assert isinstance(inputs, list) and len(inputs) == 3
        first, second, features = inputs[0], inputs[1], inputs[2]
        if not self.from_logits:
            first = kb.clip(first, 1e-10, 1.0)
            second = kb.clip(second, 1e-10, 1.0)
            first_, second_ = kb.log(first), kb.log(second)
        else:
            first_, second_ = first, second
        # embedded_features.shape = (M, T, 1)
        if self.use_intermediate_layer:
            features = kb.dot(features, self.first_kernel)
            features = kb.bias_add(features, self.first_bias, data_format="channels_last")
            features = self.intermediate_activation(features)
        embedded_features = kb.dot(features, self.features_kernel)
        embedded_features = kb.bias_add(
            embedded_features, self.features_bias, data_format="channels_last")
        if self.use_dimension_bias:
            tiling_shape = [1] * (kb.ndim(first)-1) + [kb.shape(first)[-1]]
            embedded_features = kb.tile(embedded_features, tiling_shape)
            embedded_features = kb.bias_add(
                embedded_features, self.dimensions_bias, data_format="channels_last")
        sigma = kb.sigmoid(embedded_features)

        result = weighted_sum(first_, second_, sigma,
                              self.first_threshold, self.second_threshold)
        probs = kb.softmax(result)
        if self.return_logits:
            return [probs, result]
        return probs
Example no. 5
    def specificity(y_true, y_pred):
        y_pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
        y_neg = 1 - K.round(K.clip(y_true, 0, 1))
        tn = K.sum(y_neg * y_pred_neg)
        neg = K.sum(y_neg)

        return tn / (neg + K.epsilon())
Example no. 6
def recall(y_true, y_pred):
    # Count positive samples.
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))

    # How many relevant items are selected?
    return c1 / (c3 + smooth)  # smooth is a smoothing constant defined outside this snippet
Example no. 7
def precision(y_true, y_pred):
    # Count positive samples.
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))

    # How many selected items are relevant?
    return c1 / (c2 + smooth)
Example no. 8
def binary_crossentropy_with_ranking(y_true, y_pred):
    """ Trying to combine ranking loss with numeric precision"""
    # first get the log loss like normal
    logloss = K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

    # next, build a rank loss

    # clip the probabilities to keep stability
    y_pred_clipped = K.clip(y_pred, K.epsilon(), 1-K.epsilon())

    # translate into the raw scores before the logit
    y_pred_score = K.log(y_pred_clipped / (1 - y_pred_clipped))

    # determine what the maximum score for a zero outcome is
    y_pred_score_zerooutcome_max = K.max(y_pred_score * K.cast(K.less(y_true, 1), K.floatx()))

    # determine how much each score is above or below it
    rankloss = y_pred_score - y_pred_score_zerooutcome_max

    # only keep losses for positive outcomes
    rankloss = rankloss * y_true

    # only keep losses where the score is below the max
    rankloss = K.square(K.clip(rankloss, -100, 0))

    # average the loss for just the positive outcomes
    rankloss = K.sum(rankloss, axis=-1) / (K.sum(K.cast(K.greater(y_true, 0), K.floatx())) + 1)

    # return (rankloss + 1) * logloss - an alternative to try
    return rankloss + logloss
Example no. 9
 def call(self, x, mask=None):
     sin = x[:, self.sin_idx : self.sin_idx + 1]
     cos = x[:, self.cos_idx : self.cos_idx + 1]
     eps = 1e-7
     scale = K.sqrt(1.0 / (eps + sin ** 2 + cos ** 2))
     sin_scaled = K.clip(scale * sin, -1, 1)
     cos_scaled = K.clip(scale * cos, -1, 1)
     return K.concatenate([x[:, : self.sin_idx], sin_scaled, cos_scaled, x[:, self.cos_idx + 1 :]], axis=1)
Example no. 10
def label_reg_loss(y_true, y_pred):
  # KL-div
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)

  y_true_mean = K.mean(y_true, axis=0)
  y_pred_mean = K.mean(y_pred, axis=0)
  return K.sum(y_true_mean * K.log(y_true_mean / y_pred_mean), axis=-1)
Example no. 11
def precision(y_true, y_pred):
    '''Calculates the precision, a metric for multi-label classification of
    how many selected items are relevant.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example no. 12
def recall(y_true, y_pred):
    '''Calculates the recall, a metric for multi-label classification of
    how many relevant items are selected.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
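A small sanity check (illustrative, not from the source) of the two metrics above on a constant batch, assuming the TensorFlow backend:
# Illustrative check of the precision/recall metrics above on a toy batch.
import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[1., 0., 1., 1.]]))
y_pred = K.constant(np.array([[0.9, 0.8, 0.2, 0.6]]))
# true positives = 2, predicted positives = 3, possible positives = 3
print(K.eval(precision(y_true, y_pred)))  # ~0.667
print(K.eval(recall(y_true, y_pred)))     # ~0.667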
Example no. 13
 def get_output(self, x, mask=None):
     sin = x[:, self.sin_idx]
     cos = x[:, self.cos_idx]
     eps = 1e-7
     scale = K.sqrt(1 / (eps + sin ** 2 + cos ** 2))
     sin = K.clip(scale * sin, -1, 1)
     cos = K.clip(scale * cos, -1, 1)
     return K.concatenate([x[:, :self.sin_idx], sin, cos,
                           x[:, self.cos_idx + 1:]], axis=1)
Example no. 14
def true_positive_rate(y_true, y_pred, mode='p'):
    threshold_value = 0.5
    if mode=='n':
        threshold_value=1-threshold_value
    # works as round() with threshold_value
    y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
    true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
    real_positives = K.sum(K.clip(y_true, 0, 1))
    return true_positives / (real_positives + K.epsilon())
Example no. 15
def precision(y_true, y_pred):
    """Precision metric.
    Only computes a batch-wise average of precision.
    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example no. 16
def recall(y_true, y_pred):
    """Recall metric.
    Only computes a batch-wise average of recall.
    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example no. 17
def f1(y_true, y_pred):
    y_true_f = KB.flatten(y_true)
    y_pred_f = KB.flatten(y_pred)
    true_positives = KB.sum(KB.round(KB.clip(y_true_f * y_pred_f, 0, 1)), axis=-1)
    possible_positives = KB.sum(KB.round(KB.clip(y_true_f, 0, 1)), axis=-1)
    recall = true_positives / (possible_positives + KB.epsilon())
    predicted_positives = KB.sum(KB.round(KB.clip(y_pred_f, 0, 1)), axis=-1)
    precision = true_positives / (predicted_positives + KB.epsilon())

    return 2*((precision*recall)/(precision+recall+KB.epsilon()))
Example no. 18
    def dice(self, y_true, y_pred):
        """
        compute dice for given Tensors

        """
        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.input_type == 'prob':
            # We assume that y_true is probabilistic, but just in case:
            y_true /= K.sum(y_true, axis=-1, keepdims=True)
            y_true = K.clip(y_true, K.epsilon(), 1)

            # make sure pred is a probability
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1)

        # Prepare the volumes to operate on
        # If we're doing 'hard' Dice, then we will prepare one-hot-based matrices of size
        # [batch_size, nb_voxels, nb_labels], where for each voxel in each batch entry,
        # the entries are either 0 or 1
        if self.dice_type == 'hard':

            # if given predicted probability, transform to "hard max"
            if self.input_type == 'prob':
                if self.approx_hard_max:
                    y_pred_op = _hard_max(y_pred, axis=-1)
                    y_true_op = _hard_max(y_true, axis=-1)
                else:
                    y_pred_op = _label_to_one_hot(K.argmax(y_pred, axis=-1), self.nb_labels)
                    y_true_op = _label_to_one_hot(K.argmax(y_true, axis=-1), self.nb_labels)

            # if given predicted label, transform to one hot notation
            else:
                assert self.input_type == 'max_label'
                y_pred_op = _label_to_one_hot(y_pred, self.nb_labels)
                y_true_op = _label_to_one_hot(y_true, self.nb_labels)

        # If we're doing soft Dice, require prob output, and the data already is as we need it
        # [batch_size, nb_voxels, nb_labels]
        else:
            assert self.input_type == 'prob', "cannot do soft dice with max_label input"
            y_pred_op = y_pred
            y_true_op = y_true

        # compute dice for each entry in batch.
        # dice will now be [batch_size, nb_labels]
        sum_dim = 1
        top = 2 * K.sum(y_true_op * y_pred_op, sum_dim)
        bottom = K.sum(K.square(y_true_op), sum_dim) + K.sum(K.square(y_pred_op), sum_dim)
        # make sure we have no 0s on the bottom. K.epsilon()
        bottom = K.maximum(bottom, self.area_reg)
        return top / bottom
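For reference, a minimal numpy restatement (illustrative sketch, not from the source) of the soft-Dice computation above for probabilistic volumes of shape [batch_size, nb_voxels, nb_labels]; the area_reg default below is an arbitrary illustrative value.
# Minimal numpy restatement of the soft-Dice numerator/denominator computed above.
import numpy as np

def soft_dice_np(y_true, y_pred, area_reg=0.1):
    # y_true, y_pred: [batch_size, nb_voxels, nb_labels] probability volumes
    top = 2 * np.sum(y_true * y_pred, axis=1)
    bottom = np.sum(np.square(y_true), axis=1) + np.sum(np.square(y_pred), axis=1)
    bottom = np.maximum(bottom, area_reg)  # guard against an empty denominator
    return top / bottom                    # [batch_size, nb_labels]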
Example no. 19
def precision_K(y_true, y_pred):
    """
    Calculate precision for keras tensors
    Args:
        y_true: true labels
        y_pred: predicted labels

    Returns:
        precision
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example no. 20
def recall_K(y_true, y_pred):
    """
    Calculate recall for keras tensors
    Args:
        y_true: true labels
        y_pred: predicted labels

    Returns:
        recall
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example no. 21
    def loss(self, y_true, y_pred):
        """ categorical crossentropy loss """

        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.use_float16:
            y_true = K.cast(y_true, 'float16')
            y_pred = K.cast(y_pred, 'float16')

        # scale and clip probabilities
        # this should not be necessary for softmax output.
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute log probability
        log_post = K.log(y_pred)  # likelihood

        # loss
        loss = - y_true * log_post

        # weighted loss
        if self.weights is not None:
            loss *= self.weights

        if self.vox_weights is not None:
            loss *= self.vox_weights

        # take the total loss
        # loss = K.batch_flatten(loss)
        mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
        mloss = tf.verify_tensor_all_finite(mloss, 'Loss not finite')
        return mloss
Example no. 22
 def loss(y_true, y_pred):
     y_true = denormalize(y_true, y_mean, y_std)
     y_pred = denormalize(y_pred, y_mean, y_std)
     diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true),
                                             K.epsilon(),
                                             None))
     return 100. * K.mean(diff, axis=-1)
Example no. 23
def sens(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    se = tp / (tp + fn)
    return se
Example no. 24
def fbeta_score(y_true, y_pred, beta=1):
    '''Calculates the F score, the weighted harmonic mean of precision and recall.
    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
    computes this, as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    '''
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
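An arithmetic illustration (not from the source) of how beta shifts the weighting in the formula above, assuming precision p = 0.5 and recall r = 0.25:
# Illustrative scalar version of the F-beta formula above, with p = 0.5 and r = 0.25:
#   beta = 0.5 -> 1.25 * 0.125 / (0.25 * 0.5 + 0.25) ~= 0.417  (favours precision)
#   beta = 1.0 -> 2.00 * 0.125 / (1.00 * 0.5 + 0.25) ~= 0.333  (standard F1)
#   beta = 2.0 -> 5.00 * 0.125 / (4.00 * 0.5 + 0.25) ~= 0.278  (favours recall, which is low here)
def fbeta_from_pr(p, r, beta=1.0):
    bb = beta ** 2
    return (1 + bb) * (p * r) / (bb * p + r + 1e-7)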
Example no. 25
def spec(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    sp = tn / (fp + tn)
    return sp
Example no. 26
def precision(y_true, y_pred):
    """
    Precision metric.

    Only computes a batch-wise average of precision.

    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.

    Source
    ------
    https://github.com/fchollet/keras/issues/5400#issuecomment-314747992
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example no. 27
def jaccard_coef_int(y_true, y_pred):
    # __author__ = Vladimir Iglovikov
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
    sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
    #sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
    jac = (intersection + smooth) / (sum_ - intersection + smooth)
    return K.mean(jac)
Example no. 28
 def get_gradients(self, loss, params):
     grads = K.gradients(loss, params)
     if hasattr(self, 'clipnorm') and self.clipnorm > 0:
         norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
         grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
     if hasattr(self, 'clipvalue') and self.clipvalue > 0:
         grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
     return grads
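Usage sketch (an assumption based on the standard Keras optimizer keyword arguments, not code from the source) showing how clipnorm and clipvalue are typically set so that the two branches above are exercised:
# Hypothetical usage: clipnorm/clipvalue are the standard Keras optimizer kwargs
# that populate the attributes checked above.
from keras import optimizers

sgd_norm_clipped = optimizers.SGD(lr=0.01, clipnorm=1.0)    # rescale gradients whose global L2 norm exceeds 1
sgd_value_clipped = optimizers.SGD(lr=0.01, clipvalue=0.5)  # clip each gradient element to [-0.5, 0.5]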
Example no. 29
    def compute_sigma_reg(self, y_true, y_pred):
        if self.logvar_map is not None:
            logvar_map = self.logvar_map
        elif self.var_map is not None:
            logvar_map = K.log(self.var_map + 1e-8)

        # we will scale later to K.sum
        return  0.5 * K.clip(logvar_map, -100, 100) 
Example no. 30
def metrics_mape(rate_true, rate_pred):
    if args.norm_ans:
        rate_true = denormalize(rate_true, rate_mean, rate_std)
        rate_pred = denormalize(rate_pred, rate_mean, rate_std)
    diff = K.abs((rate_true - rate_pred) / K.clip(K.abs(rate_true),
                                                  K.epsilon(),
                                                  None))
    return 100. * K.mean(diff, axis=-1)
Example no. 31
def kl_dist(vects):
    qry_vec, doc_vec = vects
    qry_vec = K.clip(qry_vec, K.epsilon(), 1)
    doc_vec = K.clip(doc_vec, K.epsilon(), 1)
    dist = K.batch_dot(-qry_vec, K.log(doc_vec), 1)
    return dist
Example no. 32
def kullback_leibler_divergence_(y_true, y_pred):
    y_true = K.clip(y_true, K.epsilon(), 1)
    y_pred = K.clip(y_pred, K.epsilon(), 1)
    return K.mean(K.sum(y_true * K.log(y_true / y_pred), axis=(1, 2, 3)),
                  axis=-1)
Example no. 33
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example no. 34
        def custom_loss(y_true, y_pred):
            out = K.clip(y_pred, 1e-8, 1 - 1e-8)
            log_lik = y_true * K.log(out)

            return K.sum(-log_lik * delta)
Example no. 35
def false_negative_rate(y_true, y_pred):
    y_pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    false_negatives = K.sum(y_pos * y_pred_neg) / (K.sum(y_pos) + K.epsilon())
    return false_negatives
Example no. 36
 def mean_squared_logarithmic_error(y_true, y_pred):
     first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
     second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
     return K.mean(K.square(first_log - second_log), axis=-1)
Example no. 37
 def __call__(self, p):
     return K.clip(p, -self.c, self.c)
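A fuller sketch (hypothetical: the class name WeightClip and the surrounding constructor are assumptions) of how a clip constraint like the __call__ above is usually defined and attached to a layer:
# Hypothetical surrounding class for the __call__ above, plus a typical attachment point
# (WGAN-style weight clipping). The class name and layer are illustrative assumptions.
from keras import backend as K
from keras.constraints import Constraint
from keras.layers import Dense

class WeightClip(Constraint):
    def __init__(self, c=0.01):
        self.c = c

    def __call__(self, p):
        return K.clip(p, -self.c, self.c)

    def get_config(self):
        return {'c': self.c}

critic_output = Dense(1, kernel_constraint=WeightClip(0.01))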
Example no. 38
 def recall(y_true, y_pred):
     true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
     possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
     recall = true_positives / (possible_positives + K.epsilon())
     return recall
Example no. 39
def seq_binary_entropy_loss(y_true, y_pred):
    y_pred = K.clip(y_pred, 1e-6, 1 - 1e-6)
    return -K.sum(y_true * K.log(y_pred) + (1 - y_true) * K.log(1 - y_pred),
                  axis=-1)
Example no. 40
def sensitivity(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
Example no. 41
def true_positives(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    true_positives = K.sum(y_pos * y_pred_pos)
    return true_positives
Example no. 42
 def sampling(args):
     z_mean, z_log_var = args
     epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0., stddev=1.0)
     return z_mean + K.exp(K.clip(z_log_var/2, -2, 2)) * epsilon
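A wiring sketch (following the standard Keras VAE pattern; the encoder tensors and dimensions below are illustrative assumptions) showing how the sampling function above is typically attached with a Lambda layer:
# Hypothetical encoder wiring for the sampling function above (standard Keras VAE pattern).
from keras.layers import Input, Dense, Lambda

batch_size, latent_dim = 32, 2
x = Input(batch_shape=(batch_size, 784))
h = Dense(256, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])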
Example no. 43
def constrainedCrossEntropy(ytrue, ypred):
    ypred = T.clip(ypred, 0.001, 0.9999)
    return losses.categorical_crossentropy(ytrue, ypred)
Example no. 44
 def true_pos(yt, yp):
     return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
Example no. 45
def true_positive_rate(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    true_positives = K.sum(y_pos * y_pred_pos) / (K.sum(y_pos) + K.epsilon())
    return true_positives
Example no. 46
 def categorical_focal_loss_fixed(y_true, y_pred):
   epsilon = K.epsilon()
   y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
   cross_entropy = -y_true * K.log(y_pred)
   loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
   return K.mean(K.sum(loss, axis=-1))
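The function above closes over free variables alpha and gamma; a typical wrapper that binds them (an assumption about the surrounding code, not from the source) looks like:
# Hypothetical closure wrapper binding the alpha and gamma used by the snippet above.
from keras import backend as K

def categorical_focal_loss(alpha=0.25, gamma=2.0):
    def categorical_focal_loss_fixed(y_true, y_pred):
        epsilon = K.epsilon()
        y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
        cross_entropy = -y_true * K.log(y_pred)
        loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
        return K.mean(K.sum(loss, axis=-1))
    return categorical_focal_loss_fixed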
Example no. 47
 def __call__(self, weights):
     return backend.clip(weights, -self.clip_value, self.clip_value)
Example no. 48
def false_negatives(y_true, y_pred):
    y_pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    false_negatives = K.sum(y_pos * y_pred_neg)
    return false_negatives
Example no. 49
def true_negative_rate(y_true, y_pred):
    y_pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
    y_neg = 1 - K.round(K.clip(y_true, 0, 1))
    true_negatives = K.sum(y_neg * y_pred_neg) / (K.sum(y_neg) + K.epsilon())
    return true_negatives
Example no. 50
 def pred_pos(yt, yp):
     return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
Example no. 51
def mape_custom(y_true, y_pred):
    diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
    return 100. * K.mean(diff, axis=[1, 2, 3])
Example no. 52
 def loss(y_true, y_pred):
   y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
   y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
   loss = y_true * K.log(y_pred) * weights
   loss = -K.sum(loss, -1)
   return loss
Example no. 53
 def __call__(self, p):
     return K.clip(p, self.min_value, self.max_value)
Example no. 54
def _loss_generator(y_true, y_pred):
    y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    out = -(K.log(y_pred))
    return K.mean(out, axis=-1)
Example no. 55
def _hard_sigmoid(x):
    x = (0.5 * x) + 0.5
    return K.clip(x, 0, 1)
Example no. 56
def custom_loss(y_true, y_pred):
    out = K.clip(y_pred, 1e-8, 1 - 1e-8)  # set boundary
    log_lik = y_true * K.log(out)  # policy gradient
    return K.sum(-log_lik * delta)
Example no. 57
def abs_KL_div(y_true, y_pred):
    y_true = K.clip(y_true, K.epsilon(), None)
    y_pred = K.clip(y_pred, K.epsilon(), None)
    #    return K.sum( K.abs( (y_true- y_pred) * (K.log(y_true / y_pred))), axis=-1)
    return K.sum((y_true - y_pred) * (K.log(y_true / y_pred)), axis=-1)
Example no. 58
 def n_norm(self, x, epsilon=1e-6):
     return K.pow(K.clip(K.sum(K.pow(x, self.n), -1), epsilon, 1 - epsilon),
                  1. / self.n)
Example no. 59
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)

        # first update the number of iterations
        self.updates = [K.update_add(self.iterations, 1)]

        if self.decay_epochs:
            ite_casted = K.cast(self.iterations, K.dtype(self.decay_epochs))
            hit_decay_epoch = K.any(K.equal(ite_casted, self.decay_epochs))

            #print(hit_decay_epoch)
            lr = K.switch(hit_decay_epoch, self.lr['all'] * self.decay['all'],
                          self.lr['all'])

            K.print_tensor(self.lr['all'])
            #a = K.switch(hit_decay_epoch,
            #             K.print_tensor(self.lr['all'],message='Decays:'),
            #             K.print_tensor(self.lr['all'],message=' '))

            self.updates.append(K.update(self.lr['all'], lr))

        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(s) for s in shapes]
        self.weights = [self.iterations] + moments
        #print(self.weights)

        for p, g, m in zip(params, grads, moments):
            #print("HEREEEE:", p.name, g, m)
            if p.name in self.lr.keys():
                if self.verbose > 0:
                    print("Setting different learning rate for ", p.name,
                          " : ", K.eval(self.lr[p.name]))
                lr = self.lr[p.name]
                if self.decay_epochs and p.name in self.decay.keys():
                    lr = K.switch(hit_decay_epoch,
                                  self.lr[p.name] * self.decay[p.name],
                                  self.lr[p.name])
                    self.updates.append(K.update(self.lr[p.name], lr))
                    if self.verbose > 0:
                        print("Added decay to ", p.name, ": ", K.eval(lr), ",",
                              self.decay[p.name])
                elif self.decay_epochs:
                    lr = K.switch(hit_decay_epoch,
                                  self.lr[p.name] * self.decay['all'],
                                  self.lr[p.name])
                    self.updates.append(K.update(self.lr[p.name], lr))
                    if self.verbose > 0:
                        print("Added decay to ", p.name, ": ", K.eval(lr), ",",
                              self.decay['all'])
                else:
                    lr = self.lr[p.name]

            else:
                lr = self.lr['all']

            if p.name in self.momentum.keys():
                if self.verbose > 0:
                    print("Setting different momentum for ", p.name, " , ",
                          K.eval(self.momentum[p.name]))
                momentum = self.momentum[p.name]
            else:
                momentum = self.momentum['all']

            v = momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                new_p = p + momentum * (momentum * m - lr * g) - lr * g
            else:
                new_p = p + momentum * m - lr * g

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            if self.clips_val and (p.name in self.clips.keys()):
                if self.verbose > 0:
                    print("Clipping variable", p.name, " to ",
                          self.clips[p.name])
                c = K.eval(self.clips[p.name])
                new_p = K.clip(new_p, c[0], c[1])
            #print("updates for ", p.name, " lr: ", K.eval(lr), " mom:", K.eval(momentum))
            self.updates.append(K.update(p, new_p))
        return self.updates
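A minimal scalar restatement (illustrative only, not from the source) of the per-parameter update rule applied in the loop above:
# Scalar restatement of the momentum/Nesterov update performed in the loop above.
def sgd_momentum_step(p, g, m, lr=0.01, momentum=0.9, nesterov=False):
    v = momentum * m - lr * g          # velocity, written back into the moment slot
    if nesterov:
        new_p = p + momentum * v - lr * g
    else:
        new_p = p + v
    return new_p, v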
Example no. 60
def true_negatives(y_true, y_pred):
    y_pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
    y_neg = 1 - K.round(K.clip(y_true, 0, 1))
    true_negatives = K.sum(y_neg * y_pred_neg)
    return true_negatives