Example #1
def logloss(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + K.epsilon()))
  losses += math_ops.multiply((1 - y_true),
                              math_ops.log(1 - y_pred + K.epsilon()))
  return K.mean(-losses, axis=-1)
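As a sanity check (not part of the original snippet), the same epsilon-guarded binary cross-entropy can be reproduced in plain NumPy; 1e-7 stands in for K.epsilon(), whose default value is also 1e-7.

# Hypothetical NumPy sketch of the epsilon-guarded log loss above; eps = 1e-7 is assumed.
import numpy as np

def logloss_np(y_true, y_pred, eps=1e-7):
    y_true = np.asarray(y_true, dtype=np.float64)
    y_pred = np.asarray(y_pred, dtype=np.float64)
    losses = y_true * np.log(y_pred + eps) + (1 - y_true) * np.log(1 - y_pred + eps)
    return np.mean(-losses, axis=-1)

print(logloss_np([1.0, 0.0, 1.0], [0.9, 0.1, 0.8]))  # small loss for good predictions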
Example #2
 def __call__(self, w):
   norms = K.sqrt(
       math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
   desired = (
       self.rate * K.clip(norms, self.min_value, self.max_value) +
       (1 - self.rate) * norms)
   return w * (desired / (K.epsilon() + norms))
def recall(y_true, y_pred):
    """Recall metric.
    Only computes a batch-wise average of recall. Computes the recall, a metric
    for multi-label classification of how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
 def __call__(self, w):
     norms = backend.sqrt(
         math_ops.reduce_sum(math_ops.square(w),
                             axis=self.axis,
                             keepdims=True))
     desired = (
         self.rate * backend.clip(norms, self.min_value, self.max_value) +
         (1 - self.rate) * norms)
     return w * (desired / (backend.epsilon() + norms))
def outer_distance_transform_2d(mask,
                                bins=None,
                                erosion_width=None,
                                normalize=True):
    """Transform a label mask with an outer distance transform.

    Args:
        mask (numpy.array): A label mask (y data).
        bins (int): The number of transformed distance classes. If none,
            returns the continuous outer transform.
        erosion_width (int): Number of pixels to erode edges of each label.
        normalize (boolean): Normalize the transform of each cell by that
            cell's largest distance.

    Returns:
        numpy.array: A mask of same shape as input mask,
            with each label being a distance class from 1 to bins.
    """
    mask = np.squeeze(mask)  # squeeze the channels
    mask = erode_edges(mask, erosion_width)

    distance = ndimage.distance_transform_edt(mask)
    distance = distance.astype(K.floatx())  # normalized distances are floats

    if normalize:
        # uniquely label each cell and normalize the distance values
        # by that cell's maximum distance value
        label_matrix = label(mask)
        for prop in regionprops(label_matrix):
            labeled_distance = distance[label_matrix == prop.label]
            normalized_distance = labeled_distance / np.amax(labeled_distance)
            distance[label_matrix == prop.label] = normalized_distance

    if bins is None:
        return distance

    # bin each distance value into a class from 1 to bins
    min_dist = np.amin(distance)
    max_dist = np.amax(distance)
    distance_bins = np.linspace(min_dist - K.epsilon(),
                                max_dist + K.epsilon(),
                                num=bins + 1)
    distance = np.digitize(distance, distance_bins, right=True)
    return distance - 1  # minimum distance should be 0, not 1
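A minimal usage sketch (assuming the helpers used above, such as erode_edges, label, and regionprops, are importable in the same module): build a toy label mask and bin the normalized distances into four classes.

# Hypothetical usage of outer_distance_transform_2d on a toy mask with two square cells.
import numpy as np

toy_mask = np.zeros((32, 32), dtype=int)
toy_mask[4:12, 4:12] = 1    # cell 1
toy_mask[18:28, 18:28] = 2  # cell 2

classes = outer_distance_transform_2d(toy_mask, bins=4, erosion_width=None)
print(classes.shape, classes.min(), classes.max())  # expected: (32, 32) 0 3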
Example #6
    def __init__(self,
                 learning_rate=0.001,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-7,
                 weight_decay=0.,
                 amsgrad=False,
                 total_steps=0,
                 warmup_proportion=0.1,
                 min_lr=0.,
                 name='RAdam',
                 **kwargs):
        r"""Construct a new Adam optimizer.

        Args:
            learning_rate: A Tensor or a floating point value. The learning rate.
            beta_1: A float value or a constant float tensor. The exponential decay
                rate for the 1st moment estimates.
            beta_2: A float value or a constant float tensor. The exponential decay
                rate for the 2nd moment estimates.
            epsilon: A small constant for numerical stability. This epsilon is
                "epsilon hat" in the Kingma and Ba paper (in the formula just before
                Section 2.1), not the epsilon in Algorithm 1 of the paper.
            weight_decay: A floating point value. Weight decay for each param.
            amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from
                the paper "On the Convergence of Adam and beyond".
            total_steps: An integer. Total number of training steps.
                Enable warmup by setting a positive value.
            warmup_proportion: A floating point value. The proportion of increasing steps.
            min_lr: A floating point value. Minimum learning rate after warmup.
            name: Optional name for the operations created when applying gradients.
                Defaults to "RAdam".
                @compatibility(eager) When eager execution is enabled,
                `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be a
                callable that takes no arguments and returns the actual value to
                use. This can be useful for changing these values across different
                invocations of optimizer functions. @end_compatibility
            **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
                `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
                gradients by value, `decay` is included for backward compatibility to
                allow time inverse decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """

        super(RAdam, self).__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('weight_decay', weight_decay)
        self._set_hyper('total_steps', float(total_steps))
        self._set_hyper('warmup_proportion', warmup_proportion)
        self._set_hyper('min_lr', min_lr)
        self.epsilon = epsilon or K.epsilon()
        self.amsgrad = amsgrad
        self._initial_weight_decay = weight_decay
        self._initial_total_steps = total_steps
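A hedged usage sketch: assuming the full RAdam class (not just this constructor) is available, warmup is enabled by passing a positive total_steps, and the optimizer is then used like any other Keras optimizer.

# Hypothetical usage; the model definition is a placeholder.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
opt = RAdam(learning_rate=1e-3, total_steps=10000, warmup_proportion=0.1, min_lr=1e-5)
model.compile(optimizer=opt, loss='mse')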
    def call(self, x):
        print(x)
        features_dim = x.shape[-1].value
        step_dim = x.shape[-2].value
        # print(K.reshape(self.kernel, (-1, features_dim)))  # n, d
        # print(K.reshape(self.W, (features_dim, 1)))  # w= dx1
        # print(K.dot(K.reshape(self.kernel, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))))  # nx1

        eij = K.reshape(
            K.dot(K.reshape(self.kernel, (-1, features_dim)),
                  K.reshape(self.W, (features_dim, 1))),
            (-1, step_dim + self.windows))
        print(eij)

        eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)
        a = K.reshape(a, (step_dim + self.windows, 1))
        print(a)

        temp = a[0:self.windows, ]
        print(temp)
        temp /= K.cast(
            K.sum(temp, axis=0, keepdims=True) + K.epsilon(), K.floatx())

        weighted_input = self.kernel[0:self.windows, ] * temp
        alltemp = K.sum(weighted_input, axis=0, keepdims=True)

        for i in range(self.windows // 2 + 1, step_dim + self.windows // 2):
            temp = a[i - self.windows // 2:i + self.windows // 2, ]
            temp /= K.cast(
                K.sum(temp, axis=0, keepdims=True) + K.epsilon(), K.floatx())
            weighted_input = self.kernel[i - self.windows // 2:i +
                                         self.windows // 2, ] * temp
            temp = K.sum(weighted_input, axis=0, keepdims=True)
            alltemp = keras.layers.concatenate([alltemp, temp], 0)

        print(alltemp)

        alltemp = keras.activations.tanh(alltemp)
        return x + alltemp
 def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
   super(TruncatedAdagrad, self).__init__(**kwargs)
   with K.name_scope(self.__class__.__name__):
     self.lr = K.variable(lr, name='lr')
     self.decay = K.variable(decay, name='decay')
     self.iterations = K.variable(0, dtype='int64', name='iterations')
   if epsilon is None:
     epsilon = K.epsilon()
   self.epsilon = epsilon
   self.initial_decay = decay
Example #9
def precision(y_true, y_pred):
    """Precision metric.
    Only computes a batch-wise average of precision. Computes the precision, a
    metric for multi-label classification of how many selected items are
    relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #10
def invert(grads):
    """Inverts the gradients.

    Args:
        grads: A numpy array of grads to use.

    Returns:
        The inverted gradients.
    """
    return 1. / (grads + K.epsilon())
Example #11
def _rbox_aabb_loss(ground_truth_aabb, predicted_aabb, EPS=K.epsilon()):
    ground_truth_area = _aabb_box_area(ground_truth_aabb)
    predicted_area = _aabb_box_area(predicted_aabb)

    intersected_area = _aabb_intersected_area(ground_truth_aabb,
                                              predicted_aabb)
    union_area = ground_truth_area + predicted_area - intersected_area

    # Equivalent to -log(intersected_area / union_area)
    return K.log(union_area + EPS) - K.log(intersected_area + EPS)
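The comment above notes this is equivalent to -log(IoU); a small scalar check in NumPy illustrates the identity (eps plays the same stabilizing role as K.epsilon()).

# Scalar sanity check: log(union + eps) - log(inter + eps) ~= -log(inter / union).
import numpy as np

inter, union, eps = 30.0, 100.0, 1e-7
print(np.log(union + eps) - np.log(inter + eps))  # ~1.20397
print(-np.log(inter / union))                     # ~1.20397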
Example #12
    def root_mean_squared_error(y_true, y_pred):
        """A simple Keras implementation of R^2 that can be used as a Keras
             loss function.

             Since `score` uses R^2, it is
             advisable to use the same loss/metric when optimizing the model.
        """
        ss_res = K.sum(K.square(y_true - y_pred), axis=0)
        ss_tot = K.sum(K.square(y_true - K.mean(y_true, axis=0)), axis=0)
        return K.mean(1 - ss_res / (ss_tot + K.epsilon()), axis=-1)
Example #13
 def _update_s_matrix_stats(self, num_of_complex_params_t, s):
     if not self.add_s_matrix_stats:
         return tf.no_op(), tf.no_op()
     abs_eigvals = tf.math.abs(tf.linalg.eigvalsh(s))
     tol = K.epsilon() * tf.cast(num_of_complex_params_t, abs_eigvals.dtype) * tf.math.reduce_max(
         tf.math.abs(s))  # see https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.matrix_rank.html
     filtered_eigvals = tf.boolean_mask(abs_eigvals, abs_eigvals > tol)
     updated_s_matrix_rank = K.update(self.s_matrix_rank, tf.count_nonzero(filtered_eigvals))
     updated_s_matrix_min_eigval = K.update(self.s_matrix_min_eigval, tf.math.reduce_min(filtered_eigvals))
     return updated_s_matrix_min_eigval, updated_s_matrix_rank
Example #14
 def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
   super(Adagrad, self).__init__(**kwargs)
   with K.name_scope(self.__class__.__name__):
     self.lr = K.variable(lr, name='lr')
     self.decay = K.variable(decay, name='decay')
     self.iterations = K.variable(0, dtype='int64', name='iterations')
   if epsilon is None:
     epsilon = K.epsilon()
   self.epsilon = epsilon
   self.initial_decay = decay
Example #15
def f1_score(y_true, y_pred):
    """Computes the F1 Score
    Only computes a batch-wise average of recall. Computes the recall, a metric
    for multi-label classification of how many relevant items are selected.
    """
    #     print(y_true, y_pred)
    #     print(y_true.shape, y_pred.shape)
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return (2 * p * r) / (p + r + K.epsilon())
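Since precision, recall, and f1_score above all follow the (y_true, y_pred) metric signature, they can be passed to model.compile; a minimal sketch, assuming a binary sigmoid output.

# Hypothetical usage of the precision / recall / f1_score functions defined above.
import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(16,))])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[precision, recall, f1_score])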
def normalize(x):
    """utility function to normalize a tensor.

    # Arguments
        x: An input tensor.

    # Returns
        The normalized input tensor.
    """
    return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
    def ssd_loss(y_true, y_pred):
        num_classes = 11  # tf.shape(y_true)[2] - 4  # OpenCV does not support dynamic shapes.
        y_true = tf.reshape(y_true, [-1, num_classes + 4])
        y_pred = tf.reshape(y_pred, [-1, num_classes + 4])
        eps = K.epsilon()

        # Split Classification and Localization output
        y_true_clf, y_true_loc = tf.split(y_true, [num_classes, 4], axis=-1)
        y_pred_clf, y_pred_loc = tf.split(y_pred, [num_classes, 4], axis=-1)

        # split foreground & background
        mask = y_true_clf[:, -1]
        if ignore_match:
            # In the ignore-match case,
            # y_true_clf[:, -1] is filled with values other than {0, 1}.
            # If y_true_clf values are not matched to {0, 1}, the later softmax
            # computation can return NaN, so ignore-matched entries in
            # y_true_clf are reset to 1.
            neg_mask = tf.where(tf.equal(mask, 1.),
                                tf.ones_like(mask),
                                tf.zeros_like(mask))
            pos_mask = tf.where(tf.equal(mask, 0.),
                                tf.ones_like(mask),
                                tf.zeros_like(mask))
            y_true_clf = tf.where(tf.not_equal(y_true_clf, 0),
                                  tf.ones_like(y_true_clf),
                                  tf.zeros_like(y_true_clf))
        else:
            neg_mask = mask
            pos_mask = 1 - mask
        num_pos = tf.reduce_sum(pos_mask)
        num_neg = tf.reduce_sum(neg_mask)
        num_neg = tf.minimum(pos_neg_ratio * num_pos, num_neg)

        # softmax loss
        y_pred_clf = K.clip(y_pred_clf, eps, 1. - eps)
        clf_loss = -tf.reduce_sum(y_true_clf * tf.log(y_pred_clf),
                                  axis=-1)
        pos_clf_loss = tf.reduce_sum(clf_loss * pos_mask) / (num_pos + eps)
        neg_clf_loss = clf_loss * neg_mask
        values, indices = tf.nn.top_k(neg_clf_loss,
                                      k=tf.cast(num_neg, tf.int32))
        neg_clf_loss = tf.reduce_sum(values) / (num_neg + eps)

        clf_loss = pos_clf_loss + neg_clf_loss
        # smooth l1 loss
        l1_loss = tf.abs(y_true_loc - y_pred_loc)
        l2_loss = 0.5 * (y_true_loc - y_pred_loc) ** 2
        loc_loss = tf.where(tf.less(l1_loss, 1.0),
                            l2_loss,
                            l1_loss - 0.5)
        loc_loss = tf.reduce_sum(loc_loss, axis=-1)
        loc_loss = tf.reduce_sum(loc_loss * pos_mask) / (num_pos + eps)

        # total loss
        return clf_loss + alpha * loc_loss
Example #18
 def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
     super(Adadelta, self).__init__(**kwargs)
     with K.name_scope(self.__class__.__name__):
         self.lr = K.variable(lr, name='lr')
         self.decay = K.variable(decay, name='decay')
         self.iterations = K.variable(0, dtype='int64', name='iterations')
     if epsilon is None:
         epsilon = K.epsilon()
     self.rho = rho
     self.epsilon = epsilon
     self.initial_decay = decay
Example #19
def whale_siamese_image_mean_np(img: np.ndarray) -> np.ndarray:
    """
        Test Pass, equal to `whale_siamese_image_mean_tf`
    Args:
        img:

    Returns:

    """
    img -= np.mean(img, keepdims=True)
    img /= np.std(img, keepdims=True) + K.epsilon()
    return img
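A quick check (a sketch, not from the source) that the standardized image has roughly zero mean and unit standard deviation; note the function modifies float arrays in place via -= and /=.

# Hypothetical check of whale_siamese_image_mean_np on random float data.
import numpy as np

img = np.random.rand(64, 64, 3).astype('float32')
out = whale_siamese_image_mean_np(img)
print(round(float(out.mean()), 4), round(float(out.std()), 4))  # ~0.0 and ~1.0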
Example #20
    def root_mean_squared_error(y_true, y_pred):
        """A simple Keras implementation of R^2 that can be used as a Keras
        loss function.

        Since ScikitLearn's `score` uses R^2 by default, it is
        advisable to use the same loss/metric when optimizing the model.
        """
        ss_res = k_backend.sum(k_backend.square(y_true - y_pred), axis=0)
        ss_tot = k_backend.sum(
            k_backend.square(y_true - k_backend.mean(y_true, axis=0)), axis=0)
        return k_backend.mean(1 - ss_res / (ss_tot + k_backend.epsilon()),
                              axis=-1)
Example #21
def call(inputs, mask=None):
    steps_axis = 1
    if mask is not None:
        mask = math_ops.cast(mask, backend.floatx())
        input_shape = inputs.shape.as_list()
        broadcast_shape = [-1, input_shape[steps_axis], 1]
        mask = array_ops.reshape(mask, broadcast_shape)
        inputs *= mask
        return backend.sum(inputs, axis=steps_axis) / (
            math_ops.reduce_sum(mask, axis=steps_axis) + backend.epsilon())
    else:
        return backend.mean(inputs, axis=steps_axis)
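The masked branch above divides the masked sum by the number of unmasked steps (plus epsilon); a NumPy analogue of the same computation, assuming a (batch, steps, features) input.

# NumPy analogue of the masked global average pooling above (a sketch, not the source API).
import numpy as np

inputs = np.arange(12, dtype='float32').reshape(1, 4, 3)  # (batch, steps, features)
mask = np.array([[1., 1., 0., 0.]], dtype='float32')      # last two steps are padding
masked = inputs * mask[:, :, None]
pooled = masked.sum(axis=1) / (mask.sum(axis=1, keepdims=True) + 1e-7)
print(pooled)  # equals the mean over the first two time steps only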
Example #22
def artifact_precision(y_true, y_pred):
    weights = y_true[:, :, :, :, 2]

    mask = tf.equal(weights, 1)
    mask_true = tf.boolean_mask(y_true[:, :, :, :, 2], mask)
    mask_pred = tf.boolean_mask(1 - y_pred[:, :, :, :, 0], mask)

    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(mask_pred, 0, 1)))

    precision = true_positives / (predicted_positives + K.epsilon())

    return precision
def r2_score(y_true, y_pred):
    """
  Adds coefficient of determination metric
  :param y_true: The ground truth output tensorr, same dimensions as 'labels'
  :param y_pred: The output from the neural network
  :return: Weighted loss float Tensor
  """
    '''
  https://jmlb.github.io/ml/2017/03/20/CoeffDetermination_CustomMetric4Keras/
  '''
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
 def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
     super(RMSprop, self).__init__(**kwargs)
     with backend.name_scope(self.__class__.__name__):
         self.lr = backend.variable(lr, name='lr')
         self.rho = backend.variable(rho, name='rho')
         self.decay = backend.variable(decay, name='decay')
         self.iterations = backend.variable(0,
                                            dtype='int64',
                                            name='iterations')
     if epsilon is None:
         epsilon = backend.epsilon()
     self.epsilon = epsilon
     self.initial_decay = decay
Example #25
def generalised_dice_loss(y_true,
                          y_pred,
                          type_weight='Uniform'):
    """
    Function to calculate the Generalised Dice Loss defined in
        Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
        loss function for highly unbalanced segmentations. DLMIA 2017
    :param y_pred: the logits
    :param y_true: the segmentation ground truth
    :param type_weight: type of weighting allowed between labels (choice
        between Square (square of inverse of volume),
        Simple (inverse of volume) and Uniform (no weighting))
    :return: the loss
    """
    # n_el = tf.cast(K.prod(tf.shape(y_pred)), tf.float32)
    # these ops require y_true not to be all zeros; otherwise the loss and accuracy become NaN
    if type_weight == 'Square':
        weights_op = lambda x: 1. / (tf.math.pow(K.sum(x, axis=(0, 1, 2)), y=3) + K.epsilon())
    elif type_weight == 'Simple':
        weights_op = lambda x: 1. / (tf.reduce_sum(x, axis=(0, 1, 2)) + K.epsilon())
    elif type_weight == 'Uniform':
        weights_op = lambda x: 1.
    else:
        raise ValueError("The variable type_weight \"{}\" "
                         "is not defined.".format(type_weight))
    # treat each class separately
    w = weights_op(y_true)
    numerator = y_true * y_pred
    numerator = w * tf.reduce_sum(numerator, (0, 1, 2))
    numerator = tf.reduce_sum(numerator)

    denominator = tf.reduce_sum(y_true, (0, 1, 2)) + tf.reduce_sum(y_pred, (0, 1, 2))
    denominator = w * denominator
    denominator = tf.reduce_sum(denominator)

    gen_dice_coef = numerator / denominator
    # generalised_dice_score = 2. * num / denom
    return 1. - 2. * gen_dice_coef
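A minimal usage sketch with one-hot ground truth and softmax-style predictions; the shapes and values below are illustrative only.

# Hypothetical call of generalised_dice_loss on a tiny 2-class segmentation batch.
import tensorflow as tf

y_true = tf.one_hot(tf.constant([[[0, 1], [1, 1]]]), depth=2, dtype=tf.float32)  # (1, 2, 2, 2)
y_pred = tf.constant([[[[0.8, 0.2], [0.3, 0.7]],
                       [[0.1, 0.9], [0.2, 0.8]]]], dtype=tf.float32)
loss = generalised_dice_loss(y_true, y_pred, type_weight='Simple')
print(float(loss))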
Example #26
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim

        t1 = x[:, 0, :]
        t1 = K.expand_dims(t1, 1)
        # t1 = K.tile(t1, [1, step_dim, 1])
        print(t1)
        eij = K.batch_dot(x, t1, (2, 2))  #(?,500,1)
        # eij = K.tile(eij, [1, 1, features_dim])
        print(eij)
        a = K.exp(eij)
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        print(a)
        weighted_input = x * a
        temp = K.sum(weighted_input, axis=1)
        temp = K.expand_dims(temp, 1)
        temp = K.tile(temp, [1, 1, features_dim])
        print(temp)
        alltemp = temp

        for i in range(1, step_dim):
            t1 = x[:, i, :]
            t1 = K.expand_dims(t1, 1)
            # t1 = K.tile(t1, [1, 2, 1])
            eij = K.batch_dot(x, t1, (2, 2))
            # eij = K.tile(eij, [1, 1, features_dim])
            a = K.exp(eij)
            a /= K.cast(
                K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
            weighted_input = x * a
            temp = K.sum(weighted_input, axis=1)
            temp = K.expand_dims(temp, 1)
            temp = K.tile(temp, [1, 1, features_dim])
            alltemp = keras.layers.concatenate([alltemp, temp], 1)

        temp = keras.layers.concatenate([x, alltemp])
        return temp
 def __init__(self,
              center=True,
              scale=True,
              epsilon=None,
              gamma_initializer='ones',
              beta_initializer='zeros',
              gamma_regularizer=None,
              beta_regularizer=None,
              gamma_constraint=None,
              beta_constraint=None,
              **kwargs):
     """Layer normalization layer
     See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
     :param center: Add an offset parameter if it is True.
     :param scale: Add a scale parameter if it is True.
     :param epsilon: Epsilon for calculating variance.
     :param gamma_initializer: Initializer for the gamma weight.
     :param beta_initializer: Initializer for the beta weight.
     :param gamma_regularizer: Optional regularizer for the gamma weight.
     :param beta_regularizer: Optional regularizer for the beta weight.
     :param gamma_constraint: Optional constraint for the gamma weight.
     :param beta_constraint: Optional constraint for the beta weight.
     :param kwargs:
     """
     super(LayerNormalization, self).__init__(**kwargs)
     self.supports_masking = True
     self.center = center
     self.scale = scale
     if epsilon is None:
         epsilon = K.epsilon() * K.epsilon()
     self.epsilon = epsilon
     self.gamma_initializer = initializers.get(gamma_initializer)
     self.beta_initializer = initializers.get(beta_initializer)
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.gamma_constraint = constraints.get(gamma_constraint)
     self.beta_constraint = constraints.get(beta_constraint)
     self.gamma, self.beta = None, None
Example #28
def expand_dims_f1(y_true, y_pred):
    """F1-score."""
    y_true = tf.expand_dims(y_true, -1)
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.slice(y_pred, [0, 1], [-1, 1])
    precision = Precision(y_true, y_pred)
    # print("  f1--Precision:  %s" % (precision))
    recall = Recall(y_true, y_pred)
    # print("  f1--Recall:  %s" % (recall))
    f1 = 2 * ((precision * recall) / (precision + recall + K.epsilon()))
    return f1
def fbeta_score(y_true, y_pred, beta=1):
    # Calculates the F score, the weighted harmonic mean of precision and recall.
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta**2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
Example #30
 def call(self, inputs):
     inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
     if inputs.shape.rank == 1:
         inputs = array_ops.expand_dims(inputs, 1)
     # If the inputs are not floats, cast them to floats. This avoids issues
     # with int-float multiplication and division below.
     if inputs.dtype != K.floatx():
         inputs = math_ops.cast(inputs, K.floatx())
     # We need to reshape the mean and variance data to ensure that Tensorflow
     # broadcasts the data correctly.
     mean = array_ops.reshape(self.mean, self._broadcast_shape)
     variance = array_ops.reshape(self.variance, self._broadcast_shape)
     return ((inputs - mean) /
             math_ops.maximum(math_ops.sqrt(variance), K.epsilon()))
Example #31
def axon_precision(y_true, y_pred):
    weights = tf.reduce_sum(y_true, axis=-1)

    mask = tf.equal(weights, 1)

    mask_true = tf.boolean_mask(y_true[:, :, :, :, 0], mask)
    mask_pred = tf.boolean_mask(y_pred[:, :, :, :, 0], mask)

    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(mask_pred, 0, 1)))

    precision = true_positives / (predicted_positives + K.epsilon())

    return precision
Example #32
def axon_recall(y_true, y_pred):
    weights = tf.reduce_sum(y_true, axis=-1)

    mask = tf.equal(weights, 1)

    mask_true = tf.boolean_mask(y_true[:, :, :, :, 0], mask)
    mask_pred = tf.boolean_mask(y_pred[:, :, :, :, 0], mask)

    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(mask_true, 0, 1)))

    recall = true_positives / (actual_positives + K.epsilon())

    return recall
def discriminative_instance_loss_3D(y_true,
                                    y_pred,
                                    delta_v=0.5,
                                    delta_d=1.5,
                                    order=2,
                                    gamma=1e-3):
    def temp_norm(ten, axis=-1):
        return tf.sqrt(
            tf.constant(1e-4, dtype=K.floatx()) +
            tf.reduce_sum(tf.square(ten), axis=axis))

    # y_pred = tf.divide(y_pred, tf.expand_dims(tf.norm(y_pred, ord = 2, axis = -1), axis = -1))

    # Compute variance loss
    cells_summed = tf.tensordot(y_true,
                                y_pred,
                                axes=[[0, 1, 2, 3], [0, 1, 2, 3]])
    n_pixels = tf.cast(tf.count_nonzero(y_true, axis=[0, 1, 2, 3]),
                       dtype=K.floatx()) + K.epsilon()
    n_pixels_expand = tf.expand_dims(n_pixels, axis=1)
    mu = tf.divide(cells_summed, n_pixels_expand)

    mu_tensor = tf.tensordot(y_true, mu, axes=[[-1], [0]])
    L_var_1 = y_pred - mu_tensor
    L_var_2 = tf.square(
        tf.nn.relu(
            temp_norm(L_var_1, axis=-1) -
            tf.constant(delta_v, dtype=K.floatx())))
    L_var_3 = tf.tensordot(L_var_2, y_true, axes=[[0, 1, 2, 3], [0, 1, 2, 3]])
    L_var_4 = tf.divide(L_var_3, n_pixels)
    L_var = tf.reduce_mean(L_var_4)

    # Compute distance loss
    mu_a = tf.expand_dims(mu, axis=0)
    mu_b = tf.expand_dims(mu, axis=1)

    diff_matrix = tf.subtract(mu_a, mu_b)
    L_dist_1 = temp_norm(diff_matrix, axis=-1)
    L_dist_2 = tf.square(
        tf.nn.relu(tf.constant(2 * delta_d, dtype=K.floatx()) - L_dist_1))
    diag = tf.constant(0, dtype=K.floatx()) * tf.diag_part(L_dist_2)
    L_dist_3 = tf.matrix_set_diag(L_dist_2, diag)
    L_dist = tf.reduce_mean(L_dist_3)

    # Compute regularization loss
    L_reg = gamma * temp_norm(mu, axis=-1)

    L = L_var + L_dist + L_reg

    return L
Example #34
 def __init__(self,
              lr=0.002,
              beta_1=0.9,
              beta_2=0.999,
              epsilon=None,
              decay=0.,
              **kwargs):
   super(Adamax, self).__init__(**kwargs)
   with K.name_scope(self.__class__.__name__):
     self.iterations = K.variable(0, dtype='int64', name='iterations')
     self.lr = K.variable(lr, name='lr')
     self.beta_1 = K.variable(beta_1, name='beta_1')
     self.beta_2 = K.variable(beta_2, name='beta_2')
     self.decay = K.variable(decay, name='decay')
   if epsilon is None:
     epsilon = K.epsilon()
   self.epsilon = epsilon
   self.initial_decay = decay
Example #35
 def __init__(self,
              lr=0.002,
              beta_1=0.9,
              beta_2=0.999,
              epsilon=None,
              schedule_decay=0.004,
              **kwargs):
   super(Nadam, self).__init__(**kwargs)
   with K.name_scope(self.__class__.__name__):
     self.iterations = K.variable(0, dtype='int64', name='iterations')
     self.m_schedule = K.variable(1., name='m_schedule')
     self.lr = K.variable(lr, name='lr')
     self.beta_1 = K.variable(beta_1, name='beta_1')
     self.beta_2 = K.variable(beta_2, name='beta_2')
   if epsilon is None:
     epsilon = K.epsilon()
   self.epsilon = epsilon
   self.schedule_decay = schedule_decay
Example #36
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(math_ops.square(first_log - second_log), axis=-1)
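The same clipped MSLE can be checked with NumPy, using 1e-7 for K.epsilon() (its default).

# NumPy sketch of the mean squared logarithmic error above, with eps = 1e-7 assumed.
import numpy as np

def msle_np(y_true, y_pred, eps=1e-7):
    first_log = np.log(np.clip(y_pred, eps, None) + 1.)
    second_log = np.log(np.clip(y_true, eps, None) + 1.)
    return np.mean(np.square(first_log - second_log), axis=-1)

print(msle_np(np.array([1., 2., 3.]), np.array([1.1, 1.9, 3.2])))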
Example #37
def mean_squared_logarithmic_error(y_true, y_pred):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
Example #38
def mean_absolute_percentage_error(y_true, y_pred):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  diff = math_ops.abs(
      (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Example #39
def mean_absolute_percentage_error(y_true, y_pred):
  diff = math_ops.abs(
      (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Example #40
 def __call__(self, w):
   return w / (
       K.epsilon() + K.sqrt(
           math_ops.reduce_sum(
               math_ops.square(w), axis=self.axis, keepdims=True)))
Example #41
def kullback_leibler_divergence(y_true, y_pred):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
Example #42
def kullback_leibler_divergence(y_true, y_pred):
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
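For reference, the clipped KL divergence above agrees with scipy.stats.entropy on proper probability vectors (a sketch; scipy is assumed to be available).

# Cross-check of the clipped KL divergence against scipy.stats.entropy.
import numpy as np
from scipy.stats import entropy

p = np.array([0.1, 0.4, 0.5])
q = np.array([0.2, 0.3, 0.5])
eps = 1e-7
p_c, q_c = np.clip(p, eps, 1), np.clip(q, eps, 1)
print(np.sum(p_c * np.log(p_c / q_c)))  # ~0.0458
print(entropy(p, q))                    # same value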
Example #43
def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
Example #44
def poisson(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
Example #45
def logloss(y_true, y_pred):
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + K.epsilon()))
  losses += math_ops.multiply((1 - y_true),
                              math_ops.log(1 - y_pred + K.epsilon()))
  return K.mean(-losses, axis=-1)