Example #1
def depth_loss_function(y_true, y_pred, theta=0.1, maxDepthVal=1000.0 / 10.0):
    # Point-wise depth
    l_depth = K.mean(K.abs(y_pred - y_true), axis=-1)

    # Edges
    dy_true, dx_true = tf.image.image_gradients(y_true)
    dy_pred, dx_pred = tf.image.image_gradients(y_pred)
    l_edges = K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true),
                     axis=-1)

    # Structural similarity (SSIM) index
    l_ssim = K.clip((1 - tf.image.ssim(y_true, y_pred, maxDepthVal)) * 0.5, 0,
                    1)

    # Weights
    w1 = 1.0
    w2 = 1.0
    w3 = theta
    return (w1 * l_ssim) + (w2 * K.mean(l_edges)) + (w3 * K.mean(l_depth))
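A minimal sketch of how a loss like this is typically attached to a model, assuming `depth_loss_function` is defined as above; the tiny single-layer model, the Adam optimizer and the 1e-4 learning rate are illustrative assumptions, not part of the original example:

import tensorflow as tf

# Illustrative model producing a one-channel depth map; the architecture is an assumption.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(1, 3, padding='same', activation='relu',
                           input_shape=(64, 64, 3))
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss=depth_loss_function)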
Example #2
def loss_uncertainty_mae(y_true, y_pred):
    """
    Mean absolute error loss for the uncertainty estimation.
    L = |sigma_pred - |label_true - label_reco||.

    Returns
    -------
    loss : Mean absolute error for uncertainty estimations.

    """
    # order in y_pred: 1) pred label 2) pred label error
    # prevent that the gradient flows back over the label network:
    y_pred_label = K.stop_gradient(y_pred[:, 0])
    y_pred_label_std = y_pred[:, 1]
    y_true_label = y_true[:, 0]

    # (s - |y_true - y_pred|)
    loss = K.abs(y_pred_label_std - K.abs(y_true_label - y_pred_label))
    return loss
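This loss expects the network to emit both a point prediction and an uncertainty in the same output tensor (column 0 and column 1). A hedged sketch of such a two-headed regressor, assuming `loss_uncertainty_mae` is defined as above; the layer sizes, the softplus activation and the 16-feature input are illustrative assumptions:

import tensorflow as tf

inp = tf.keras.Input(shape=(16,))
label = tf.keras.layers.Dense(1)(inp)                          # predicted label
sigma = tf.keras.layers.Dense(1, activation='softplus')(inp)   # predicted uncertainty, kept positive
out = tf.keras.layers.Concatenate(axis=-1)([label, sigma])     # column order matches the loss
model = tf.keras.Model(inp, out)
model.compile(optimizer='adam', loss=loss_uncertainty_mae)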
Example #3
 def class_loss_regr_fixed_num(y_true, y_pred):
     y_true = tf.dtypes.cast(y_true, 'float32')
     y_pred = tf.dtypes.cast(y_pred, 'float32')
     x = y_true[:, :, 4 * num_classes:] - y_pred
     x_abs = K.abs(x)
     x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
     return lambda_cls_regr * K.sum(
         y_true[:, :, :4 * num_classes] *
         (x_bool * (0.5 * x * x) + (1 - x_bool) *
          (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4 * num_classes])
Example #4
 def loss_units(x):
     t = x / K.max(K.abs(x))
     x = K.switch(K.less(t, K.epsilon()), K.zeros_like(x), x)
     m = K.sum(K.cast(K.greater(x, 0.), K.floatx()))
     sum_x = K.sum(x)
     moving_units = K.switch(K.less_equal(m, self.units), m,
                             (1. - self.moving_decay) * self.moving_units)
     epsilon_minus = 0.
     epsilon_plus = K.switch(K.less_equal(m, self.units), self.moving_units, 0.)
     return K.relu(moving_units - sum_x - epsilon_minus) + K.relu(sum_x - moving_units - epsilon_plus)
Example #5
def custom_loss(pred, label):
    pred_vgg_input = tf.keras.applications.vgg16.preprocess_input(pred)
    label_vgg_input = tf.keras.applications.vgg16.preprocess_input(label)
    pred_vgg_features = vgg_model(pred_vgg_input)
    label_vgg_features = vgg_model(label_vgg_input)
    vgg_loss = K.mean(K.abs(pred_vgg_features - label_vgg_features)) / 0.6

    pred_resnet_input = tf.keras.applications.resnet_v2.preprocess_input(pred)
    label_resnet_input = tf.keras.applications.resnet_v2.preprocess_input(
        label)
    pred_resnet_features = resnet_model(pred_resnet_input)
    label_resnet_features = resnet_model(label_resnet_input)
    resnet_loss = K.mean(
        K.abs(pred_resnet_features - label_resnet_features)) / 0.018

    gram_matrix_loss = get_gram_matrix_loss(pred, label) / 0.065
    pixel_loss = K.mean(K.abs(pred - label)) / 0.065
    return 100 * (3 * vgg_loss + 5 * resnet_loss + pixel_loss +
                  gram_matrix_loss)
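The example references module-level `vgg_model` and `resnet_model` feature extractors without defining them (`get_gram_matrix_loss` is a further helper from the same source, not reproduced here). One plausible way to build the extractors, shown only as a sketch; using the full convolutional stacks, ImageNet weights and a fixed 224x224 input size are assumptions, not part of the original code:

import tensorflow as tf

# Frozen, headless backbones used as fixed feature extractors (assumed setup).
vgg_model = tf.keras.applications.VGG16(include_top=False, weights='imagenet',
                                        input_shape=(224, 224, 3))
vgg_model.trainable = False

resnet_model = tf.keras.applications.ResNet50V2(include_top=False, weights='imagenet',
                                                input_shape=(224, 224, 3))
resnet_model.trainable = False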
Example #6
def dice_coef(y_true, y_pred, smooth=1):
    """
    Dice = (2*|X & Y|)/ (|X|+ |Y|)
         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
    ref: https://arxiv.org/pdf/1606.04797v1.pdf
    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)

    return (2. * intersection + smooth) / (
        K.sum(K.square(y_true), -1) + K.sum(K.square(y_pred), -1) + smooth)
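Segmentation models usually minimise one minus this coefficient; a minimal companion sketch (the `dice_coef_loss` name is an assumption, not part of the example):

def dice_coef_loss(y_true, y_pred):
    # Maximising the Dice coefficient is equivalent to minimising (1 - Dice).
    return 1. - dice_coef(y_true, y_pred)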
Example #7
def mean_absolute_error(y_true, y_pred):
    """
    Copy of the Keras mean absolute error function for testing purposes.
    """
    # y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=5)
    # y_true = tf.Print(y_true, [y_true], message='y_true', summarize=5)
    absolute = K.abs(y_pred - y_true)
    # absolute = tf.Print(absolute, [absolute], message='absolute', summarize=5)
    mae = K.mean(absolute, axis=-1)
    return mae
Example #8
def mask_aware_mean(inputs):
    # https://github.com/github/CodeSearchNet/blob/master/src/utils/tfutils.py#L107
    # recreate the masks - all zero rows have been masked
    mask = backend.not_equal(backend.sum(backend.abs(inputs), axis=2, keepdims=True), 0)
    # number of that rows are not all zeros
    num = backend.sum(backend.cast(mask, 'float32'), axis=1, keepdims=False)
    # compute mask-aware mean of inputs
    inputs_mean = backend.sum(inputs, axis=1, keepdims=False) / (num + 1E-8)

    return inputs_mean
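Inside a Keras model a helper like this is normally wrapped in a Lambda layer. A sketch, assuming `mask_aware_mean` is defined as above and the input is a zero-padded (batch, timesteps, features) tensor; the 128-feature width is an illustrative assumption:

import tensorflow as tf
from tensorflow.keras import backend

inputs = tf.keras.Input(shape=(None, 128))                  # variable-length, zero-padded sequences
pooled = tf.keras.layers.Lambda(mask_aware_mean)(inputs)    # (batch, 128) mask-aware average
model = tf.keras.Model(inputs, pooled)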
Example #9
    def smooth_l1(y_true, y_pred):
        '''Smooth L1 (Huber) loss with delta = HUBER_DELTA.'''

        HUBER_DELTA = 1.0

        x = abs(y_true - y_pred)
        x = switch(x < HUBER_DELTA, 0.5 * x**2,
                   HUBER_DELTA * (x - 0.5 * HUBER_DELTA))
        return x
Example #10
 def __call__(self, x):
     if not self.l1 and not self.l2:
         return K.constant(0.)
     regularization = 0.
     x = 1 - K.sqrt(K.sum(K.square(x)))
     if self.l1:
         regularization += self.l1 * K.abs(x)
     if self.l2:
         regularization += self.l2 * K.square(x)
     return regularization
Example #11
 def channel(self, zMean):
     batch = K.shape(zMean)[0]
     # Generate Laplace r.v.
     # Source: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
     # Code: https://stackoverflow.com/questions/56691436/how-can-one-add-laplacian-noise-to-a-tensor-in-keras
     u = K.random_uniform((batch, self.latent_dim), minval=-0.5, maxval=0.5)
     epsilon = K.sign(u) * K.log(1 - 2 * K.abs(u) + K.epsilon())
     # Because BatchNormalization produces z vector with signal power 'latentDim',
     # we should not scale the noise power here.
     return zMean + np.sqrt(self.train_noisepow) * epsilon
Example #12
 def _calculate_dilation_matrix(self, xx, yy, d, sigma=1, norm_axis=None):
     if self.interp_mode == 'bilinear':
         # bilinear
         delta_to_pos_1 = yy - (xx - 0.5) * d
         delta_to_neg_1 = yy - (xx + 0.5) * d
         mat_pos = K.maximum(1 - K.abs(delta_to_pos_1), 0)
         mat_neg = K.maximum(1 - K.abs(delta_to_neg_1), 0)
     else:
         # gaussian
         relative_delta_to_pos_1 = yy / d - (xx - 0.5)
         relative_delta_to_neg_1 = yy / d - (xx + 0.5)
         mat_pos = K.exp(-(relative_delta_to_pos_1 / sigma)**2)
         mat_pos = mat_pos / K.sum(mat_pos, axis=norm_axis, keepdims=True)
         mat_neg = K.exp(-(relative_delta_to_neg_1 / sigma)**2)
         mat_neg = mat_neg / K.sum(mat_neg, axis=norm_axis, keepdims=True)
     mat = mat_pos - mat_neg
     # normalize weight matrix by area summed - output of contextconv will be weighted averagepools
     mat = mat / d
     return mat
Example #13
    def calc_error(self, y_true, y_pred):
        """
        A method to calculate Mean Absolute Percentage Error
        """

        y_true = K.maximum(y_true, 1e-7)  # prevent division by zero
        error = K.mean(K.abs((y_true - y_pred) / y_true)) * 100

        # error = tf.keras.losses.MAPE(y_true,y_pred)
        return error
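For instance (illustrative numbers only): with y_true = [100, 200] and y_pred = [110, 180] the relative errors are 0.10 and 0.10, so the method returns 10 (percent).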
Example #14
    def rpn_loss_reg_fixed_num(y_true, y_pred):
        x = y_true[:, :, :, 4 * num_anchors:] - y_pred
        x_abs = K.abs(x)

        x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
        return lambda_rpn_reg * K.sum(
            y_true[:, :, :, :4 * num_anchors] *
            (x_bool * (0.5 * x * x) + (1 - x_bool) *
             (x_abs - 0.5))) / K.sum(epsilon +
                                     y_true[:, :, :, :4 * num_anchors])
Example #15
def weighted_bce_loss(y_true, y_pred, weight):
    # avoiding overflow
    epsilon = 1e-7
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    logit_y_pred = K.log(y_pred / (1. - y_pred))
    #logit_y_pred = y_pred

    loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \
    (K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
    return K.sum(loss) / K.sum(weight)
Example #16
def mean_iou(y_true, y_pred, smooth=1):

    y_true = tf.cast(y_true, "int32")
    y_pred = tf.cast(y_pred > 0.5, "int32")

    intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2])
    union = K.sum(y_true, [1, 2]) + K.sum(y_pred, [1, 2]) - intersection
    iou = K.mean((intersection + smooth) / (union + smooth), axis=[1, 0])

    return iou
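A quick sanity check under eager execution (TF 2.x default), assuming `mean_iou` is defined as above; the 4x4 mask is illustrative, and identical inputs should yield an IoU of ~1.0:

import numpy as np
import tensorflow as tf

mask = np.zeros((1, 4, 4, 1), dtype='float32')
mask[0, 1:3, 1:3, 0] = 1.0                                     # a small square of foreground pixels
print(float(mean_iou(tf.constant(mask), tf.constant(mask))))   # ~1.0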
Example #17
def weighted_bce_loss(y_true, y_pred, weight):
    # avoiding overflow
    epsilon = 1e-7
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    logit_y_pred = K.log(y_pred / (1. - y_pred))

    # https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
    loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \
                                          (K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
    return K.sum(loss) / K.sum(weight)
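With the weight map fixed to one everywhere, the stable log(1 + exp(-|z|)) + max(-z, 0) term reduces this to the ordinary sigmoid cross-entropy, which a quick eager-mode check makes visible (illustrative values; assumes `weighted_bce_loss` is defined as above):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant(np.array([[1., 0., 1., 1.]], dtype='float32'))
y_pred = tf.constant(np.array([[0.9, 0.2, 0.7, 0.4]], dtype='float32'))
weight = tf.ones_like(y_true)
print(float(weighted_bce_loss(y_true, y_pred, weight)))       # sum(loss)/sum(weight) == mean BCE here
print(float(K.mean(K.binary_crossentropy(y_true, y_pred))))   # should match closely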
Example #18
    def __call__(self, y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)
        anchor_positive_distance = euclidean_distance(anchor, positive)
        anchor_negative_distance = euclidean_distance(anchor, negative)

        softmax = K.softmax(
            K.concatenate([anchor_positive_distance,
                           anchor_negative_distance]))
        ideal_distance = K.constant([0., 1.])
        return K.mean(K.abs(ideal_distance - softmax))
Example #19
def frn_layer_keras(x, tau, beta, gamma, epsilon=1e-6):
    # x: Input tensor of shape [BxHxWxC].
    # tau, beta, gamma: Variables of shape [1, 1, 1, C].
    # epsilon: A scalar constant or learnable variable.
    # Compute the mean of squared activations (nu^2) per channel.
    nu2 = K.mean(K.square(x), axis=[1, 2], keepdims=True)
    # Perform FRN.
    x = x * 1 / K.sqrt(nu2 + K.abs(epsilon))
    # Return after applying the Offset-ReLU non-linearity.
    return K.maximum(gamma * x + beta, tau)
Example #20
def iou_metric(y_true, y_pred):
    # iou loss for bounding box prediction
    # input must be as [x, y, w, h]

    # AOG = Area of Groundtruth box
    AoG = K.abs(K.transpose(y_true)[2]) * K.abs(K.transpose(y_true)[3])

    # AOP = Area of Predicted box
    AoP = K.abs(K.transpose(y_pred)[2]) * K.abs(K.transpose(y_pred)[3])

    # Top-left corner
    Topleft_pred_X = K.transpose(y_pred)[0] - K.transpose(y_pred)[2] / 2
    Topleft_pred_Y = K.transpose(y_pred)[1] - K.transpose(y_pred)[3] / 2
    Topleft_true_X = K.transpose(y_true)[0] - K.transpose(y_true)[2] / 2
    Topleft_true_Y = K.transpose(y_true)[1] - K.transpose(y_true)[3] / 2

    # Bottom-right corner
    BotRight_pred_X = K.transpose(y_pred)[0] + K.transpose(y_pred)[2] / 2
    BotRight_pred_Y = K.transpose(y_pred)[1] + K.transpose(y_pred)[3] / 2
    BotRight_true_X = K.transpose(y_true)[0] + K.transpose(y_true)[2] / 2
    BotRight_true_Y = K.transpose(y_true)[1] + K.transpose(y_true)[3] / 2

    # overlaps are the co-ordinates of intersection box
    overlap_0 = K.maximum(Topleft_pred_X, Topleft_true_X)
    overlap_1 = K.maximum(Topleft_pred_Y, Topleft_true_Y)
    overlap_2 = K.minimum(BotRight_pred_X, BotRight_true_X)
    overlap_3 = K.minimum(BotRight_pred_Y, BotRight_true_Y)

    # intersection area
    # zero = K.variable(value=0, dtype='float32', name='zero')
    intersection = K.maximum(0., (overlap_2 - overlap_0)) * K.maximum(0., (overlap_3 - overlap_1))

    # area of union of both boxes
    union = AoG + AoP - intersection

    # iou calculation
    iou = intersection / union

    # bounding values of iou to (0,1)
    iou = K.clip(iou, 0.0 + K.epsilon(), 1.0 - K.epsilon())

    return K.mean(iou)
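Called directly on tensors under eager execution, identical boxes give an IoU of essentially 1 (clipped to 1 - epsilon); the box values below are illustrative and assume `iou_metric` is defined as above:

import tensorflow as tf

boxes = tf.constant([[10., 10., 4., 6.]])   # one box in [x, y, w, h] (centre format)
print(float(iou_metric(boxes, boxes)))      # ~1.0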
Example #21
def build_final_model(input_shape, distance_metric='uniform_euclidean'):

    assert distance_metric in ('uniform_euclidean', 'weighted_l1',
                               'cosine_distance')
    left_input = Input(input_shape)
    right_input = Input(input_shape)
    model = get_base_conv_encoder(input_shape)
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    if distance_metric == 'weighted_l1':
        print("using Weighted_l1")
        L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
        L1_distance = L1_layer([encoded_l, encoded_r])
        prediction = Dense(1,
                           activation='sigmoid',
                           bias_initializer=initialize_bias)(L1_distance)

    if distance_metric == 'uniform_euclidean':
        print("inside euclidian")
        L1_layer = Lambda(lambda tensors: K.sqrt(
            K.sum(K.square(K.abs(tensors[0] - tensors[1])),
                  axis=-1,
                  keepdims=True)))
        L1_distance = L1_layer([encoded_l, encoded_r])
        prediction = Dense(1,
                           activation='sigmoid',
                           bias_initializer=initialize_bias)(L1_distance)

    if distance_metric == 'cosine_distance':
        print("using cosine similarity")
        L1_layer = Lambda(cosine_similarity,
                          output_shape=cos_dist_output_shape)
        L1_distance = L1_layer([encoded_l, encoded_r])
        prediction = Dense(1,
                           activation='sigmoid',
                           bias_initializer=initialize_bias)(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
    # return the model
    return siamese_net
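A hedged usage sketch: it assumes the helpers referenced above (`get_base_conv_encoder`, `initialize_bias`, `cosine_similarity`, `cos_dist_output_shape`) are available, and the 105x105 single-channel input shape is an illustrative assumption:

siamese = build_final_model((105, 105, 1), distance_metric='weighted_l1')
siamese.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
siamese.summary()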
Example #22
def custom_loss_function(y_true, y_pred, regressParams, X, Y):
    """" regression constant is a list of [constant,slope]. """

    regressParams = tf.constant(regressParams.astype(np.float32))
    X = tf.constant(X.astype(np.float32))
    Y = tf.constant(Y.astype(np.float32))

    Y_pred = regressParams[0] + regressParams[1] * X + y_pred
    loss = K.mean(K.abs((Y - Y_pred)))

    return loss
Example #23
def calculate_new_loss_variable(y_true, y_pred):
    y = K.argmax(
        y_true,
        axis=1)  # Array of all the indexes of max values for true values
    y_hat = K.argmax(
        y_pred,
        axis=1)  # Array of all the indexes of max values for predictions

    tmp = K.cast(K.sum(K.abs(y - y_hat)), dtype='float32')
    loss = tmp * 0.001  # [0, 0.506]
    return loss
Example #24
 def dice_coef(y_true, y_pred, smooth=1):
     y_true_label = y_true[:, :, :, :, label]
     y_pred_label = y_pred[:, :, :, :, label]
     """
     Dice = (2*|X & Y|)/ (|X|+ |Y|)
          =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
     ref: https://arxiv.org/pdf/1606.04797v1.pdf
     """
     intersection = sum(abs(y_true_label * y_pred_label), axis=-1)
     return (2. * intersection + smooth) / (sum(
         square(y_true_label), -1) + sum(square(y_pred_label), -1) + smooth)
Example #25
 def class_loss_regr_fixed_num(y_true, y_pred):
     # print('Y True:', y_true.shape)
     # print('Y_pred: ', y_pred.shape)
     y_true = tf.cast(y_true, 'float32')
     x = y_true[:, :, 4 * num_classes:] - y_pred
     x_abs = K.abs(x)
     x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
     return lambda_cls_regr * K.sum(
         y_true[:, :, :4 * num_classes] *
         (x_bool * (0.5 * x * x) + (1 - x_bool) *
          (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4 * num_classes])
Example #26
    def multi_loss(self, y, y_, fault):
        """
        Loss formula

        :param y: 2d np.array. Batch of predicting y.
        :param y_: 2d np.array. Batch of real y.
        :param fault: 2d np.array. Batch of valid fault mask.
        :return: float. Loss value.
        """
        loss = layers.multiply([(y - y_), fault])
        return K.sum(K.abs(loss)) / K.sum(fault)
Example #27
 def loss(y_true, y_pred):
     # split y_true into the actual y_true (=y_t) and the scaling factor
     y_t, scaling_factors = tf.split(y_true, **split_args)
     y_t = K.cast(y_t, y_pred.dtype)
     sq = K.square(K.abs(y_pred - y_t))
     # arbitrarily take the very first element of the array as actual
     # scaling factor
     scaling_factor = K.cast(scaling_factors[0, 0, 0], sq.dtype)
     # note that the scaling_factor is now multiplicative!
      return K.mean(sq, axis=-1) / scaling_factor
Example #28
    def get_gradients(self, tape, loss, var_list, grad_loss=None):
        """Called in `minimize` to compute gradients from loss."""
        grads = tape.gradient(loss, var_list, grad_loss)
        if not (hasattr(self.e2efs_layer, 'regularization_loss')):
            return list(zip(grads, var_list))
        with tf.GradientTape() as e2efs_tape:
            e2efs_loss = self.e2efs_layer.regularization_func(self.e2efs_layer.kernel)
        e2efs_grad = grads[0]
        e2efs_regularizer_grad = e2efs_tape.gradient(e2efs_loss, [self.e2efs_layer.kernel])[0]
        # tf.print(e2efs_regularizer_grad)
        e2efs_regularizer_grad_corrected = e2efs_regularizer_grad / (tf.norm(e2efs_regularizer_grad) + K.epsilon())
        e2efs_grad_corrected = e2efs_grad / (tf.norm(e2efs_grad) + K.epsilon())
        combined_e2efs_grad = (1. - self.e2efs_layer.moving_factor) * e2efs_grad_corrected + \
                              self.e2efs_layer.moving_factor * e2efs_regularizer_grad_corrected
        combined_e2efs_grad = K.sign(
            self.e2efs_layer.moving_factor) * K.minimum(self.th, K.max(
            K.abs(combined_e2efs_grad))) * combined_e2efs_grad / K.max(
            K.abs(combined_e2efs_grad) + K.epsilon())
        grads[0] = combined_e2efs_grad
        return list(zip(grads, var_list))
Example #29
def calc_loss(pred, target, loss='l2'):
    if loss.lower() == "l2":
        return K.mean(K.square(pred - target))
    elif loss.lower() == "l1":
        return K.mean(K.abs(pred - target))
    elif loss.lower() == "cross_entropy":
        return -K.mean(
            K.log(pred + K.epsilon()) * target +
            K.log(1 - pred + K.epsilon()) * (1 - target))
    else:
        raise ValueError(f'Received an unknown loss type: {loss}.')
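A direct call under eager execution (illustrative values, assuming `calc_loss` is defined as above):

import tensorflow as tf

a = tf.constant([0.2, 0.8, 0.5])
b = tf.constant([0.0, 1.0, 0.5])
print(float(calc_loss(a, b, loss='l1')))   # mean absolute difference, ~0.133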
Example #30
def join_reim_mag_output(tensor):
    """

    Args:
      tensor: Tensor of shape (batch_size, n, n, 2)

    Returns:
      Tensor of shape (batch_size, n, n) with joined real and imag parts

    """
    return tf.expand_dims(K.abs(utils.join_reim_tensor(tensor)), -1)