Example #1
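All snippets below use the Keras backend API and assume the conventional imports, which the originals omit; individual examples additionally need the usual keras.models / keras.layers / keras.optimizers imports.

import numpy as np
import tensorflow as tf
from keras import backend as K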
def correlation_coefficient_loss(y_true, y_pred):
    # Pearson correlation between predictions and targets
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x - mx, y - my
    r_num = K.sum(tf.multiply(xm, ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den

    # guard against floating-point drift outside [-1, 1]
    r = K.maximum(K.minimum(r, 1.0), -1.0)
    # minimize 1 - r^2 so that training maximizes the correlation
    return 1 - K.square(r)
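A minimal usage sketch (the model and data names are placeholders):

model.compile(optimizer='adam', loss=correlation_coefficient_loss)
model.fit(x_train, y_train, epochs=10, batch_size=32)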
Example #2
def Kaggle_IoU_Precision(y_true, y_pred, threshold=0.5):
    # Mean average precision over IoU thresholds 0.50, 0.55, ..., 0.95,
    # as used in Kaggle segmentation competitions.
    y_pred = K.squeeze(K.cast(y_pred > threshold, 'int32'), -1)
    y_true = K.cast(y_true[..., 0], K.floatx())
    y_pred = K.cast(y_pred, K.floatx())
    truth_areas = K.sum(y_true, axis=[1, 2])
    pred_areas = K.sum(y_pred, axis=[1, 2])
    intersection = K.sum(y_true * y_pred, axis=[1, 2])
    # the upper clip bound assumes 128 x 128 masks
    union = K.clip(truth_areas + pred_areas - intersection, 1e-9, 128 * 128)
    # images where both mask and prediction are empty count as a perfect match
    check = K.map_fn(lambda x: K.equal(x, 0),
                     truth_areas + pred_areas,
                     dtype=tf.bool)
    p = intersection / union
    iou = K.switch(check, p + 1., p)

    # fraction of thresholds each IoU clears, then the batch mean
    prec = K.map_fn(lambda x: K.mean(K.greater(x, np.arange(0.5, 1.0, 0.05))),
                    iou,
                    dtype=tf.float32)
    prec_iou = K.mean(prec)
    return prec_iou
Example #3
def FocalLoss(y_true, y_pred):
    """Binary focal loss.

    :param y_true: A tensor of the same shape as `y_pred`.
    :param y_pred: A tensor of probabilities resulting from a sigmoid.
    :return: Output tensor (scalar loss).
    """
    gamma = 2.0   # focusing parameter: down-weights easy examples
    alpha = 0.25  # class-balance weight for the positive class

    # pt_1: predicted probability at positive targets (1 elsewhere, so those
    # entries drop out of its log term); pt_0: predicted probability at
    # negative targets (0 elsewhere)
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))

    epsilon = K.epsilon()
    # clip to prevent NaN's and Inf's
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)

    return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
           - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
Example #4
def weighted_mse(y_true, y_pred):
    # Keras calls loss functions as fn(y_true, y_pred)
    majority_weight = 0.9500
    minority_weight = 0.0500
    # Weight each error by its target's class before squaring
    loss = K.mean(K.square((y_pred - y_true) *
                           (y_true * majority_weight +
                            (1. - y_true) * minority_weight)),
                  axis=-1)

    # Return the mean error
    return loss
Example #5
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
                with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids,
                                 (-1, ))  # (batch * num_rois, )
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(
        target_masks, (-1, mask_shape[2],
                       mask_shape[3]))  # (batch * num_rois, height, width)
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(
        pred_masks,
        (-1, pred_shape[2], pred_shape[3],
         pred_shape[4]))  # (batch * proposals, height, width, num_classes)
    # Permute predicted masks to [N, num_classes, height, width]
    pred_masks = tf.transpose(
        pred_masks,
        [0, 3, 1, 2])  # (batch * proposals, num_classes, height, width)

    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]  # (?, )
    positive_class_ids = tf.cast(tf.gather(target_class_ids, positive_ix),
                                 tf.int64)  # (?, )
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)  # (?, 2)

    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(
        target_masks,  # (batch * num_rois, height, width)
        positive_ix)  # (?, )
    y_pred = tf.gather_nd(
        pred_masks,  # (batch * proposals, num_classes, height, width)
        indices)  # (?, 2)

    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape of y_true / y_pred here: (num_positive_rois, height, width)
    loss = K.switch(
        tf.size(y_true) > 0,
        K.binary_crossentropy(
            target=y_true,  # (?, height, width)
            output=y_pred),  # (?, height, width)
        tf.constant(0.0))
    loss = K.mean(loss)
    return loss
Example #6
def attention_3d_block(inputs):
    # inputs.shape = (batch_size, time_steps, rows, input_dim)
    input_dim = int(inputs.shape[3])
    a = Permute((3, 1, 2))(inputs)
    # a Dense layer with softmax produces the attention weights
    a = Dense(TIME_STEPS, activation='softmax')(a)
    if SINGLE_ATTENTION_VECTOR:
        # share one attention vector across all feature dimensions
        a = Lambda(lambda x: K.mean(x, axis=1))(a)
        a = RepeatVector(input_dim)(a)
    a_probs = Permute((2, 3, 1))(a)

    # elementwise product of the inputs with their attention weights
    # (legacy Keras 1 merge API; multiply() in Keras 2)
    output_attention_mul = merge([inputs, a_probs],
                                 name='attention_mul',
                                 mode='mul')
    return output_attention_mul
Example #7
def dice_coef(y_true, y_pred, smooth=1):
    # Sorensen-Dice coefficient, smoothed to avoid division by zero
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
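To train on this coefficient, the complementary loss is the usual one-liner (an addition; not part of the original snippet):

def dice_loss(y_true, y_pred):
    # Dice is a similarity in [0, 1]; subtract from 1 to get a loss
    return 1. - dice_coef(y_true, y_pred)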
Example #8
class DQNAgent:
    def act(self, state):  # roll the dice for a move
        # epsilon-greedy: explore with probability epsilon, else exploit
        if random.random() < self.epsilon:
            return random.randrange(self.action_size)  # random action
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])  # optimal action: argmax_a' Q(s, a')

    def observe(self, reward):
        # record the reward received from the environment
        pass

    def remember(self, state, action, reward, next_state, done):
        # store the transition in replay memory
        pass

    def learn(self):  # learning step
        # sample from replay memory and fit the model
        pass

    def _huber_loss(self, target, prediction):
        # pseudo-Huber loss: quadratic near zero, linear for large errors
        err = prediction - target
        return K.mean(K.sqrt(1. + K.square(err)) - 1., axis=-1)

    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)
Example #9
def surface_loss(y_true, y_pred):
    # boundary (surface) loss: mean of predictions weighted by a
    # precomputed signed distance map of the ground truth
    y_true_dist_map = tf.py_function(func=calc_dist_map_batch,
                                     inp=[y_true],
                                     Tout=tf.float32)
    multiplied = y_pred * y_true_dist_map
    return K.mean(multiplied)
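`calc_dist_map_batch` is not defined in the snippet; a common definition from the boundary-loss literature looks like this (a sketch, assuming SciPy is available):

from scipy.ndimage import distance_transform_edt

def calc_dist_map(seg):
    # signed distance: positive outside the mask, negative inside
    res = np.zeros_like(seg)
    posmask = seg.astype(bool)
    if posmask.any():
        negmask = ~posmask
        res = distance_transform_edt(negmask) * negmask \
            - (distance_transform_edt(posmask) - 1) * posmask
    return res

def calc_dist_map_batch(y_true):
    # tf.py_function hands us an eager tensor; convert and map per sample
    return np.array([calc_dist_map(y)
                     for y in y_true.numpy()]).astype(np.float32)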
Example #10
def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
Example #11
def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
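Unlike Example #10, the mean here runs over all axes before the square root, so this returns one scalar for the whole batch; the two are not equivalent, since the mean of per-sample RMSEs differs from the RMSE of the pooled errors.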
Example #12
def custom_loss(y_true, y_pred):
    # closes over the enclosing scope's `func_g` and `X`; y_true is unused
    return K.mean(K.square(func_g(X[-1]) - y_pred))
Example #13
def loss(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true) - K.square(y_true), axis=-1)
Example #14
# decoder tail of a VAE script; `in_layer`, `z`, `z_mean`, `z_log_var`,
# `decoder_f`, `x`, the size constants, and `from keras import metrics`
# are all defined earlier in the script
decoder_h = Dense(dense1, activation='tanh')
decoder_mean = Dense(original_dim, activation='tanh')

f_decoded = decoder_f(z)
h_decoded = decoder_h(f_decoded)
x_decoded_mean = decoder_mean(h_decoded)
x_decoded_img = Reshape(original_shape)(x_decoded_mean)

# instantiate VAE model
vae = Model(in_layer, x_decoded_img)

# Compute VAE loss
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)
vae_loss = K.mean(0.5 * xent_loss + 0.5 * kl_loss)

# the loss is attached to the model itself, so compile() and fit()
# take no separate loss or targets
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
vae.summary()

vae.fit(x_train,
        shuffle=True,
        epochs=train_epoch,
        batch_size=batch_size,
        validation_data=(anomaly_test, None))

vae.save('%s = %d %d vae_dense2_model.hdf5' % (var_str, var1, var2))

# expose the latent mean as a standalone encoder
encoder = Model(in_layer, z_mean)
Example #15
def wasserstein_loss(self, y_true, y_pred):
    return K.mean(y_true * y_pred)
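A usage sketch for context (the critic name and hyperparameters are assumptions, following the WGAN paper's defaults): the critic is compiled with this loss and trained against labels of +1 for real and -1 for generated samples.

# within the GAN class (names assumed):
self.critic.compile(loss=self.wasserstein_loss,
                    optimizer=RMSprop(lr=0.00005))
# train with y = +1 for real batches and y = -1 for fake batches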