Example #1
from keras import backend as K  # backend ops (K.sum, K.square, ...) used below

def triplet_loss(y_true, y_pred, alpha=0.4):
    """
    Implementation of the triplet loss function
    Arguments:
    y_true -- true labels, required by the Keras loss signature; not used in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor data
            positive -- the encodings for the positive data (similar to anchor)
            negative -- the encodings for the negative data (different from anchor)
    Returns:
    loss -- real number, value of the loss
    """
    print('y_pred.shape = ', y_pred.shape)

    total_length = y_pred.shape.as_list()[-1]
    #     print('total_length =', total_length)
    #     total_length = 12

    anchor = y_pred[:, 0:int(total_length * 1 / 3)]
    positive = y_pred[:, int(total_length * 1 / 3):int(total_length * 2 / 3)]
    negative = y_pred[:, int(total_length * 2 / 3):int(total_length * 3 / 3)]

    # distance between the anchor and the positive
    pos_dist = K.sum(K.square(anchor - positive), axis=1)

    # distance between the anchor and the negative
    neg_dist = K.sum(K.square(anchor - negative), axis=1)

    # compute loss
    basic_loss = pos_dist - neg_dist + alpha
    loss = K.maximum(basic_loss, 0.0)

    return loss
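A hedged usage sketch: assuming a model whose single output concatenates the anchor, positive and negative embeddings along the last axis (the layout the slicing above expects), the loss plugs straight into compile. The model name and dummy target below are illustrative, not from the original.

# `embedding_model` is assumed to output shape (batch, 3 * emb_dim).
embedding_model.compile(optimizer='adam', loss=triplet_loss)
# Keras still requires a target array, so a dummy y of matching batch size works:
# embedding_model.fit([anchors, positives, negatives], dummy_y, epochs=10)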
Example #2
def correlation_coefficient_loss(y_true, y_pred):
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x - mx, y - my
    r_num = K.sum(xm * ym)
    # K.epsilon() guards against division by zero for constant inputs
    r_den = K.sqrt(K.sum(K.square(xm)) * K.sum(K.square(ym))) + K.epsilon()
    r = r_num / r_den

    r = K.maximum(K.minimum(r, 1.0), -1.0)
    # minimizing 1 - r^2 pushes the predictions toward perfect correlation
    return 1 - K.square(r)
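A quick sanity check, assuming the TensorFlow backend (where backend ops evaluate eagerly): perfectly correlated inputs should give a loss of zero.

import tensorflow as tf

y_a = tf.constant([1.0, 2.0, 3.0, 4.0])
y_b = tf.constant([2.0, 4.0, 6.0, 8.0])  # perfectly correlated with y_a
print(float(correlation_coefficient_loss(y_a, y_b)))  # ~0.0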
Example #3
def weighted_mse(y_true, y_pred):

    majority_weight = 0.9500
    minority_weight = 0.0500
    # Weight each squared error by class: positive targets (y_true == 1)
    # get the majority weight, negative targets the minority weight.
    loss = K.mean(K.square(y_pred - y_true) *
                  (y_true * majority_weight + (1. - y_true) * minority_weight),
                  axis=-1)

    # Return the weighted mean error
    return loss
Example #4
    def vae_loss(self, y_true, y_pred):
        """ Calculate loss = reconstruction loss + KL loss for each sample in the minibatch """
        # E[log P(X|z)] -- reconstruction term (K.binary_crossentropy takes the target first)
        recon = K.sum(K.sum(K.binary_crossentropy(y_true, y_pred), axis=1))
        recon *= 256
        # D_KL(Q(z|X) || P(z)); closed form since both distributions are Gaussian
        kl = 0.5 * K.sum(
            K.sum(K.exp(self.log_sigma) + K.square(self.mu) - 1. -
                  self.log_sigma,
                  axis=1))
        return recon + kl


#   def sample_z(self, mu, log_sigma):
#       # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, 1)
#       eps = K.random_normal(shape=(self.mini_batch_size, self.latent_space_dim), mean=0., stddev=1.)
#       return mu + K.exp(log_sigma / 2.) * eps
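The commented-out sampler above is usually wrapped in a Lambda layer so the sampling step becomes part of the graph; a minimal sketch (the tensor names mu and log_sigma are assumptions, mirroring the attributes used in vae_loss):

from keras.layers import Lambda

def sampling(args):
    mu, log_sigma = args
    # one noise draw per sample; the shape is taken from mu at run time
    eps = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
    return mu + K.exp(log_sigma / 2.) * eps

# z = Lambda(sampling, name='z')([mu, log_sigma])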
Example #5
def deform_center_cnn(class_num, trainable=False, GPU=1):
    conv_args = {
        'trainable': trainable,
        'kernel_initializer': Orthogonal(gain=1.0, seed=None),
        'kernel_regularizer': OrthLocalReg2D,
        'padding': 'same'
    }

    sep_conv_args = {
        'trainable': trainable,
        'kernel_initializer': Orthogonal(gain=1.0, seed=None),
        'kernel_regularizer': OrthLocalRegSep2D,
        'padding': 'same'
    }

    inputs = l = Input((None, None, 3), name='input')
    input_target = Input((1, ), name='input_target')

    # norm_input = RGB2Gray()(inputs)
    norm_input = ImageNorm()(inputs)
    # norm_input = inputs
    stem_stride = (2, 2)
    # conv11
    l = Conv2D(32, (3, 3), strides=stem_stride, name='conv11',
               **conv_args)(norm_input)
    l = Activation('relu', name='conv11_relu')(l)
    l = BatchNormalization(name='conv11_bn')(l)

    l2 = InvConv2D(32, (3, 3),
                   strides=stem_stride,
                   name='inv_conv11',
                   **conv_args)(norm_input)
    l2 = Activation('relu', name='inv_conv11_relu')(l2)
    l2 = BatchNormalization(name='inv_conv11_bn')(l2)

    l = concatenate([l, l2])

    l5 = SeparableConv2D(64, (5, 5),
                         strides=(2, 2),
                         name='conv5_11_12',
                         **sep_conv_args)(l)
    l5 = Activation('relu', name='conv5_11_12_relu')(l5)
    l5 = BatchNormalization(name='conv5_11_12_bn')(l5)

    l3 = SeparableConv2D(64, (3, 3),
                         strides=(2, 2),
                         name='conv3_11_12',
                         **sep_conv_args)(l)
    l3 = Activation('relu', name='conv3_11_12_relu')(l3)
    l3 = BatchNormalization(name='conv3_11_12_bn')(l3)

    l = concatenate([l3, l5])

    l = SeparableConv2D(128, (3, 3), name='conv12_1', **sep_conv_args)(l)
    l = Activation('relu', name='conv12_1_relu')(l)
    l = BatchNormalization(name='conv12_1_bn')(l)

    l = SeparableConv2D(128, (1, 1), name='conv12_2', **sep_conv_args)(l)
    l = Activation('relu', name='conv12_2_relu')(l)
    l = BatchNormalization(name='conv12_2_bn')(l)

    l = SeparableConv2D(128, (3, 3), name='conv13', **sep_conv_args)(l)
    l = Activation('relu', name='conv13_relu')(l)
    l = BatchNormalization(name='conv13_bn')(l)

    l = SeparableConv2D(256, (3, 3),
                        strides=(2, 2),
                        name='conv14',
                        **sep_conv_args)(l)
    l = Activation('relu', name='conv14_relu')(l)
    l = l14 = BatchNormalization(name='conv14_bn')(l)

    l = SeparableConv2D(256, (3, 3), name='conv21', **sep_conv_args)(l)
    l = Activation('relu', name='conv21_relu')(l)
    l = BatchNormalization(name='conv21_bn')(l)

    l = SeparableConv2D(256, (3, 3), name='conv22', **sep_conv_args)(l)
    l = Activation('relu', name='conv22_relu')(l)
    l = BatchNormalization(name='conv22_bn')(l)

    l = Add(name='residual_14_22')([l14, l])

    # conv22
    # l_offset = ConvOffset2D(192, name='conv32_offset')(l)
    # l = Conv2D(256, (3, 3), strides=(2, 2), name='conv23', **conv_args)(l)
    # l = Activation('relu', name='conv23_relu')(l)
    # l = l23 = BatchNormalization(name='conv23_bn')(l)
    #
    # l = Conv2D(256, (3, 3), name='conv31', **conv_args)(l)
    # l = Activation('relu', name='conv31_relu')(l)
    # l = BatchNormalization(name='conv31_bn')(l)

    # l = Conv2D(256, (1, 1), name='conv32', **conv_args)(l31)
    # l = Activation('relu', name='conv32_relu')(l)
    # l = BatchNormalization(name='conv32_bn')(l)

    # l = Conv2D(256, (3, 3), name='conv33', **conv_args)(l)
    # l = Activation('relu', name='conv33_relu')(l)
    # l = BatchNormalization(name='conv33_bn')(l)
    # l = Add(name='residual_23_33')([l23, l])

    l_offset = ConvOffset2D(256, name='conv33_offset')(l)
    l = SeparableConv2D(512, (3, 3),
                        strides=(2, 2),
                        name='conv41',
                        **sep_conv_args)(l_offset)
    l = Activation('relu', name='conv41_relu')(l)
    l = BatchNormalization(name='conv41_bn')(l)

    l = SeparableConv2D(512, (3, 3), name='conv42', **sep_conv_args)(l)
    l = Activation('relu', name='conv42_relu')(l)
    l = BatchNormalization(name='conv42_bn')(l)

    # l_offset = ConvOffset2D(512, name='conv35_offset')(l)
    l = SeparableConv2D(512, (3, 3), name='conv43', **sep_conv_args)(l)
    l = Activation('relu', name='conv43_relu')(l)
    l = BatchNormalization(name='conv43_bn')(l)

    # l = LocallyConnected2D(512, (3, 3), name='conv44', padding='valid')(l)
    # l = Activation('relu', name='conv44_relu')(l)
    # l = BatchNormalization(name='conv44_bn')(l)

    l = SeparableConv2D(1024, (3, 3),
                        strides=(2, 2),
                        name='conv51',
                        **sep_conv_args)(l)
    l = Activation('relu', name='conv51_relu')(l)
    l = BatchNormalization(name='conv51_bn')(l)

    # l_offset = ConvOffset2D(1024, name='conv35_offset')(l)
    l = SeparableConv2D(1024, (3, 3), name='conv52', **sep_conv_args)(l)
    l = Activation('relu', name='conv52_relu')(l)
    l = BatchNormalization(name='conv52_bn')(l)

    l = SeparableConv2D(1024, (3, 3), name='conv53', **sep_conv_args)(l)
    l = Activation('relu', name='conv53_relu')(l)
    l = BatchNormalization(name='conv53_bn')(l)

    # out
    l = GlobalAvgPool2D(name='avg_pool')(l)
    # l = MaxPooling2D(name='max_pool_final')(l)
    # l = Flatten(name='flatten_maxpool')(l)

    l = Dense(512, name='fc1', trainable=trainable)(l)
    l = Activation('relu', name='fc1_relu')(l)

    l = Dense(256, name='fc2', trainable=trainable)(l)
    l = feature = Activation('relu', name='fc2_relu')(l)

    l = Dense(class_num, name='fc3', trainable=trainable)(l)
    outputs = l = Activation('softmax', name='out')(l)

    if GPU == 1:
        centers = Embedding(class_num, 256)(input_target)
        l2_loss = Lambda(
            lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True),
            name='l2_loss')([feature, centers])
        return Model(inputs=[inputs, input_target], outputs=[outputs, l2_loss])
    elif GPU == 0:
        return inputs, outputs
    else:
        BODY = Model(inputs=[inputs, input_target], outputs=[outputs, feature])
        BODY = make_parallel(BODY, GPU)
        softmax_output = Lambda(lambda x: x, name='output')(BODY.outputs[0])

        centers = Embedding(class_num, 256)(input_target)
        l2_loss = Lambda(
            lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True),
            name='l2_loss')([BODY.outputs[1], centers])
        model_with_center = Model(inputs=BODY.inputs,
                                  outputs=[softmax_output, l2_loss])
        return model_with_center
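A hedged compile sketch for the single-GPU, two-output case (the class count and loss weights are illustrative): the l2_loss head already computes its value, so its "loss" simply passes the prediction through, weighted against the softmax term.

model = deform_center_cnn(class_num=100, trainable=True, GPU=1)
model.compile(optimizer='adam',
              loss=['categorical_crossentropy', lambda y_true, y_pred: y_pred],
              loss_weights=[1.0, 0.01])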
Example #6
    def act(self, state):  # roll the dice for a move (epsilon-greedy)
        if random.random() < self.epsilon:
            return random.randrange(self.action_size)  # explore: random action
        # exploit: action = argmax_a Q(s, a)
        return np.argmax(self.model.predict(state)[0])

    def remember(self, state, action, reward, next_state, done):
        # store the observed transition, including its reward, for replay
        self.memory.append((state, action, reward, next_state, done))

    def learn(self, batch_size):  # learning step: fit Q toward bootstrapped targets
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)

    def _huber_loss(self, target, prediction):
        # pseudo-Huber loss: quadratic near zero, linear for large errors
        err = prediction - target
        return K.mean(K.sqrt(1 + K.square(err)) - 1, axis=-1)

    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss=self._huber_loss, optimizer=Adam(lr=self.learning_rate))
        return model

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)
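A minimal interaction loop under the usual assumptions (a Gym-style env, an agent instance exposing the methods above, and a 1-D observation space; all names here are illustrative):

for episode in range(1000):
    state = env.reset().reshape(1, -1)  # Keras expects a batch dimension
    done = False
    while not done:
        action = agent.act(state)
        next_state, reward, done, _ = env.step(action)
        next_state = next_state.reshape(1, -1)
        agent.remember(state, action, reward, next_state, done)
        state = next_state
    if len(agent.memory) > 32:
        agent.learn(batch_size=32)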
Example #7
def get_large_deform_cnn2(class_num, trainable=False, GPU=1):
    # init = Orthogonal(gain=1.0, seed=None)
    init = 'random_normal'

    inputs = l = Input((200, 200, 3), name='input')
    input_target = Input((1, ), name='input_target')

    #norm_input = ImageNorm()(inputs)
    norm_input = inputs

    # conv11
    l = Conv2D(32, (3, 3),
               padding='same',
               name='conv11',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(norm_input)
    l = Activation('relu', name='conv11_relu')(l)
    l = BatchNormalization(name='conv11_bn')(l)

    l2 = InvConv2D(32, (3, 3),
                   padding='same',
                   name='inv_conv11',
                   trainable=trainable,
                   kernel_initializer=init,
                   kernel_regularizer=OrthLocalReg2D)(norm_input)
    l2 = Activation('relu', name='inv_conv11_relu')(l2)
    l2 = BatchNormalization(name='inv_conv11_bn')(l2)

    l3 = Conv2D(32, (3, 1),
                padding='same',
                name='conv11_2',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(norm_input)
    l3 = Activation('relu', name='conv11_2_relu')(l3)
    l3 = BatchNormalization(name='conv11_2_bn')(l3)

    l5 = Conv2D(32, (1, 3),
                padding='same',
                name='conv11_3',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(norm_input)
    l5 = Activation('relu', name='conv11_3_relu')(l5)
    l5 = BatchNormalization(name='conv11_3_bn')(l5)

    l4 = InvConv2D(32, (3, 1),
                   padding='same',
                   name='conv11_2i',
                   trainable=trainable,
                   kernel_initializer=init,
                   kernel_regularizer=OrthLocalReg2D)(norm_input)
    l4 = Activation('relu', name='conv11_2i_relu')(l4)
    l4 = BatchNormalization(name='conv11_2i_bn')(l4)

    l6 = InvConv2D(32, (1, 3),
                   padding='same',
                   name='conv11_3i',
                   trainable=trainable,
                   kernel_initializer=init,
                   kernel_regularizer=OrthLocalReg2D)(norm_input)
    l6 = Activation('relu', name='conv11_3i_relu')(l6)
    l6 = BatchNormalization(name='conv11_3i_bn')(l6)

    l = concatenate([l, l2, l3, l5, l4, l6])

    # conv12
    # l_offset = ConvOffset2D(32, name='conv12_offset')(l)

    l5 = Conv2D(128, (5, 5),
                padding='same',
                strides=(2, 2),
                name='pool5_11_12',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(l)
    l5 = Activation('relu', name='pool5_11_12_relu')(l5)
    l5 = BatchNormalization(name='pool5_11_12_bn')(l5)

    l3 = Conv2D(128, (3, 3),
                padding='same',
                strides=(2, 2),
                name='pool3_11_12',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(l)
    l3 = Activation('relu', name='pool3_11_12_relu')(l3)
    l3 = BatchNormalization(name='pool3_11_12_bn')(l3)

    l = concatenate([l3, l5])

    l3 = Conv2D(32, (5, 3),
                padding='same',
                name='conv12_2',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(l)
    l3 = Activation('relu', name='conv12_2_relu')(l3)
    l3 = BatchNormalization(name='conv12_2_bn')(l3)

    l5 = Conv2D(32, (3, 5),
                padding='same',
                name='conv12_3',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(l)
    l5 = Activation('relu', name='conv12_3_relu')(l5)
    l5 = BatchNormalization(name='conv12_3_bn')(l5)

    l = Conv2D(128, (3, 3),
               padding='same',
               name='conv12',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv12_relu')(l)
    l = BatchNormalization(name='conv12_bn')(l)

    l = concatenate([l, l3, l5])

    l = Conv2D(128, (3, 3),
               padding='same',
               name='conv13',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv13_relu')(l)
    l = jump = BatchNormalization(name='conv13_bn')(l)

    l = Conv2D(192, (3, 3),
               padding='same',
               strides=(2, 2),
               name='conv14',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv14_relu')(l)
    # l = BatchNormalization(name='conv14_bn')(l)

    l = Conv2D(192, (3, 3),
               padding='same',
               name='conv21',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv21_relu')(l)
    l = BatchNormalization(name='conv21_bn')(l)

    l = Conv2D(192, (3, 3),
               padding='same',
               name='conv22',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv22_relu')(l)
    l = BatchNormalization(name='conv22_bn')(l)

    # conv22
    # l_offset = ConvOffset2D(192, name='conv32_offset')(l)
    l = Conv2D(256, (3, 3),
               padding='same',
               strides=(2, 2),
               name='conv23',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv23_relu')(l)
    l = BatchNormalization(name='conv23_bn')(l)

    l = Conv2D(256, (3, 3),
               padding='same',
               name='conv31',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv31_relu')(l)
    l31 = BatchNormalization(name='conv31_bn')(l)

    # l = Conv2D(256, (1, 1), padding='same', name='conv32', trainable=trainable, kernel_initializer=init, kernel_regularizer=OrthLocalReg2D)(l31)
    # l = Activation('relu', name='conv32_relu')(l)
    # l = BatchNormalization(name='conv32_bn')(l)

    l = Conv2D(256, (3, 3),
               padding='same',
               name='conv33',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv33_relu')(l)
    l = BatchNormalization(name='conv33_bn')(l)
    l = Add(name='residual_31_32')([l31, l])

    lj = Conv2D(256, (3, 3),
                padding='same',
                strides=(2, 2),
                name='jump_pool',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(jump)
    lj = Activation('relu', name='jump_pool_relu')(lj)
    lj = BatchNormalization(name='jump_pool_bn')(lj)

    lj = Conv2D(256, (3, 3),
                padding='same',
                strides=(2, 2),
                name='jump_pool2',
                trainable=trainable,
                kernel_initializer=init,
                kernel_regularizer=OrthLocalReg2D)(lj)
    lj = Activation('relu', name='jump_pool2_relu')(lj)
    lj = BatchNormalization(name='jump_pool2_bn')(lj)

    l = concatenate([l, lj])

    l_offset = ConvOffset2D(512, name='conv33_offset')(l)
    l = Conv2D(512, (3, 3),
               padding='same',
               strides=(2, 2),
               name='conv41',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l_offset)
    l = Activation('relu', name='conv41_relu')(l)
    l = BatchNormalization(name='conv41_bn')(l)

    l = Conv2D(512, (3, 3),
               padding='same',
               name='conv42',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv42_relu')(l)
    l = BatchNormalization(name='conv42_bn')(l)

    # l_offset = ConvOffset2D(512, name='conv35_offset')(l)
    l = Conv2D(512, (3, 3),
               padding='same',
               name='conv43',
               trainable=trainable,
               kernel_initializer=init,
               kernel_regularizer=OrthLocalReg2D)(l)
    l = Activation('relu', name='conv43_relu')(l)
    l = BatchNormalization(name='conv43_bn')(l)

    # out
    # l = GlobalAvgPool2D(name='avg_pool')(l)
    l = MaxPooling2D(name='max_pool_final')(l)
    l = Flatten(name='flatten_maxpool')(l)
    l = Dense(768,
              name='fc1',
              trainable=trainable,
              kernel_initializer=init,
              kernel_regularizer=OrthLocalReg1D)(l)
    l = Activation('relu', name='fc1_relu')(l)

    l = feature = Dense(256,
                        name='fc2',
                        trainable=trainable,
                        kernel_initializer=init)(l)
    l = Activation('relu', name='fc2_relu')(l)

    l = Dense(class_num, name='fc3', trainable=trainable)(l)
    outputs = l = Activation('softmax', name='out')(l)

    if GPU == 1:
        centers = Embedding(class_num, 256)(input_target)
        l2_loss = Lambda(
            lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True),
            name='l2_loss')([feature, centers])
        return Model(inputs=[inputs, input_target], outputs=[outputs, l2_loss])
    elif GPU == 0:
        return inputs, outputs
    else:
        BODY = Model(inputs=[inputs, input_target], outputs=[outputs, feature])
        BODY = make_parallel(BODY, GPU)
        softmax_output = Lambda(lambda x: x, name='output')(BODY.outputs[0])

        centers = Embedding(class_num, 256)(input_target)
        l2_loss = Lambda(
            lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True),
            name='l2_loss')([BODY.outputs[1], centers])
        model_with_center = Model(inputs=BODY.inputs,
                                  outputs=[softmax_output, l2_loss])
        return model_with_center
Example #8
def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
Example #9
def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
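Note the difference between Examples #8 and #9: the first averages over the last axis only, yielding one RMSE per sample, while the second averages over every element into a scalar. A quick illustration, assuming the TensorFlow 2 backend:

import tensorflow as tf

a = tf.constant([[0.0, 0.0], [0.0, 0.0]])
b = tf.constant([[3.0, 4.0], [0.0, 0.0]])
print(root_mean_squared_error(a, b).numpy())  # per-sample: [3.5355..., 0.]
print(rmse(a, b).numpy())                     # scalar: 2.5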
Example #10
def custom_loss(y_true, y_pred):
    # X and func_g come from the enclosing scope in the original snippet
    return K.mean(K.square(func_g(X[-1]) - y_pred))
Example #11
def loss(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true) - K.square(y_true), axis=-1)
# we instantiate these layers separately so as to reuse them later
decoder_f = Dense(dense2, activation='tanh')
decoder_h = Dense(dense1, activation='tanh')
decoder_mean = Dense(original_dim, activation='tanh')

f_decoded = decoder_f(z)
h_decoded = decoder_h(f_decoded)
x_decoded_mean = decoder_mean(h_decoded)
x_decoded_img = Reshape(original_shape)(x_decoded_mean)

# instantiate VAE model
vae = Model(in_layer, x_decoded_img)

# Compute VAE loss
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)
vae_loss = K.mean(0.5 * xent_loss + 0.5 * kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
vae.summary()

vae.fit(x_train,
        shuffle=True,
        epochs=train_epoch,
        batch_size=batch_size,
        validation_data=(anomaly_test, None))

vae.save('%s = %d %d vae_dense2_model.hdf5' % (var_str, var1, var2))
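Since training validates on anomaly_test, a natural follow-up is to score samples by reconstruction error; a minimal sketch (the percentile cutoff is an illustrative assumption):

import numpy as np

recon = vae.predict(anomaly_test, batch_size=batch_size)
# per-sample mean squared reconstruction error as the anomaly score
scores = np.mean(np.square(anomaly_test - recon),
                 axis=tuple(range(1, anomaly_test.ndim)))
threshold = np.percentile(scores, 95)  # flag the top 5% as anomalies
flags = scores > threshold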