Example #1
def primary_loss(y_true, y_pred):
    # three separate loss terms, gated on whether the note is played or not
    played = y_true[:, :, :, 0]
    bce_note = losses.binary_crossentropy(y_true[:, :, :, 0], y_pred[:, :, :, 0])
    bce_replay = losses.binary_crossentropy(y_true[:, :, :, 1], tf.multiply(played, y_pred[:, :, :, 1]) + tf.multiply(1 - played, y_true[:, :, :, 1]))
    mse = losses.mean_squared_error(y_true[:, :, :, 2], tf.multiply(played, y_pred[:, :, :, 2]) + tf.multiply(1 - played, y_true[:, :, :, 2]))
    return bce_note + bce_replay + mse
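The played mask above substitutes the ground truth for the prediction wherever no note is played, so the replay and volume terms contribute zero loss at silent positions. A minimal sketch of an equivalent gating with tf.where, assuming played is strictly 0/1 (this rewrite is illustrative, not from the original source):

# equivalent gating with tf.where (hypothetical rewrite)
gated_replay = tf.where(tf.equal(played, 1.0),
                        y_pred[:, :, :, 1],   # use the prediction where a note is played
                        y_true[:, :, :, 1])   # use the ground truth elsewhere (zero loss)
bce_replay = losses.binary_crossentropy(y_true[:, :, :, 1], gated_replay)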
Example #2
def bce_logdice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) - K.log(1. - dice_loss(y_true, y_pred))
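This snippet assumes a dice_loss helper defined elsewhere in its module; several later examples (#12, #16, #25, #32) do the same. A common soft-Dice formulation, shown here as a sketch rather than the original authors' code:

from keras import backend as K

def dice_loss(y_true, y_pred, smooth=1.):
    # soft Dice loss: 1 - 2|A∩B| / (|A| + |B|), computed on flattened probabilities
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return 1. - (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

Since this dice_loss stays below 1 on real data, the K.log(1. - dice_loss(...)) term above remains finite.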
Example #3
def loss(y_true, y_pred):
    return binary_crossentropy(y_true,
                               y_pred,
                               from_logits=False,
                               label_smoothing=1e-5)
Example #4
def cost_cls(y_true, y_pred):
    return losses.binary_crossentropy(y_true, y_pred)
Example #5
    def compile_model(self, data_to_plot_x):
        # Convolutional VAE

        # ENCODER
        input_shape = (self.number_of_data_to_extract,
                       self.range_of_notes_to_extract, 1)  # data size
        inputs = Input(shape=input_shape, name='encoder_input')
        x = inputs
        for i in range(2):
            self.filters *= 2
            x = Conv2D(filters=self.filters,
                       kernel_size=self.kernel_size,
                       activation='relu',
                       strides=2,
                       padding='same')(x)

        # shape info needed to build decoder model
        shape = K.int_shape(x)

        # generate latent vector Q(z|X)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        z_mean = Dense(self.latent_dim, name='z_mean')(x)
        z_log_var = Dense(self.latent_dim, name='z_log_var')(x)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(self.sampling, output_shape=(self.latent_dim, ),
                   name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        encoder.summary()

        # DECODER
        latent_inputs = Input(shape=(self.latent_dim, ), name='z_sampling')
        x = Dense(shape[1] * shape[2] * shape[3],
                  activation='relu')(latent_inputs)
        x = Reshape((shape[1], shape[2], shape[3]))(x)

        # use Conv2DTranspose to reverse the conv layers from the encoder
        for i in range(2):
            x = Conv2DTranspose(filters=self.filters,
                                kernel_size=self.kernel_size,
                                activation='relu',
                                strides=2,
                                padding='same')(x)
            self.filters //= 2

        outputs = Conv2DTranspose(filters=1,
                                  kernel_size=self.kernel_size,
                                  activation='sigmoid',
                                  padding='same',
                                  name='decoder_output')(x)

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')
        decoder.summary()

        # Building the VAE
        outputs = decoder(encoder(inputs)[2])
        vae = Model(inputs, outputs, name='vae')

        # LOSS
        use_mse = True
        if use_mse:
            reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs))
        else:
            reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                                      K.flatten(outputs))

        reconstruction_loss *= self.range_of_notes_to_extract * self.number_of_data_to_extract
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        vae.add_loss(vae_loss)

        # Compile the VAE
        vae.compile(optimizer='rmsprop')
        vae.summary()
        return vae, encoder, decoder
Example #6
def custom_loss(y_true, y_pred):
    # weighted sum of BCE and log-IoU; the IoU term is disabled here (a2 = 0)
    loss1 = binary_crossentropy(y_true, y_pred)
    loss2 = mean_iou(y_true, y_pred)
    a1 = 1
    a2 = 0
    return a1 * loss1 + a2 * K.log(loss2)
Example #7
def get_loss_binary(x, binary):
    from keras.losses import binary_crossentropy
    return binary_crossentropy(x, binary)
Example #8
decoder = Model(latent_inputs, output_img, name='decoder')
# decoder.summary()

# Instantiate VAE model
# These two seem to be equivalent
# print(decoder(encoder(inputs)[2]))
# print(output_img)

outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')

models = (encoder, decoder)
data = (x_test, y_test)
reconstruction_loss = binary_crossentropy(inputs, outputs)

reconstruction_loss *= original_dim
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5

vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
vae.summary()

# train the autoencoder
vae.fit(x_train,
        epochs=epochs,
        batch_size=batch_size)
Example #9
def dice_p_bce(in_gt, in_pred):
    return 1e-3*binary_crossentropy(in_gt, in_pred) - dice_coef(in_gt, in_pred)
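dice_coef here is again assumed from the surrounding module. It is typically the Dice coefficient itself (higher is better), which is why it is subtracted from the scaled BCE; a common sketch:

def dice_coef(y_true, y_pred, smooth=1.):
    # soft Dice coefficient in [0, 1]; subtracting it from a loss rewards overlap
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)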
Example #10
def bce_tversky_loss_fixed(y_true, y_pred):
    return bce_weight * binary_crossentropy(
        y_true, y_pred) + tversky_weight * tversky_loss(y_true, y_pred)
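tversky_loss and the two weights come from the enclosing scope. The Tversky index generalizes Dice by weighting false negatives and false positives separately; a common formulation, offered as an assumption about what the missing helper looks like:

def tversky_loss(y_true, y_pred, alpha=0.7, beta=0.3, smooth=1e-6):
    # alpha > beta penalizes false negatives more heavily than false positives
    tp = K.sum(y_true * y_pred)
    fn = K.sum(y_true * (1. - y_pred))
    fp = K.sum((1. - y_true) * y_pred)
    return 1. - (tp + smooth) / (tp + alpha * fn + beta * fp + smooth)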
Example #11
def bce_jaccardlog_loss_fixed(y_true, y_pred):
    return bce_weight * binary_crossentropy(
        y_true, y_pred) + jaccardlog_weight * jaccard_coef_logloss(
            y_true, y_pred)
Example #12
def bce_dice_loss_fixed(y_true, y_pred):
    return bce_weight * binary_crossentropy(
        y_true, y_pred) + dice_weight * dice_loss(y_true, y_pred)
Example #13
def vae_loss(x, x_decoded_mean):
    xent_loss = binary_crossentropy(x, x_decoded_mean)  # remove original_dim
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    return K.mean(xent_loss + kl_loss)
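The kl_loss expression recurring across these VAE examples is the closed-form KL divergence between the approximate posterior and a standard normal prior. With z_mean = \mu and z_log_var = \log\sigma^2:

D_{\mathrm{KL}}\big(\mathcal{N}(\mu,\sigma^2)\,\|\,\mathcal{N}(0,1)\big)
    = -\frac{1}{2}\sum_j \big(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\big)

which is exactly -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1).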
Example #14
def vae_loss(x, outputs):
    reconstruction_loss = binary_crossentropy(x, outputs)  # * num_features
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1) * -0.5
    return K.mean(reconstruction_loss + kl_loss)
Example #15
def bce_dice_loss(y_true, y_pred):
    return 0.5 * binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
Example #16
def bce_dice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
Example #17
def binary_crossentropy_with_false_negatives(Y_true, Y_pred, a=1.0):
    return binary_crossentropy(Y_true, Y_pred) + a * false_negatives(Y_true, Y_pred)
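false_negatives is not defined in this excerpt; one plausible soft-count implementation, labeled as a guess rather than the original helper:

def false_negatives(Y_true, Y_pred):
    # hypothetical helper: mean soft rate of positives the model missed
    return K.mean(Y_true * (1. - Y_pred))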
Example #18
def loss_fn(self, x, labels):
    pred, emb = self.get_pred_and_emb(x)
    # minimize the error using cross-entropy
    self.loss = tf.reduce_mean(binary_crossentropy(labels, pred))
    return self.loss, emb
Example #19
    def _prepare(self):
        super()._prepare()

        # network parameters
        input_shape = (self._original_dim, )

        # VAE model = encoder + decoder
        with self._graph.as_default():

            #
            # (1) build encoder model
            #
            self._inputs = Input(shape=input_shape, name='encoder_input')
            print("intput_shape:", input_shape,
                  "intermediate_dim:", self._intermediate_dim)
            print("intputs:", self._inputs)
            x = Dense(self._intermediate_dim, activation='relu')(self._inputs)
            self._z_mean = Dense(self._latent_dim, name='z_mean')(x)
            self._z_log_var = Dense(self._latent_dim, name='z_log_var')(x)

            # Use reparameterization trick to push the sampling out as
            # input (note that "output_shape" isn't necessary with the
            # TensorFlow backend)
            self._z = Lambda(self._sampling, output_shape=(self._latent_dim,),
                             name='z')([self._z_mean, self._z_log_var])

            # instantiate encoder model. It provides two outputs:
            #  - (z_mean, z_log_var): a pair describing the mean and (log)
            #    variance of the code variable z (for input x)
            #  - z: a value sampled from that distribution
            self._encoder = Model(self._inputs,
                                  [self._z_mean, self._z_log_var, self._z],
                                  name='encoder')
            self._encoder.summary(print_fn=self._print_fn)
            # plot_model requires pydot 
            #plot_model(self._encoder, to_file='vae_mlp_encoder.png',
            #           show_shapes=True)

            #
            # (2) build decoder model
            #
            latent_inputs = Input(shape=(self._latent_dim,), name='z_sampling')
            x = Dense(self._intermediate_dim, activation='relu')(latent_inputs)
            self._outputs = Dense(self._original_dim, activation='sigmoid')(x)

            # instantiate decoder model
            self._decoder = Model(latent_inputs, self._outputs, name='decoder')
            self._decoder.summary(print_fn=self._print_fn)
            # plot_model requires pydot installed
            #plot_model(self._decoder, to_file='vae_mlp_decoder.png', show_shapes=True)

            #
            # (3) define the loss function
            #
            self._outputs = self._decoder(self._encoder(self._inputs)[2])
            if self._loss == 'mse':
                reconstruction_loss = mse(self._inputs, self._outputs)
            else:
                reconstruction_loss = binary_crossentropy(self._inputs,
                                                          self._outputs)
            # VAE loss = mse_loss or xent_loss + kl_loss
            reconstruction_loss *= self._original_dim
            kl_loss = (1 + self._z_log_var -
                       K.square(self._z_mean) - K.exp(self._z_log_var))
            kl_loss = K.sum(kl_loss, axis=-1)
            kl_loss *= -0.5
            vae_loss = K.mean(reconstruction_loss + kl_loss)

            #
            # (4) instantiate VAE model
            #
            self._vae = Model(self._inputs, self._outputs, name='vae_mlp')
            self._vae.add_loss(vae_loss)
            self._vae.compile(optimizer='adam')
            self._vae.summary(print_fn=self._print_fn)
        self._model = self._vae
Example #20
def dice_cross_loss(y_true, y_pred):
    return 0.9 * binary_crossentropy(y_true, y_pred) + 0.1 * dice_coef_loss(
        y_true, y_pred)
Example #21
def iou_bce_loss(y_true, y_pred):
    return binary_crossentropy(y_true[:, 77:-78, 77:-78],
                               y_pred[:, 77:-78,
                                      77:-78]) + 3 * iou_loss(y_true, y_pred)
Example #22
def vae_loss(X, X_decoded_mean):
    return original_dim * losses.binary_crossentropy(X, X_decoded_mean)
Example #23
def competition_metric_loss(true, pred):
    return (1 - competition_metric(true, pred)) + binary_crossentropy(
        true[:, 77:-78, 77:-78], pred[:, 77:-78, 77:-78])
Example #24
    data = (x_test, y_test)

    if args.beta is None or args.beta < 1.0:
        beta = 1.0
        print("CVAE")
        model_name = "cvae_cnn_mnist"
    else:
        beta = args.beta
        print("Beta-CVAE with beta=", beta)
        model_name = "beta-cvae_cnn_mnist"

    # VAE loss = mse_loss or xent_loss + kl_loss
    if args.mse:
        reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs))
    else:
        reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                                  K.flatten(outputs))

    reconstruction_loss *= image_size * image_size
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5 * beta
    cvae_loss = K.mean(reconstruction_loss + kl_loss)
    cvae.add_loss(cvae_loss)
    cvae.compile(optimizer='rmsprop')
    cvae.summary()
    # plot_model(cvae, to_file='cvae_cnn.png', show_shapes=True)

    if args.weights:
        cvae.load_weights(args.weights)
    else:
        # train the autoencoder
Example #25
def bce_dice_loss(y_true, y_pred):
    return binary_crossentropy(
        y_true[:, 77:-78, 77:-78],
        y_pred[:, 77:-78, 77:-78]) + 0.3 * dice_loss(y_true, y_pred)
Example #26
def _BCE_Dice_Loss(y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred) + _dice_coef_loss(
        y_true, y_pred)
    return loss
Example #27
        memory.append((state, action, reward, next_state, done))
        state = next_state
        if done:
            print('Game {}, reward {}, Epsilon {:.2}'.format(game, total_reward, epsilon))
            break
        if iteration >= 50000 and iteration % 4 == 0:
            data = memory.sample()
            train(data)
    game_rewards.append(total_reward)
    if total_reward > max_reward:
        max_reward = total_reward
    if (game + 1) % 10 == 0:  # Save every 10 games
        saver.save(sess, checkpoint_path)
    print('Last 30 games average reward is {:.4}. Max reward is {}'.format(last_n_reward_average(30, game_rewards), max_reward))


# Test
from keras.losses import categorical_crossentropy, binary_crossentropy

def my_loss():
    return lambda y_true, y_pred: y_pred

import tensorflow as tf
import numpy as np

y_true = tf.Variable([[[1., 2.], [3., 4.]]])
y_pred = tf.Variable([[[1., 2.], [3., 4.]]])
loss = binary_crossentropy(y_true, y_pred)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
loss_val = sess.run(loss)
Example #28
def test(test=True, dataset_name='blade', label=2, data_index=10, _class='abnormal', metric='nrmse'):
    '''
    If using the MNIST dataset, you can randomly pick one label as the "normal"
    class and treat the others as "abnormal". A metric has to be chosen to
    report the reconstruction loss, also known as the "anomaly score".
    '''
    assert metric in ['binary_cross_entropy', 'structural_similarity', 'nrmse']
    
    model_file = "{model}/model_{dataset_name}.hdf5".format(
            model='./model', dataset_name=dataset_name
            )
    # load model file
    if not os.path.exists(model_file):
        raise Exception("{} model not found".format(dataset_name))
    model = load_model(model_file)
    
    if dataset_name == 'mnist':
        assert label in list(range(10))
        (X_train, y_train), (_, _) = mnist.load_data()
        X_train = X_train / 255
        specific_idx = np.where(y_train == label)[0]
        if data_index >= len(specific_idx):
            data_index = 0
        data = X_train[specific_idx].reshape(-1, 28, 28, 1)[data_index: data_index+1]
        test_data = resize(data, (1, 256, 256, 1))
    elif dataset_name == 'blade':
        if test is True:
            assert _class in ['normal', 'abnormal', 'validation', 'evaluation']
            allFiles = glob.glob('../../data/test/{}'.format(_class) + '/*.wav')
        else:
            allFiles = glob.glob('../../data/train' + '/*.wav')
        f = allFiles[data_index: data_index+1][0]
        wav, sr = librosa.load(f, sr=None)
        wn = [2 * 1000.0 / sr, 0.99]
        b, a = signal.butter(8, wn, 'bandpass')
        wav = signal.filtfilt(b, a, wav)
        stft = np.abs(signal.stft(wav, fs=sr, window='hanning', nperseg=512, noverlap=256)[2])
        pca_sk = PCA(n_components=256)
        stft = pca_sk.fit_transform(stft[:-1, :])
        db = librosa.amplitude_to_db(stft, ref=np.min)
        normed_db = db / np.max(db)
        test_data = normed_db.reshape(1, 256, 256, 1)

    model_predicts = model.predict(test_data)
#     print(model_predicts.shape)
    
#     fig = plt.figure(figsize=(8, 8))
#     columns = 1
#     rows = 2
#     fig.add_subplot(rows, columns, 1)
    input_image = test_data.reshape((256, 256))
    reconstructed_image = model_predicts.reshape((256, 256))
#     plt.title('Input')
#     plt.imshow(input_image, label='Input')
#     fig.add_subplot(rows, columns, 2)
#     plt.title('Reconstruction')
#     plt.imshow(reconstructed_image, label='Reconstructed')
#     plt.show()
    # Compute the mean binary_crossentropy loss of reconstructed image.
    y_true = K.variable(input_image)
    y_pred = K.variable(reconstructed_image)
    if metric == 'binary_cross_entropy':
        error = K.eval(binary_crossentropy(y_true, y_pred)).mean()
    elif metric == 'structural_similarity':
        error = 1 - skimage.metrics.structural_similarity(input_image, reconstructed_image)
    elif metric == 'nrmse':
        error = np.sqrt(mean_squared_error(input_image, reconstructed_image)) / np.sqrt(np.mean(input_image**2))
    print('Reconstruction loss:', error)
    return error
Example #29
def bce_dice_loss(self, y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred) + \
           self.dice_loss(y_true, y_pred)
    return loss / 2.0
Example #30
if __name__ == '__main__':
    # show usage of python script
    parser = argparse.ArgumentParser()
    help_ = "Load h5 model trained weights"
    parser.add_argument("-w", "--weights", help=help_)
    help_ = "Use mse loss instead of binary cross entropy (default)"
    parser.add_argument("-m", "--mse", help=help_, action='store_true')
    args = parser.parse_args()
    models = (encoder, decoder)
    #data = (x_test, y_test)

    # VAE loss = mse_loss or xent_loss + kl_loss
    if args.mse:
        reconstruction_loss = mse(inputs, outputs)
    else:
        reconstruction_loss = binary_crossentropy(inputs, outputs)

    reconstruction_loss *= original_dim
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    vae.summary()
    plot_model(vae, to_file='vae_mlp.png', show_shapes=True)

    if args.weights:
        vae.load_weights(args.weights)
        z_mean, _, _ = encoder.predict(x_train, batch_size=batch_size)
        np.save('latent', z_mean)
Example #31
def eegan_loss(y_true, y_pred):
    # z and z_prime come from the enclosing scope
    prior = binary_crossentropy(y_true, y_pred)
    reconstruction = 0.005 * mean_squared_error(z, z_prime)
    return prior + reconstruction
Example #32
def bcdl_loss(y_true, y_pred):
    return losses.binary_crossentropy(y_true, y_pred) + dice_loss(
        y_true, y_pred)
Example #33
def construct_vae(image_size, kernel_size, latent_dim):
    # network parameters
    input_shape = (image_size[0], image_size[1], 1)

    # VAE model = encoder + decoder
    # build encoder model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = inputs
    x = Conv2D(filters=16,
               kernel_size=kernel_size,
               activation='relu',
               strides=1,
               padding='same')(x)
    x = Conv2D(filters=32,
               kernel_size=kernel_size,
               activation='relu',
               strides=2,
               padding='same')(x)
    x = Conv2D(filters=64,
               kernel_size=kernel_size,
               activation='relu',
               strides=1,
               padding='same')(x)

    # shape info needed to build decoder model
    shape = K.int_shape(x)

    # generate latent vector Q(z|X)
    x = Flatten()(x)
    x = Dense(16, activation='relu')(x)
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim, ),
               name='z')([z_mean, z_log_var])

    # instantiate encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()
    plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)

    # build decoder model
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
    x = Reshape((shape[1], shape[2], shape[3]))(x)

    x = Conv2DTranspose(filters=64,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=1,
                        padding='same')(x)
    x = Conv2DTranspose(filters=32,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=2,
                        padding='same')(x)
    x = Conv2DTranspose(filters=16,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=1,
                        padding='same')(x)

    outputs = Conv2DTranspose(filters=1,
                              kernel_size=kernel_size,
                              activation='sigmoid',
                              padding='same',
                              name='decoder_output')(x)

    # instantiate decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()
    plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)

    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae')

    # VAE loss = mse_loss or xent_loss + kl_loss
    reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                              K.flatten(outputs))

    reconstruction_loss *= image_size[0] * image_size[1]
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='rmsprop')
    vae.summary()
    plot_model(vae, to_file='vae_cnn.png', show_shapes=True)

    return vae, encoder, decoder
Example #34
def dice_p_bce(in_gt, in_pred):
    return 1e-3*binary_crossentropy(in_gt, in_pred) - dice_coef(in_gt, in_pred)
Example #35
def bce_jaccard_loss(gt, pr, bce_weight=1., smooth=SMOOTH, per_image=True):
    bce = K.mean(binary_crossentropy(gt, pr))
    loss = bce_weight * bce + jaccard_loss(
        gt, pr, smooth=smooth, per_image=per_image)
    return loss
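jaccard_loss with smooth and per_image parameters is assumed from a segmentation-models-style codebase; a sketch consistent with that signature, not the library's actual implementation:

SMOOTH = 1e-5  # assumed module-level constant

def jaccard_loss(gt, pr, smooth=SMOOTH, per_image=True):
    # 1 - IoU; with per_image=True the score is computed per sample, then averaged
    axes = [1, 2, 3] if per_image else None
    intersection = K.sum(gt * pr, axis=axes)
    union = K.sum(gt + pr, axis=axes) - intersection
    iou = (intersection + smooth) / (union + smooth)
    return 1. - K.mean(iou)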
Example #36
                          activation='sigmoid',
                          padding='same',
                          name='decoder_output')(b1)

# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')

# instantiate VAE model
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')


models = (encoder, decoder)
data = (x_test, y_test)

reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                          K.flatten(outputs))

reconstruction_loss *= image_size * image_size
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')

# train the autoencoder
vae.fit(x_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, None))
Example #37
def dice_logloss3(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) * 0.15 + dice_coef_loss(y_true, y_pred) * 0.85