Example #1
    def create_critic_model(self):
        state_input = Input(shape=[self.state_dim])
        action_input = Input(shape=[self.action_dim])

        # define label layer
        target_q = Input(shape=[1])

        # first critic
        state1_h1 = Dense(500, activation='relu')(state_input)
        state1_h2 = Dense(200, activation='relu')(state1_h1)

        action1_h1 = Dense(500)(action_input)

        merged1 = Concatenate()([state1_h2, action1_h1])
        merged1_h1 = Dense(200, activation='relu')(merged1)
        output1 = Dense(1, activation='linear')(merged1_h1)

        # second critic
        state2_h1 = Dense(500, activation='relu')(state_input)
        state2_h2 = Dense(200, activation='relu')(state2_h1)

        action2_h1 = Dense(500)(action_input)

        merged2 = Concatenate()([state2_h2, action2_h1])
        merged2_h1 = Dense(200, activation='relu')(merged2)
        output2 = Dense(1, activation='linear')(merged2_h1)

        model = Model(inputs=[state_input, action_input, target_q],
                      outputs=[output1, output2])

        loss = K.mean(mse(output1, target_q) + mse(output2, target_q))
        model.add_loss(loss)

        return state_input, action_input, model
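
Because the target Q value enters the graph as a third model input and both MSE terms are attached with `add_loss`, the returned critic is compiled without an external loss. A minimal usage sketch, assuming hypothetical names (`agent`, `states`, `actions`, `target_q_values`):

# Hypothetical usage sketch: the loss was already attached via add_loss(),
# so compile() takes only an optimizer and fit() needs no separate y targets.
state_input, action_input, critic = agent.create_critic_model()
critic.compile(optimizer='adam')
critic.fit([states, actions, target_q_values], epochs=10, batch_size=64)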
Example #2
        def vae_loss(true, pred_decoded_mean):
            print('vae_loss', K.int_shape(true))
            print('vae_loss', K.int_shape(true[0]))
            print('vae_loss_2', K.int_shape(pred_decoded_mean))
            # print('vae_loss_3', K.int_shape(pred_functional))

            if K.int_shape(pred_decoded_mean)[1] == max_length:
                x_decoded_mean = conditional(true, pred_decoded_mean, max_length,
                                             DIM)  # we add this new function to the loss
                x = K.flatten(true)
                x_decoded_mean = K.flatten(x_decoded_mean)
                xent_loss = max_length * binary_crossentropy(x, x_decoded_mean)
            elif K.int_shape(pred_decoded_mean)[1] == max_length_func:
                # f_decoded_mean = conditional(true, pred_decoded_mean, max_length_func, 1) # we add this new function to the loss
                # f = tf.reshape(true, (-1, max_length_func))
                # f_decoded_mean = tf.reshape(f_decoded_mean, (-1, max_length_func))
                # xent_loss = max_length_func * binary_crossentropy(f, f_decoded_mean)

                t = tf.reshape(true, (-1, max_length_func))
                p = tf.reshape(pred_decoded_mean, (-1, max_length_func))
                xent_loss = max_length_func * mse(t, p)

            elif K.int_shape(pred_decoded_mean)[1] == 1:
                t = tf.reshape(true, (-1, 1))
                p = tf.reshape(pred_decoded_mean, (-1, 1))
                xent_loss = mse(t, p)

            else:
                raise ValueError('UNRECOGNIZED SHAPE')

            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)

            print('kl_loss', K.int_shape(kl_loss))
            print('xe_loss', K.int_shape(xent_loss))
            return xent_loss + kl_loss
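
The closure above relies on names captured from the enclosing scope (`max_length`, `max_length_func`, `DIM`, `z_mean`, `z_log_var` and the `conditional` helper) and picks its branch from each output head's static length, so the same function can be reused for every head of a multi-output decoder. A hedged wiring sketch (the `vae` model and the weight values are assumptions):

# Hypothetical sketch: pass the single closure as the loss for all decoder heads;
# vae_loss dispatches internally on K.int_shape(pred_decoded_mean)[1].
vae.compile(optimizer='adam',
            loss=vae_loss,
            loss_weights=[1.0, 1.0, 1.0])  # assumed: one weight per output head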
Example #3
def write(op, y_true, y_pred):
    y_test = y_true.astype(np.float32)
    pred = y_pred.astype(np.float32)
    op('MAE {}\n'.format(K.eval(K.mean(mae(y_test, pred)))))
    op('MSE {}\n'.format(K.eval((K.mean(mse(y_test, pred))))))
    op('RMSE {}\n'.format(K.eval(K.sqrt(K.mean(mse(y_test, pred))))))
    op('NRMSE_a {}\n'.format(K.eval(K.sqrt(K.mean(nrmse_a(y_test, pred))))))
    op('NRMSE_b {}\n'.format(K.eval(K.sqrt(K.mean(nrmse_b(y_test, pred))))))
    op('MAPE {}\n'.format(
        K.eval(K.mean(mean_absolute_percentage_error(y_test, pred)))))
    op('NRMSD {}\n'.format(K.eval(K.mean(nrmsd(y_test, pred)))))
    op('SMAPE {}\n'.format(K.eval(K.mean(smape(y_test, pred)))))
    op('R2 {}\n'.format(K.eval(K.mean(r2(y_test, pred)))))
Example #4
 def root_mean_squared_error(y_true, y_pred):
     #print ("y_ture: ", y_true)
     #print ("y_pred: ", y_pred)
     #print("img_X",img_X.shape)
     #img_X,lstm_X,fused=combine_model()
     #loss1=K.sqrt(K.mean(K.square(img_X_more - y_true), axis=-1))
     #loss2=K.sqrt(K.mean(K.square(lstm_X_more - y_true), axis=-1))
     #loss3=K.sqrt(K.mean(K.square(fused- y_true), axis=-1))
     loss1 = mse(y_true, img_X_more)
     loss2 = mse(y_true, lstm_X_more)
     loss3 = mse(y_true, fused)
     #0.2*loss1+0.2*loss2+
     return 0.2 * K.sqrt(loss1) + 0.2 * K.sqrt(loss2) + K.sqrt(loss3)
Example #5
def myLoss(ytrue, ypred):
    true_box_prob = ytrue[:, :2]
    true_box_coords1 = ytrue[:, 2:6]
    true_box_coords2 = ytrue[:, 6:10]
    pred_box_prob = ypred[:, :2]
    pred_box_coords1 = ypred[:, 2:6]
    pred_box_coords2 = ypred[:, 6:10]
    r1 = losses.mse(y_true=true_box_coords1, y_pred=pred_box_coords1)
    r2 = losses.mse(y_true=true_box_coords2, y_pred=pred_box_coords2)
    r1 = tf.multiply(r1, true_box_prob[:, 0])
    r2 = tf.multiply(r2, true_box_prob[:, 1])
    classification_loss = losses.binary_crossentropy(y_true=true_box_prob,
                                                     y_pred=pred_box_prob)
    return (r1 + r2) + classification_loss
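
`myLoss` expects targets packed into a single 10-wide vector: two box probabilities followed by two sets of box coordinates, with each regression term gated by its probability. A hedged usage sketch (the `model` and the training arrays are assumptions):

# Hypothetical usage: the network emits one 10-dim vector per sample,
# laid out as [p1, p2, box1_x, box1_y, box1_w, box1_h, box2_x, box2_y, box2_w, box2_h].
model.compile(optimizer='adam', loss=myLoss)
model.fit(x_train, y_packed, epochs=20, batch_size=32)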
Example #6
def loss(x, xp, zm, zv, zm_prior, zv_prior, w_mse, w_kl):
    reconstruction_loss = mse(x, xp)
    reconstruction_loss *= w_mse
    kl_loss = (zv_prior - zv) * 0.5 + (K.square(zm - zm_prior) + K.exp(zv)) / (
        2 * K.exp(zv_prior) + 1e-10) - 0.5
    kl_loss = K.sum(kl_loss, axis=-1) * w_kl
    return reconstruction_loss + kl_loss
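
The `kl_loss` term above is the closed-form KL divergence between two diagonal Gaussians parameterised by means `zm`, `zm_prior` and log-variances `zv`, `zv_prior`; with a standard-normal prior it reduces to the `-0.5 * (1 + zv - zm^2 - exp(zv))` expression used in the VAE examples below. A purely illustrative check (not part of the original):

# Illustrative sketch: with zm_prior = 0 and zv_prior = 0 the general term
# collapses to the usual VAE KL expression (the 1e-10 stabiliser is dropped here).
zm_prior, zv_prior = 0.0, 0.0
kl_general = (zv_prior - zv) * 0.5 + (K.square(zm - zm_prior) + K.exp(zv)) / (2 * K.exp(zv_prior)) - 0.5
kl_standard = -0.5 * (1 + zv - K.square(zm) - K.exp(zv))
# kl_general and kl_standard are equal element-wise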
Example #7
 def loss(y_pred, y_true):  # note: argument order is the reverse of Keras' usual (y_true, y_pred)
     #y_true = print_tensor(y_true, message='y_true = ')
     #y_pred = print_tensor(y_pred, message='y_pred = ')
     absPred = y_pred[:, :1]
     absTrue = y_true[:, :1]
     #absPred = print_tensor(absPred, message='absPred = ')
     #absTrue = print_tensor(absTrue, message='absTrue = ')
     #prvPred = y_pred[1:]
     prvTrue = y_true[:, 1:]
     #prvTrue = print_tensor(prvTrue, message='prvTrue = ')
     anglePred = tanh(subtract(absPred, prvTrue))
     angleTrue = tanh(subtract(absTrue, prvTrue))
     grd = gradientMass * (mse(anglePred, angleTrue))
     abs = absoluteMass * (mse(absPred, absTrue))
     #return mse(anglePred,angleTrue)
     return grd + abs
Example #8
def reconstruction_error_f(inputs, outputs):
    # E[log P(X|z)]
    inputs = K.flatten(inputs)
    outputs = K.flatten(outputs)
    reconstruction_loss_f = img_rows * img_rows * mse(inputs, outputs)
    #image_size * image_size * mse(inputs,outputs)
    return reconstruction_loss_f
Example #9
        def vae_loss(inputs, outputs):
            lossname = 'mse'
            #if args.mse:
            if lossname == 'mse':
                reconstruction_loss = mse(inputs, outputs)

            else:
                reconstruction_loss = binary_crossentropy(inputs, outputs)

            reconstruction_loss *= original_dim
            kl_loss = 1 + z_log_var - K.square(z_mean - outputs2[0]) - K.exp(
                z_log_var)  # z_log_var=ln(sigma**2)
            kl_loss = K.sum(kl_loss, axis=-1)
            kl_loss *= -0.5
            y_loss = mse(z_mean, outputs2[0])  # computed but not included in the returned loss
            return K.mean(reconstruction_loss + kl_loss)
Example #10
    def load_data(self):
        # Get the original test set - the stuff that wasn't used to train the VAE.
        '''
        TODO: wasteful to be calling this here,
        ideally we should move this data loading call outside into a loop
        that wraps the Vae training and the Experiments
        But this will do for now
        '''
        _x_train, _y_train, _x_test, _y_test = dataset_utils.load_clean_train_test(
            vae_sig_type=self.trained_on,
            sig_id=self.sig_id,
            id_as_label=False)

        # Encode the signatures & extract the losses
        x_encoded = self.vanilla_vae.encoder.predict(_x_test)

        # Extract the losses from each input image
        x_reconstructed = self.vanilla_vae.decoder.predict(x_encoded)
        self.losses = (mse(_x_test, x_reconstructed) * self.image_res).eval(
            session=K.get_session()).reshape(-1, 1)

        # Split data
        x_train, x_test, y_train, y_test = train_test_split(
            x_encoded, _y_test, test_size=0.2)  # use latent vector
        # x_train, x_test, y_train, y_test = train_test_split(self.losses, _y_test, test_size=0.2)  # use recon_loss
        # x_reconstructed[:, :-1] = self.losses # use both
        # x_train, x_test, y_train, y_test = train_test_split(x_reconstructed, _y_test, test_size=0.2) # use both

        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test
Example #11
		def loss_func(y_true, y_pred):
			y_true_attr = y_true[0]
			y_pred_attr = y_pred[0]

			y_true_a = y_true[1]
			y_pred_a = y_pred[1]

			## ATTR RECONSTRUCTION LOSS_______________________
			## mean squared error
			attr_reconstruction_loss = mse(K.flatten(y_true_attr), K.flatten(y_pred_attr))
			attr_reconstruction_loss *= modelArgs["input_shape"][0][0]

			## A RECONSTRUCTION LOSS_______________________
			## binary cross-entropy
			a_reconstruction_loss = binary_crossentropy(K.flatten(y_true_a), K.flatten(y_pred_a))
			a_reconstruction_loss *= (modelArgs["input_shape"][1][0] * modelArgs["input_shape"][1][1])

			## KL LOSS _____________________________________________
			kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
			kl_loss = K.sum(kl_loss, axis=-1)
			kl_loss *= -0.5

			## COMPLETE LOSS __________________________________________________
			# attr_reconstruction_loss = tf.Print(attr_reconstruction_loss, [attr_reconstruction_loss], message="attr_reconstruction_loss: ")
			# a_reconstruction_loss = tf.Print(a_reconstruction_loss, [a_reconstruction_loss], message="a_reconstruction_loss: ")
			# kl_loss = tf.Print(kl_loss, [kl_loss], message="kl_loss: ")
			# tf.Print('weight_attribute_reconstruction_loss:', attr_reconstruction_loss, 'a_reconstruction_loss:', a_reconstruction_loss, 'kl_loss:', kl_loss)

			# loss = K.mean(trainArgs["loss_weights"][0] * a_reconstruction_loss + trainArgs["loss_weights"][1] * attr_reconstruction_loss + trainArgs["loss_weights"][2] * kl_loss)
			loss = trainArgs["loss_weights"][0] * a_reconstruction_loss

			return loss
Example #12
    def compile_model(self, x_train):

        print("COMPILING MODEL")
        midi_file_size = x_train.shape[1]

        input_shape = (midi_file_size,)
        # VAE model = encoder + decoder
        # build encoder model
        inputs = Input(shape=input_shape, name='encoder_input')
        x = Dense(self.intermediate_dim, activation='relu')(inputs)
        z_mean = Dense(self.latent_dim, name='z_mean')(x)
        z_log_var = Dense(self.latent_dim, name='z_log_var')(x)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(self.sampling, output_shape=(self.latent_dim,), name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        encoder.summary()
        #plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)

        # build decoder model
        latent_inputs = Input(shape=(self.latent_dim,), name='z_sampling')
        x = Dense(self.intermediate_dim, activation='relu')(latent_inputs)
        outputs = Dense(midi_file_size, activation='sigmoid')(x)

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')
        decoder.summary()
        #plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)

        # instantiate VAE model
        outputs = decoder(encoder(inputs)[2])
        vae = Model(inputs, outputs, name='vae_mlp')

        reconstruction_loss = mse(inputs, outputs)
        #midi_file_size = midi_file_size*midi_file_size
        #reconstruction_loss *= midi_file_size
        #kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        #kl_loss = K.sum(kl_loss, axis=-1)
        #kl_loss *= -0.5
        #vae_loss = K.mean(reconstruction_loss )#+ kl_loss)
        #vae.add_loss(vae_loss)


        #loss = 'binary_crossentropy'
        loss = 'mean_squared_error'


        #opt = Adam(lr=0.00005)  # 0.001 was the default, so try a smaller one
        opt = Adam(lr=0.00005)  # 0.001 was the default, so try a smaller one
        vae.compile(optimizer=opt, loss=loss)
        #vae.compile(optimizer='Adam', loss=loss)

        #vae.compile(optimizer='adam')
        vae.summary()
        #plot_model(vae,to_file='vae_mlp.png',show_shapes=True)

        return vae, encoder, decoder
Example #13
def perceptual_loss(y_true, y_pred):
    vgg = VGG19(include_top=False, weights="imagenet", input_shape=(None, None, 3))
    loss_model = Model(inputs=vgg.inputs, outputs=vgg.get_layer("block5_conv4").output)
    loss_model.trainable = False
    preprocessed_y_pred = preprocess_vgg_input(y_pred)
    preprocessed_y_true = preprocess_vgg_input(y_true)
    return losses.mse(loss_model(preprocessed_y_true), loss_model(preprocessed_y_pred))
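
Note that the VGG19 feature extractor is rebuilt every time this loss is evaluated. A common variation, sketched below with the same imports and the `preprocess_vgg_input` helper from the original snippet, is to build the frozen extractor once and close over it:

# Hypothetical variation: instantiate the frozen VGG19 feature model once
# and return a loss closure that reuses it on every call.
def make_perceptual_loss():
    vgg = VGG19(include_top=False, weights="imagenet", input_shape=(None, None, 3))
    loss_model = Model(inputs=vgg.inputs, outputs=vgg.get_layer("block5_conv4").output)
    loss_model.trainable = False

    def perceptual_loss(y_true, y_pred):
        return losses.mse(loss_model(preprocess_vgg_input(y_true)),
                          loss_model(preprocess_vgg_input(y_pred)))

    return perceptual_loss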
Example #14
    def __init__(
            self, x_dim, u_dim, r_dim, model_path=None, model=None
    ):
        print("Init ForwardDynamicsAndRewardDNN")
        super(ForwardDynamicsAndRewardDNN, self).__init__(
            x_dim, u_dim, r_dim
        )

        if model_path is not None:
            self.mdl = load_model(
                model_path,
                custom_objects={'atan2_loss': atan2_loss, 'cos': KK.cos}
            )

        if model is not None:
            self.mdl = model

        x0 = KK.placeholder(shape=(None, self.x_dim), name='x0')
        u = KK.placeholder(shape=(None, self.u_dim), name='u')
        x1, cost = self.mdl([x0, u])
        samp_symb = KK.placeholder(
            shape=(1, self.x_dim),
            name='samp_syb'
        )
        loss = KK.expand_dims(mse(samp_symb, x1), axis=1)
        u_grads = KK.gradients([loss], [u])

        self.meas_fn = KK.function(
            [x0, u, samp_symb],
            [x1, loss] + u_grads
        )

        self.zero_control = None
Example #15
    def recon_loss_combi(y_true, y_pred):

        mask_value = 0
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())

        return lamb * mse(y_true * mask, y_pred * mask) + (
            1 - lamb) * mean_absolute_error(y_true * mask, y_pred * mask)
Example #16
def texture(x_true, x_pred):
    """
    Apply Mean Squared Error on Gram matrices computed from feature vectors
    """

    # reshape so that the spatial dimensions are collapsed into a single dimension
    reshape_true = tf.reshape(x_true, [
        tf.shape(x_true)[0],
        tf.shape(x_true)[1] * tf.shape(x_true)[2],
        tf.shape(x_true)[3]
    ])
    # compute the transpose (keeping the batch dimension fixed)
    transpose_true = tf.transpose(reshape_true, perm=[0, 2, 1])

    # same for x_pred
    reshape_pred = tf.reshape(x_pred, [
        tf.shape(x_pred)[0],
        tf.shape(x_pred)[1] * tf.shape(x_pred)[2],
        tf.shape(x_pred)[3]
    ])
    transpose_pred = tf.transpose(reshape_pred, perm=[0, 2, 1])

    # matrix product of the features with their transpose, which amounts to a dot
    # product between the feature maps (the Gram matrix)
    gram_true = tf.matmul(transpose_true, reshape_true)
    gram_pred = tf.matmul(transpose_pred, reshape_pred)

    return loss_params["texture"] * mse(gram_true, gram_pred)
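
The Gram matrices above grow with the spatial size of the feature maps, so the scale of their MSE depends on resolution. A hedged variant that divides each Gram matrix by the number of entries that fed it (the normalisation is an assumption, not part of the original):

# Hypothetical variant: normalise each Gram matrix by (H * W * C) so the texture
# term keeps a comparable scale across feature-map resolutions.
def texture_normalized(x_true, x_pred):
    shape_true = tf.shape(x_true)
    shape_pred = tf.shape(x_pred)
    n = tf.cast(shape_true[1] * shape_true[2] * shape_true[3], tf.float32)
    f_true = tf.reshape(x_true, [shape_true[0], shape_true[1] * shape_true[2], shape_true[3]])
    f_pred = tf.reshape(x_pred, [shape_pred[0], shape_pred[1] * shape_pred[2], shape_pred[3]])
    gram_true = tf.matmul(f_true, f_true, transpose_a=True) / n
    gram_pred = tf.matmul(f_pred, f_pred, transpose_a=True) / n
    return loss_params["texture"] * mse(gram_true, gram_pred)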
Example #17
def build_graph(encoder, decoder, discriminator, recon_vs_gan_weight=1e-6):
    image_shape = K.int_shape(encoder.input)[1:]
    latent_shape = K.int_shape(decoder.input)[1:]

    sampler = Lambda(_sampling, output_shape=latent_shape, name='sampler')

    # Inputs
    x = Input(shape=image_shape, name='input_image')
    # z_p is sampled directly from isotropic gaussian
    z_p = Input(shape=latent_shape, name='z_p')

    # Build computational graph

    # z_mean, z_log_var = encoder(x)
    # z = sampler([z_mean, z_log_var, z_p])

    z = encoder(x)

    x_tilde = decoder(z)
    x_p = decoder(z_p)

    dis_x, dis_feat = discriminator(x)
    dis_x_tilde, dis_feat_tilde = discriminator(x_tilde)
    dis_x_p = discriminator(x_p)[0]

    # Compute losses

    # Learned similarity metric
    dis_nll_loss = mean_gaussian_negative_log_likelihood(
        dis_feat, dis_feat_tilde)

    # KL divergence loss
    # kl_loss = K.mean(-0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1))

    kl_loss = KLD(z_p, z)

    phase_loss = mse(z_p, z)

    # Create models for training
    lamba1 = 0.03
    lamba2 = 0.03
    lamba3 = 22
    encoder_train = Model([x, z_p], dis_feat_tilde, name='e')
    encoder_train.add_loss(lamba1 * kl_loss)
    encoder_train.add_loss(lamba2 * dis_nll_loss)
    encoder_train.add_loss(lamba3 * phase_loss)

    decoder_train = Model([x, z_p], [dis_x_tilde, dis_x_p], name='de')
    normalized_weight = recon_vs_gan_weight / (1. - recon_vs_gan_weight)
    decoder_train.add_loss(normalized_weight * dis_nll_loss)

    discriminator_train = Model([x, z_p], [dis_x, dis_x_tilde, dis_x_p],
                                name='di')

    # Additional models for testing
    vae = Model(x, x_tilde, name='vae')
    vaegan = Model(x, dis_x_tilde, name='vaegan')
    vaegan_test = Model(x, z, name='vaegan_test')

    return encoder_train, decoder_train, discriminator_train, vae, vaegan, vaegan_test
Example #18
    def train(self, X):
        def sampling(args):
            z_mean, z_log_var = args
            batch = K.shape(z_mean)[0]
            dim = K.int_shape(z_mean)[1]
            epsilon = K.random_normal(shape=(batch, dim), seed=0)
            return z_mean + K.exp(0.5 * z_log_var) * epsilon

        encoding_dim = self.n_components
        original_dim = X.shape[1]
        input = Input(shape=(original_dim, ))
        encoded = Dense(encoding_dim)(input)
        encoded = BatchNormalization()(encoded)
        encoded = Activation('relu')(encoded)
        z_mean = Dense(encoding_dim)(encoded)
        z_log_var = Dense(encoding_dim)(encoded)
        z = Lambda(sampling, output_shape=(encoding_dim, ),
                   name='z')([z_mean, z_log_var])
        decoded = Dense(encoding_dim, activation='relu')(z)
        output = Dense(original_dim, activation='sigmoid')(decoded)
        vae = Model(input, output)
        encoder = Model(input, z)
        reconstruction_loss = mse(input, output)
        reconstruction_loss *= original_dim
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        vae.add_loss(vae_loss)
        vae.compile(optimizer=Adam())
        vae.fit(X, epochs=self.epochs, verbose=2)
        return encoder.predict(X)
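
`train` both fits the VAE on `X` and returns the sampled latent codes, so it behaves like a fit-and-transform step. A hedged usage sketch (the class name `VAEEmbedder` and its constructor are assumptions):

# Hypothetical usage: fit the VAE and keep the latent representation of X.
embedder = VAEEmbedder(n_components=8, epochs=50)  # assumed class and arguments
X_latent = embedder.train(X)                        # shape: (len(X), n_components)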
Example #19
def loss(input_shape, inp, out_VAE, z_mean, z_var, e=1e-8):
    """
    loss(input_shape, inp, out_VAE, z_mean, z_var, e=1e-8)
    ------------------------------------------------------
    Since keras does not allow custom loss functions to have arguments
    other than the true and predicted labels, this function acts as a wrapper
    that allows us to implement the custom loss used in the paper, involving
    outputs from multiple layers.

    L = L<dice> + 0.1 ∗ L<L2> + 0.1 ∗ L<KL>

    - L<dice> is the dice loss between input and segmentation output.
    - L<L2> is the L2 loss between the output of VAE part and the input.
    - L<KL> is the standard KL divergence loss term for the VAE.

    Parameters
    ----------
    `input_shape`: A 4-tuple, required
        The shape of an image as the tuple (c, H, W, D), where c is
        the no. of channels; H, W and D is the height, width and depth of the
        input image, respectively.
    `inp`: A keras.layers.Layer instance, required
        The input layer of the model. Used internally.
    `out_VAE`: A keras.layers.Layer instance, required
        The output of VAE part of the decoder. Used internally.
    `z_mean`: A keras.layers.Layer instance, required
        The vector representing values of mean for the learned distribution
        in the VAE part. Used internally.
    `z_var`: A keras.layers.Layer instance, required
        The vector representing values of variance for the learned distribution
        in the VAE part. Used internally.
    `e`: Float, optional
        A small epsilon term to add in the denominator to avoid dividing by
        zero and possible gradient explosion.

    Returns
    -------
    loss_(y_true, y_pred): A custom keras loss function
        This function takes as input the predicted and ground labels, uses them
        to calculate the dice loss. Combined with the L<KL> and L<L2> computed
        earlier, it returns the total loss.
    """
    c, H, W, D = input_shape
    n = c * H * W * D

    loss_L2 = mse(inp, out_VAE)

    loss_KL = (1 / n) * K.sum(K.square(z_mean) + z_var - K.log(z_var) - 1,
                              axis=-1)

    def loss_(y_true, y_pred):
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum(K.abs(y_true_f * y_pred_f), axis=-1)
        loss_dice = (2. * intersection) / (K.sum(K.square(y_true_f), -1) +
                                           K.sum(K.square(y_pred_f), -1) + e)

        return loss_dice + 0.1 * loss_L2 + 0.1 * loss_KL

    return loss_
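
Because `loss` is a factory, the closure it returns is what gets handed to `compile`; the dice term is computed per batch, while the L2 and KL terms were already built from the graph tensors captured above. A hedged wiring sketch (the `model` is assumed):

# Hypothetical usage sketch: build the closure from the model's own tensors
# and use it as the loss for the segmentation output.
model.compile(optimizer='adam',
              loss=loss(input_shape, inp, out_VAE, z_mean, z_var))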
Example #20
def train_vae(vae,
              training_data,
              validation_split,
              inputs,
              outputs,
              output_tensors,
              n_epochs=50):
    x_train = training_data
    x_train = x_train.astype('float32') / 255
    if len(x_train.shape) == 3:
        image_size = x_train.shape[1]
        original_dim = image_size * image_size
    else:
        original_dim = x_train.shape[1]

    z_mean, z_log_var, z = output_tensors
    reconstruction_loss = mse(inputs, outputs)
    reconstruction_loss *= original_dim
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    #vae.summary()
    plot_model(vae, to_file='vae_mlp.png', show_shapes=True)
    batch_size = None
    vae.fit(x_train,
            epochs=n_epochs,
            validation_split=validation_split,
            batch_size=batch_size)
Example #21
    def build_train(self, x_train, x_test=None, *args, **kwargs):
        """
        Builds a variational autoencoder using Keras
            and then compiles and trains it

        #Arguments
            x_train: array like, shape == (num_samples, num_features)
            x_test: optional validation data

        #Returns
            none.
            sets self.vae, self.encoder, and self.decoder
        """
        original_dim = x_train.shape[1]
        input_shape = (original_dim, )
        inputs = Input(shape=input_shape, name='encoder_input')
        x = Dense(self.intermediate_dim, activation='relu')(inputs)
        z_mean = Dense(self.latent_dim, name='z_mean')(x)
        z_log_var = Dense(self.latent_dim, name='z_log_var')(x)
        z = Lambda(self.sampling, output_shape=(self.latent_dim, ),
                   name='z')([z_mean, z_log_var])
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        if self.verbose:
            encoder.summary()
        latent_inputs = Input(shape=(self.latent_dim, ), name='z_sampling')
        x = Dense(self.intermediate_dim, activation='relu')(latent_inputs)
        outputs = Dense(original_dim, activation='sigmoid')(x)
        decoder = Model(latent_inputs, outputs, name='decoder')
        if self.verbose:
            decoder.summary()
        outputs = decoder(encoder(inputs)[2])
        self.vae = Model(inputs, outputs, name='vae_mlp')
        reconstruction_loss = mse(inputs, outputs)
        reconstruction_loss *= original_dim
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        self.vae.add_loss(vae_loss)
        self.vae.compile(optimizer='adam')
        if self.verbose:
            self.vae.summary()
        if self.weights:
            self.vae.load_weights(self.weights)
        else:
            if x_test is not None:
                self.vae.fit(x_train,
                             epochs=self.epochs,
                             batch_size=self.batch_size,
                             validation_data=(x_test, None),
                             verbose=self.verbose)
            else:
                self.vae.fit(x_train,
                             epochs=self.epochs,
                             batch_size=self.batch_size,
                             verbose=self.verbose)
            self.vae.save_weights('vae.h5')
        self.encoder = encoder
        self.decoder = decoder
        return
Example #22
    def _train(self):
        self._buildModel()
        models = (self._encoder, self._decoder)

        # VAE loss = mse_loss or xent_loss + kl_loss
        if self._mse:
            reconstruction_loss = mse(self._inputs, self._outputs)
        else:
            reconstruction_loss = binary_crossentropy(self._inputs,
                                                      self._outputs)

        reconstruction_loss *= self._original_dim
        kl_loss = 1 + self._z_log_var - K.square(self._z_mean) - K.exp(
            self._z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        self._vae.add_loss(vae_loss)
        self._vae.compile(optimizer='adam')
        self._vae.summary()

        if self._weights:
            self._vae.load_weights(self._weights)
        else:
            # train the autoencoder
            self._vae.fit(self._x_train,
                          epochs=self._epochs,
                          batch_size=self._batch_size,
                          validation_data=(self._x_test, None),
                          shuffle=False)

            self._vae.save_weights('vae_features.h5')
Example #23
def build_vae(models, inputs, outputs, z_mean, z_log_var, loss, name):

    encoder, decoder = models
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name=name)

    # VAE loss = mse_loss or xent_loss + kl_loss
    if loss == 'mse':
        reconstruction_loss = mse(inputs, outputs)
    elif loss == 'binary':
        reconstruction_loss = binary_crossentropy(inputs, outputs)
    else:
        reconstruction_loss = categorical_crossentropy(inputs, outputs)

    reconstruction_loss *= encoder.input_shape[1] * encoder.input_shape[1]
    reconstruction_loss = K.mean(
        reconstruction_loss,
        [1, 2])  # https://github.com/keras-team/keras/issues/10155
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(
        z_log_var)  # error to keep within distribution
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    vae.summary()
    return vae
Example #24
    def __init__(self, input_dim, latent_dim):
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.encoder = self._build_encoder()
        self.decoder = self._build_decoder()
        inputs = keras.layers.Input(shape=(self.input_dim, ))
        z_mean, z_log_var = self.encoder(inputs)
        z = keras.layers.Lambda(sampling,
                                output_shape=(self.latent_dim, ),
                                name='z')([z_mean, z_log_var])
        outputs = self.decoder(z)
        self.vae = Model(inputs=inputs, outputs=outputs, name="VAEMlp")
        """
        Losses 
        """
        reconstruction_loss = mse(inputs, outputs)
        reconstruction_loss *= self.input_dim

        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5

        vae_loss = K.mean(reconstruction_loss + kl_loss)
        """
        compile 
        """
        self.vae.add_loss(vae_loss)
        self.vae.compile(optimizer="Adam")
        self.vae.summary()
Example #25
 def loss(y_true, y_pred):
     img_loss = losses.mse(K.flatten(cinp),
                           K.flatten(cout_img)) * original_dim
     nll_loss = mean_gaussian_negative_log_likelihood(y_true, y_pred)
     kl_loss = -0.5 * K.sum(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return img_weight * img_loss + nll_weight * nll_loss + kl_weight * kl_loss
Example #26
def vae(X,Y=0,intermediate_dim=0,latent_dim=0,batch_size=256,epochs=100,verbose=0,validation_split=0.1):
    if intermediate_dim == 0: intermediate_dim = X.shape[1]
    if latent_dim == 0: latent_dim = int(np.floor(intermediate_dim/20))
    if batch_size == 0: batch_size = X.shape[0] 
    input_dim = X.shape[1]
    output_dim = X.shape[1]
    inputs = Input(shape=(input_dim,), name='encoder_input')
    x = Dropout(0.2)(Dense(intermediate_dim, activation='sigmoid')(inputs))
    z_mean = Dropout(0.2)(Dense(latent_dim, name='z_mean')(x))
    z_log_var = Dropout(0.2)(Dense(latent_dim, name='z_log_var')(x))
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dropout(0.2)(Dense(intermediate_dim, activation='sigmoid')(latent_inputs))
    outputs = Dropout(0.2)(Dense(input_dim, activation='sigmoid')(x))
    decoder = Model(latent_inputs, outputs, name='decoder')
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae_mlp')
    reconstruction_loss = mse(inputs, outputs)
    reconstruction_loss *= input_dim
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    history = vae.fit(X,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split,
            verbose=verbose)
    return vae,encoder,decoder,history
Example #27
def loss(inputs, encoder, decoder, original_dim):
    z_mean, z_log_var, z = encoder(inputs)
    outputs = decoder(z)
    reconstruction_loss = mse(inputs, outputs) * original_dim
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    return K.mean(reconstruction_loss + kl_loss)
Example #28
def image_gradient_loss(y_true, y_pred):
    """
    Loss based on image gradient
    """
    dy_true, dx_true = tf.image.image_gradients(y_true)
    dy_pred, dx_pred = tf.image.image_gradients(y_pred)
    return losses.mse(dy_true, dy_pred) + losses.mae(dx_true, dx_pred)
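
`tf.image.image_gradients` returns per-pixel (dy, dx) tensors with the same shape as the input; note that the example pairs `mse` on the vertical gradients with `mae` on the horizontal ones. A hedged sketch of combining it with a plain pixel loss (the weight and the `model` are assumptions):

# Hypothetical composite loss: pixel-wise MSE plus the gradient term above.
def combined_loss(y_true, y_pred):
    return losses.mse(y_true, y_pred) + 0.1 * image_gradient_loss(y_true, y_pred)

model.compile(optimizer='adam', loss=combined_loss)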
Example #29
def get_model(original_dim, scale_width, latent_dim, loss_function="xent"):
    # network parameters
    input_shape = (original_dim, )

    # VAE model = encoder + decoder
    # build encoder model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = inputs
    intermediate_dim = original_dim * scale_width // 2
    dims = []
    while intermediate_dim >= latent_dim * 2:
        x = Dense(intermediate_dim, activation='relu')(x)
        dims.append(intermediate_dim)
        intermediate_dim //= 2
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim, ),
               name='z')([z_mean, z_log_var])

    # instantiate encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()
    plot_model(encoder, to_file=f'{model_name}_encoder.png', show_shapes=True)

    # build decoder model
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    x = latent_inputs
    for intermediate_dim in reversed(dims):
        x = Dense(intermediate_dim, activation='relu')(x)
    outputs = Dense(original_dim, activation='sigmoid')(x)

    # instantiate decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()
    plot_model(decoder, to_file=f'{model_name}_decoder.png', show_shapes=True)

    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name=model_name)
    # VAE loss = mse_loss or xent_loss + kl_loss
    if loss_function == "mse":
        reconstruction_loss = mse(inputs, outputs)
    elif loss_function == "xent":
        reconstruction_loss = binary_crossentropy(inputs, outputs)
    else:
        raise RuntimeError(f"unsupported loss function {loss_function}")

    reconstruction_loss *= original_dim

    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    return vae, encoder, decoder
Example #30
 def vae_loss(true, pred):
     rec_loss = mse(K.flatten(true), K.flatten(pred))
     rec_loss *= 224 * 224 * 3
     kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
     kl_loss = K.sum(kl_loss, axis=-1)
     kl_loss *= -0.5
     vae_loss = K.mean(rec_loss + beta * (kl_loss - C))
     return vae_loss
Example #31
def vae_loss(x, x_decoded_mean):
    reconstruction_loss = mse(K.flatten(x), K.flatten(x_decoded_mean))
    reconstruction_loss *= image_size[0] * image_size[1]
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    return vae_loss
Example #32
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    help_ = "Load h5 model trained weights"
    parser.add_argument("-w", "--weights", help=help_)
    help_ = "Use mse loss instead of binary cross entropy (default)"
    parser.add_argument("-m",
                        "--mse",
                        help=help_, action='store_true')
    args = parser.parse_args()
    models = (encoder, decoder)
    data = (x_test, y_test)

    # VAE loss = mse_loss or xent_loss + kl_loss
    if args.mse:
        reconstruction_loss = mse(inputs, outputs)
    else:
        reconstruction_loss = binary_crossentropy(inputs,
                                                  outputs)

    reconstruction_loss *= original_dim
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    vae.summary()
    plot_model(vae,
               to_file='vae_mlp.png',
               show_shapes=True)
Example #33
 def __call__(self, y_true, y_pred):
     return (self.mse_fraction * losses.mse(y_true, y_pred) +
             (1 - self.mse_fraction) * losses.mae(y_true, y_pred))
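
The `__call__` blends MSE and MAE with a fixed fraction, so the enclosing object only needs to carry `mse_fraction`. A minimal sketch of such a wrapper class (the class name and default value are assumptions):

# Hypothetical wrapper class around the __call__ above.
class BlendedLoss:
    def __init__(self, mse_fraction=0.7):
        self.mse_fraction = mse_fraction

    def __call__(self, y_true, y_pred):
        return (self.mse_fraction * losses.mse(y_true, y_pred) +
                (1 - self.mse_fraction) * losses.mae(y_true, y_pred))

model.compile(optimizer='adam', loss=BlendedLoss(mse_fraction=0.5))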
Example #34
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    help_ = "Load h5 model trained weights"
    parser.add_argument("-w", "--weights", help=help_)
    help_ = "Use mse loss instead of binary cross entropy (default)"
    parser.add_argument("-m", "--mse", help=help_, action='store_true')
    args = parser.parse_args()
    models = (encoder, decoder)
    data = (x_test, y_test)

    # VAE loss = mse_loss or xent_loss + kl_loss
    if args.mse:
        reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs))
    else:
        reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                                  K.flatten(outputs))

    reconstruction_loss *= image_size * image_size
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='rmsprop')
    vae.summary()
    plot_model(vae, to_file='vae_cnn.png', show_shapes=True)

    if args.weights: