Example #1
 def vae_loss(y_true, y_pred):
     # reconstruction term: per-pixel binary cross-entropy, summed over the image
     generation_loss = img_rows * img_cols \
         * metrics.binary_crossentropy(y_true, y_pred)
     # KL term; z_var holds the standard deviation, and the epsilon keeps the log finite
     kl_loss = 0.5 * tf.reduce_sum(K.square(z_mean)
             + K.square(z_var) - K.log(K.square(z_var) + 1e-8) - 1,
             axis=1)
     return tf.reduce_mean(generation_loss + kl_loss)
Example #2
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
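
For reference, the kl_loss term that recurs throughout these snippets is the closed-form KL divergence between the approximate posterior N(mu, sigma^2) returned by the encoder (z_mean = mu, z_log_var = log sigma^2) and the standard normal prior:

    D_KL( N(mu, sigma^2) || N(0, 1) ) = -0.5 * sum_j ( 1 + log sigma_j^2 - mu_j^2 - sigma_j^2 )

which is exactly -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1). Some of the examples below take K.mean over the latent dimensions instead of K.sum, which only rescales the term by 1/latent_dim.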
Example #3
 def vae_loss(self, x, x_decoded_mean_squash, z_mean, z_log_var):
     x = K.flatten(x)
     x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
     img_rows, img_cols = self._img_rows, self._img_cols
     # generative or reconstruction loss
     xent_loss = img_rows * img_cols * \
         metrics.binary_crossentropy(x, x_decoded_mean_squash)
     # Kullback-Leibler divergence loss
     kl_loss = - 0.5 * K.mean(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return K.mean(xent_loss + kl_loss)
Example #4
 def vae_loss(self, x, x_decoded_mean_squash):
     x = K.flatten(x)
     x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
     xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
     kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return K.mean(xent_loss + kl_loss)
Example #5
 def vae_loss(self, x, x_decoded_mean):
     xent_loss = input_dim * \
         metrics.binary_crossentropy(x, x_decoded_mean)
     kl_loss = -0.5 * K.sum(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return K.mean(xent_loss + kl_loss)
Example #6
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])

# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

# instantiate VAE model
vae = Model(x, x_decoded_mean)

# Compute VAE loss
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()


# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
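
The snippet stops after the data preparation; in the reference Keras VAE example, a model wired up with add_loss is then trained directly on the inputs, with no separate targets. A minimal sketch of that training call, assuming batch_size and epochs are defined earlier in the script:

vae.fit(x_train,
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, None))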
Example #7
def vae_loss(_x, x_decoded_mean):
    # Compute VAE loss
    xent_loss = metrics.binary_crossentropy(_x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(
        1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss
Example #8
 def vae_loss(self, x, x_decoded_mean):
     xent_loss = original_dim * metrics.binary_crossentropy(
         x, x_decoded_mean)
     kl_loss = -KLWeight * 0.5 * K.sum(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return K.mean(xent_loss + kl_loss) / (original_dim + KLWeight)
Example #9
def bce(y_true, y_pred):
    y_true_f = K.clip(K.batch_flatten(y_true), K.epsilon(), 1.)
    y_pred_f = K.clip(K.batch_flatten(y_pred), K.epsilon(), 1.)
    bce = binary_crossentropy(y_true_f, y_pred_f)
    return bce
Example #10
 def vae_loss(self, x, x_decoded_mean_squash):
     x = K.flatten(x)
     x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
     xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
     kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return K.mean(xent_loss + kl_loss)
Example #11
 def metric(self, x, x_decoded):
     return self.weight * metrics.binary_crossentropy(x, x_decoded)
Example #12
z_mu, z_sigma = residual_enc_model(tdw_img, E_enc, settings.latent_noise_dim)
epsilon = tf.random_normal(tf.shape(z_mu))
latent_noise_input_ = z_mu + (z_sigma) * epsilon

G_dec = gen_model(E_enc, latent_noise_input_, input_shape, reuse=True)
G_dec_randY = gen_model(E_enc_randY,
                        latent_noise_input_,
                        input_shape,
                        reuse=True)  # randY

z_sigma_sq = tf.square(z_sigma)
z_log_sigma_sq = tf.log(z_sigma_sq + 1e-10)
kld_loss = tf.reduce_mean(-0.5 * tf.reduce_sum(
    1 + z_log_sigma_sq - tf.square(z_mu) - tf.exp(z_log_sigma_sq), 1))
gloss = tf.reduce_mean(
    64 * 64 * 3 * tf.reduce_mean(metrics.binary_crossentropy(tdw_img, G_dec)) +
    kld_loss)

if settings.add_encoder:
    eloss, G_dec_e = enc_graph_simgan(settings, enc_model, simvae_model,
                                      gen_model, tdw_img, latent_code_input)
if settings.add_mi_penalty:
    closs, gloss_ = mi_penalty_graph(settings, enc_model, mi_disc_model,
                                     G_dec_e, latent_code_dim)
if settings.add_infogan_penalty:
    infogan_loss = infogan_penalty_graph(settings, zbar, latent_noise_input)

t_vars = tf.trainable_variables()
sim_vars = [var for var in t_vars if 'simvae' in var.name]
d_vars = [var for var in t_vars if 'dec' in var.name]  #simgan_decoder
c_vars = [
Example #13
 def vae_loss(self, x_input, x_decoded):
     reconstruction_loss = original_dim * metrics.binary_crossentropy(x_input, x_decoded)
     # ERGamazon: Tried 0.3, 0.7
     kl_loss = - 0.5 * K.sum(1 + z_log_var_encoded - K.square(z_mean_encoded) - 
                             K.exp(z_log_var_encoded), axis=-1)
     return K.mean(reconstruction_loss + (K.get_value(beta) * kl_loss))
Example #14
#outputs = decoder(z)
cvae = Model([img, label], h_decoded)

#def vae_loss(y_true, y_pred):
#    """loss = reconstruction loss + KL loss for each data batch"""
#
#    # E[log P(X|z)]
#    recon = K.sum(binary_crossentropy(y_pred, y_true))
#
#    # D_KL(Q(z|x) || P(z|X))
#    kl = -0.5 * K.sum( 1. + log_sigma - K.exp(log_sigma) - K.square(mu), axis = -1)
#
#    return K.mean(recon + kl)

#define loss
xent_loss = img_rows * img_cols * channels * metrics.binary_crossentropy(
    K.flatten(img), K.flatten(h_decoded))
kl_loss = -0.5 * K.sum(1 + log_sigma - K.square(mu) - K.exp(log_sigma),
                       axis=-1)
cvae_loss = K.mean(xent_loss + kl_loss)
cvae.add_loss(cvae_loss)
cvae.compile(optimizer='adam')
cvae.summary()
cvae.fit([x_train, y_train], batch_size=m, epochs=n_epoch)
#%%
encoder = Model([img, label], [mu, log_sigma, z])

decoder_input = Input(shape=(latent_dim, ))
d = Concatenate(axis=-1)([decoder_input, label])
d = Dense(4 * 4 * 128, activation='relu')(d)  # use the label-conditioned vector, not decoder_input alone
d = Reshape((4, 4, 128))(d)
d = BatchNormalization()(d)
Example #15
def unnormalised_reconstruction_loss(x_decoded, y):
	rec_loss = metrics.binary_crossentropy(K.flatten(x_decoded), K.flatten(y))
	print("Rec loss: " + str(rec_loss))
	return rec_loss
Example #16
# reshape here to flatten the contexts of each central word

#x_hot_flat=K.reshape(x_hot, (-1,original_dim ))
#
#x_hot = tf.Print(data=[x_hot],input_=x_hot, message="x_hot")

#x_hot_flat = K.reshape(x_hot, (-1,))
x_hot_flat = K.flatten(x_hot)
#print("shape x_hot=", x_hot_flat.shape)
# one-hot encode the flattened word indices so they can be compared with x_decoded_mean
x_hot_flat_2 = K.one_hot(x_hot_flat, original_dim)

#x_decoded_mean = tf.Print(data=[x_decoded_mean],input_=x_decoded_mean, message="x_dec")
print("shape x_hot_flat_2=", x_hot_flat_2.shape)
print("x_decoded_mean=", x_decoded_mean.shape)
#reconstruction_loss = original_dim * metrics.categorical_crossentropy(x_decoded_mean,x_hot_flat_2)
reconstruction_loss = original_dim * metrics.binary_crossentropy(
    x_hot_flat_2, x_decoded_mean)  # y_true first, then y_pred
#reconstruction_loss =  tf.Print(data=[reconstruction_loss],input_=reconstruction_loss, message="recon_loss")

print("rec_loss=", reconstruction_loss.shape)
print("rec_loss=", reconstruction_loss.shape)

kl_loss = K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
#kl_loss_prior = K.sum(1 + prior_scale - K.square(prior_location) - K.exp(prior_scale), axis=-1)
#total_kl_loss = kl_loss_posterior + kl_loss_prior
print("z_log_var=", z_log_var.shape)

print("K.square(z_mean)=", K.square(z_mean).shape)
kl_loss *= -0.5
kl_loss = K.repeat_elements(kl_loss, context_sz, axis=0)
#kl_loss = tf.Print(data=[kl_loss],input_=kl_loss, message="kl_loss")
print("kl_loss=", kl_loss.shape)
Example #17
    def __init__(self, config):
        self.config = config

        self.weights_path = os.path.join(config.model_path, 'weights_best.h5')
        self.overfit_path = os.path.join(config.model_path,
                                         'weights_overfit.h5')
        self.logs_path = os.path.join(config.model_path, 'logs')
        self.images_path = os.path.join(config.model_path, 'images')
        self.train_path = os.path.join(config.data_path, 'train')
        self.dev_path = os.path.join(config.data_path, 'dev')
        self.test_path = os.path.join(config.data_path, 'test')

        if not os.path.exists(config.model_path):
            os.makedirs(config.model_path)
        if not os.path.exists(self.logs_path):
            os.makedirs(self.logs_path)

        image_size = config.image_size
        filters = config.filters
        latent_size = config.latent_size
        batch_size = config.batch_size
        learning_rate = config.learning_rate

        x = Input(shape=(image_size, image_size, 3))

        conv1 = Conv2D(3,
                       kernel_size=(2, 2),
                       padding='same',
                       activation='relu')(x)
        conv2 = Conv2D(filters,
                       kernel_size=(2, 2),
                       padding='same',
                       activation='relu',
                       strides=(2, 2))(conv1)
        conv3 = Conv2D(filters,
                       kernel_size=3,
                       padding='same',
                       activation='relu',
                       strides=1)(conv2)
        conv4 = Conv2D(filters,
                       kernel_size=3,
                       padding='same',
                       activation='relu',
                       strides=1)(conv3)
        flat = Flatten()(conv4)

        z_mean = Dense(latent_size)(flat)
        z_stddev = Dense(latent_size)(flat)

        def sampling(args):
            z_mean, z_stddev = args
            epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_size),
                                      mean=0.,
                                      stddev=1.0)
            # despite its name, z_stddev is treated as log(sigma^2) by the KL term
            # below, so the sample uses exp(z_stddev / 2) as the standard deviation
            return z_mean + K.exp(z_stddev / 2) * epsilon

        z = Lambda(sampling, output_shape=(latent_size, ))([z_mean, z_stddev])

        decoder_upsample = Dense(filters * (image_size // 2) *
                                 (image_size // 2),
                                 activation='relu')

        output_shape = (batch_size, image_size // 2, image_size // 2, filters)

        decoder_reshape = Reshape(output_shape[1:])
        decoder_deconv1 = Conv2DTranspose(filters,
                                          kernel_size=3,
                                          padding='same',
                                          strides=1,
                                          activation='relu')
        decoder_deconv2 = Conv2DTranspose(filters,
                                          kernel_size=3,
                                          padding='same',
                                          strides=1,
                                          activation='relu')

        output_shape = (batch_size, filters, image_size + 1, image_size + 1)

        decoder_deconv3_upsamp = Conv2DTranspose(filters,
                                                 kernel_size=(3, 3),
                                                 strides=(2, 2),
                                                 padding='valid',
                                                 activation='relu')
        decoder_reconstr = Conv2D(3,
                                  kernel_size=2,
                                  padding='valid',
                                  activation='sigmoid')

        up_decoded = decoder_upsample(z)
        reshape_decoded = decoder_reshape(up_decoded)
        deconv1_decoded = decoder_deconv1(reshape_decoded)
        deconv2_decoded = decoder_deconv2(deconv1_decoded)
        x_decoded_relu = decoder_deconv3_upsamp(deconv2_decoded)
        x_reconstr = decoder_reconstr(x_decoded_relu)

        self.vae = Model(x, x_reconstr)

        xent_loss = image_size * image_size * metrics.binary_crossentropy(
            K.flatten(x), K.flatten(x_reconstr))
        kl_loss = -0.5 * K.sum(
            1 + z_stddev - K.square(z_mean) - K.exp(z_stddev), axis=-1)
        vae_loss = K.mean(xent_loss + kl_loss)
        self.vae.add_loss(vae_loss)

        optimizer = optimizers.Adam(lr=learning_rate)
        self.vae.compile(optimizer=optimizer)
        self.vae.summary()

        self.encoder = Model(x, z_mean)

        decoder_input = Input(shape=(latent_size, ))
        _up_decoded = decoder_upsample(decoder_input)
        _reshape_decoded = decoder_reshape(_up_decoded)
        _deconv1_decoded = decoder_deconv1(_reshape_decoded)
        _deconv2_decoded = decoder_deconv2(_deconv1_decoded)
        _x_decoded_relu = decoder_deconv3_upsamp(_deconv2_decoded)
        _x_reconstr = decoder_reconstr(_x_decoded_relu)
        self.generator = Model(decoder_input, _x_reconstr)

        try:
            self.vae.load_weights(self.weights_path)
            print('Loaded weights')
        except Exception:
            print("Couldn't find/load weights")
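
The snippet shows only the __init__ method, so the class name is not visible. As a rough usage sketch, where the class name ConvVAE and the concrete config values are assumptions rather than taken from the source, any object exposing the attributes read above can serve as config:

from types import SimpleNamespace

# hypothetical config; the field names match the attributes read in __init__
config = SimpleNamespace(
    model_path='models/conv_vae',
    data_path='data',
    image_size=64,
    filters=32,
    latent_size=128,
    batch_size=100,
    learning_rate=1e-4,
)

model = ConvVAE(config)  # hypothetical class name wrapping the __init__ above
# x_train: float32 RGB images in [0, 1] with shape (N, image_size, image_size, 3)
model.vae.fit(x_train, epochs=10, batch_size=config.batch_size)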
Example #18
def recon_loss(y_true, y_pred):
    #return(K.sum(K.binary_crossentropy(y_pred, y_true), axis=1))
    return (w * h * metrics.binary_crossentropy(y_true, y_pred))
Example #19
def xent(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred)
############
decoder_h = Dense(intermediate_dim, activation='relu')
print(decoder_h)
decoder_mean = Dense(original_dim, activation = 'sigmoid')
print (decoder_mean)
h_decoded = decoder_h(z)
print (h_decoded)
x_decoded_mean = decoder_mean(h_decoded)
print (K.shape(x_decoded_mean))


# instantiate VAE model
vae = Model(x, x_decoded_mean)

xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)  # Entropy loss of input and output
# oss_ = (rho * tf.log(rho / rho_hat)) + (rho_hat * tf.log((1 - rho) / (1 - rho_hat)))
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)  # KL-divergence loss
print('kl_loss shape:', kl_loss.shape)
# The KL-divergence loss tries to bring the latent variables closer to a unit gaussian distribution.
vae_loss = K.mean(xent_loss + kl_loss)

vae.add_loss(vae_loss)  # add_loss returns None, so its result must not be passed to compile()
vae.compile(optimizer='rmsprop')
vae.summary()


#########  model
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train[np.where((y_train==6) | (y_train == 8))[0]]
            vae = Model([x, z_log_var], x_decoded_mean)
        else:
            vae = Model(x, x_decoded_mean)

        # instantiate generator model
        decoder_input = Input(shape=(latent_dim,))
        _h_decoded = decoder_h(decoder_input)
        _x_decoded_mean = decoder_mean(_h_decoded)
        generator = Model(inputs=decoder_input, outputs=_x_decoded_mean)

    # instantiate encoder model
    encoder = Model(x, z_mean)

    # compute VAE loss
    if args.conv:
        xent_loss = original_dim * metrics.binary_crossentropy(K.flatten(x), K.flatten(x_decoded_mean))
    else:
        xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    vae_loss = K.mean(xent_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer='rmsprop')
    # vae.summary()

    # train the VAE on MNIST digits
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    if args.conv:
        x_train = x_train.reshape((len(x_train), x_train.shape[1], x_train.shape[2], 1))
        x_test = x_test.reshape((len(x_test), x_train.shape[1], x_train.shape[2], 1))
Example #22
# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(
    z
)  # from the sampled distribution to an intermediate dimension (decoding)
x_decoded_mean = decoder_mean(
    h_decoded
)  # from the intermediate dimension to the original size, final layer (decoding)

# instantiate VAE model
vae = Model(x, x_decoded_mean)

# Compute VAE loss
xent_loss = original_dim * metrics.binary_crossentropy(
    x, x_decoded_mean
)  # binary cross-entropy (wouldn't it be better to use regression?)
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)  # kl divergence
vae_loss = K.mean(xent_loss + 2 * kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()

# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
Example #23
def stacked_vae(
    input_dim,
    hidden_dims=None,
    latent_dim=100,
    initial_beta_val=0,
    learning_rate=0.0005,
    epsilon_std=1.0,
    kappa=1.0,
    epochs=50,
    batch_size=50,
    batch_normalize_inputs=True,
    batch_normalize_intermediaries=True,
    batch_normalize_embedding=True,
    relu_intermediaries=True,
    relu_embedding=True,
    max_beta_val=1,
):
    """
    This is a deep, or stacked, vae.
    `hidden_dims` denotes the size of each successive hidden layer,
    until `latent_dim` which is the middle layer. The default `hidden_dims` is [300].
    """
    if hidden_dims is None:
        hidden_dims = [300]

    # Function for reparameterization trick to make model differentiable
    def sampling(args):
        import tensorflow as tf

        # Function with args required for Keras Lambda function
        z_mean, z_log_var = args

        # Draw epsilon of the same shape from a standard normal distribution
        epsilon = K.random_normal(shape=tf.shape(z_mean),
                                  mean=0.0,
                                  stddev=epsilon_std)

        # The latent vector is non-deterministic and differentiable
        # in respect to z_mean and z_log_var
        z = z_mean + K.exp(z_log_var / 2) * epsilon
        return z

    # Init beta value
    beta = K.variable(initial_beta_val, name="beta")

    # Input place holder for RNAseq data with specific input size
    original_dim = input_dim

    # Input place holder for RNAseq data with specific input size
    rnaseq_input = Input(shape=(original_dim, ), name="input")

    if batch_normalize_inputs:
        batchnorm_input = BatchNormalization(
            name="batchnorm_input")(rnaseq_input)
    else:
        batchnorm_input = rnaseq_input

    prev = batchnorm_input
    encoder_target = batchnorm_input
    if hidden_dims:
        for i, hidden_dim in enumerate(hidden_dims):
            z, z_mean_component = make_variational_layer(
                prev,
                hidden_dim,
                batch_normalize_intermediaries,
                relu_intermediaries,
                sampling,
                name=f"hidden_dim_{i}",
            )
            prev = z
            # the encoder part to have a path that doesn't do sampling or ReLU'ing
            encoder_target = z_mean_component(encoder_target)
    else:
        z = prev

    # variational layer for latent dim
    l_mean_component = Dense(latent_dim,
                             kernel_initializer="glorot_uniform",
                             name="latent_mean")
    l_mean_dense_linear = l_mean_component(z)

    if batch_normalize_embedding:
        l_mean_dense_batchnorm = BatchNormalization(
            name="batchnorm_latent_mean")(l_mean_dense_linear)
    else:
        l_mean_dense_batchnorm = l_mean_dense_linear

    if relu_embedding:
        l_mean_encoded = Activation(
            "relu", name="relu_latent_mean")(l_mean_dense_batchnorm)
    else:
        l_mean_encoded = l_mean_dense_batchnorm

    l_log_var_dense_linear = Dense(latent_dim,
                                   kernel_initializer="glorot_uniform",
                                   name="latent_log_var")(z)

    if batch_normalize_embedding:
        l_log_var_dense_batchnorm = BatchNormalization(
            name="batchnorm_latent_log_var")(l_log_var_dense_linear)
    else:
        l_log_var_dense_batchnorm = l_log_var_dense_linear

    if relu_embedding:
        l_log_var_encoded = Activation(
            "relu", name="relu_latent_log_var")(l_log_var_dense_batchnorm)
    else:
        l_log_var_encoded = l_log_var_dense_batchnorm

    l = Lambda(sampling, output_shape=(latent_dim, ),
               name="sample_latent")([l_mean_encoded, l_log_var_encoded])

    # the encoder part's l to come from the path that only considers mean
    encoder_target = l_mean_component(encoder_target)
    if batch_normalize_embedding:
        encoder_target = BatchNormalization(
            name="batchnorm_encoder_target")(encoder_target)
    if relu_embedding:
        encoder_target = Activation("relu",
                                    name="relu_encoder_target")(encoder_target)

    # decoder latent->hidden
    prev = l
    if hidden_dims:
        for i, hidden_dim in reversed(list(enumerate(hidden_dims))):
            h = Dense(
                hidden_dim,
                kernel_initializer="glorot_uniform",
                activation="relu",
                name=f"decode_hidden_{i}",
            )(prev)
            prev = h
    else:
        h = Dense(
            latent_dim,
            kernel_initializer="glorot_uniform",
            activation="relu",
            name="decode_hidden",
        )(prev)
    reconstruction = Dense(
        original_dim,
        kernel_initializer="glorot_uniform",
        activation="sigmoid",
        name="reconstruction",
    )(h)

    adam = optimizers.Adam(lr=learning_rate)
    vae = Model(rnaseq_input, reconstruction)
    reconstruction_loss = original_dim * metrics.binary_crossentropy(
        rnaseq_input, reconstruction)
    kl_loss = -0.5 * K.sum(
        1 + l_log_var_encoded - K.square(l_mean_encoded) -
        K.exp(l_log_var_encoded),
        axis=-1,
    )
    # NOTE: K.get_value(beta) reads the variable once at graph-construction time,
    # so later updates to beta (e.g. warm-up callbacks) won't affect this loss.
    vae_loss = K.mean(reconstruction_loss + (K.get_value(beta) * kl_loss))
    vae.add_loss(vae_loss)
    vae.compile(optimizer=adam)

    # non-sampling encoder
    encoder = Model(rnaseq_input, encoder_target)

    # sampling encoder
    sampling_encoder = Model(rnaseq_input, l)

    # Also, create a decoder model
    encoded_input = Input(shape=(latent_dim, ))
    prev = encoded_input
    if hidden_dims:
        for i in reversed(range(len(hidden_dims) + 1)):
            prev = vae.layers[-(i + 1)](prev)
    decoder = Model(encoded_input, prev)

    return vae, encoder, sampling_encoder, decoder, beta
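
A minimal usage sketch for stacked_vae, assuming the make_variational_layer helper used above is available; the dimensions, data, and training call below are illustrative assumptions, not taken from the original project:

vae, encoder, sampling_encoder, decoder, beta = stacked_vae(
    input_dim=5000,        # e.g. number of features per sample
    hidden_dims=[300],
    latent_dim=100,
    learning_rate=0.0005,
)

# x_train: float32 matrix of shape (n_samples, 5000), scaled to [0, 1]
vae.fit(x_train, epochs=50, batch_size=50, shuffle=True)
embedding = encoder.predict(x_train)         # deterministic (mean-path) embedding
reconstruction = decoder.predict(embedding)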
Example #24
    Model_Inputs.append(Filter_In)
    Model_Inputs.append(Unit_Con)

    # Produce the Model
    ModVAE = Model(Model_Inputs, Model_Outputs)

    KL_loss = 0
    RE_loss = 0
    MSE_loss = 0

    # Add the Loss Function
    for jj in range(batch_size):
        KL_loss += -0.5 * K.sum(
            1 + Z_log_sd[jj] - K.square(Z_mean[jj]) - K.exp(Z_log_sd[jj]),
            axis=-1)
        RE_loss += im_dim * im_dim * metrics.binary_crossentropy(
            K.flatten(Model_Inputs[jj]), K.flatten(Model_Outputs[jj]))
        MSE_loss += metrics.mean_squared_error(F_mu, Clamped_Latents[jj])

    vae_loss = K.mean(alpha * RE_loss + beta * KL_loss + lambda_ * MSE_loss)
    ModVAE.add_loss(vae_loss)

    ModVAE.compile(optimizer='adam', loss=None)
    ModVAE.summary()

    # ModVAE.load_weights("modvae_32_final.h5")

    if gen_data:
        train_data = LoadData(dup=False)
        test_data = LoadData(method='test')

        ModVAE.load_weights("modvae_32_final.h5")
Example #25
 def vae_loss(x, x_decoded_mean_squash):
     xent_loss = self.image_size * self.image_size * metrics.binary_crossentropy(K.flatten(x),
                                                                                 K.flatten(x_decoded_mean_squash))
     kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     vae_loss = K.mean(xent_loss + kl_loss)
     return vae_loss
Example #26
 def vae_loss(x, x_decoded_mean):
     xent_loss = original_dim * metrics.binary_crossentropy(
         x, x_decoded_mean)
     kl_loss = -0.5 * K.sum(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return xent_loss + kl_loss
Example #27
 def vae_loss(self, x, x_decoded_mean):
     xent_loss = self.original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
     kl_loss = - 0.5 * backend.sum(1 + self.z_log_var_encoded - backend.square(self.z_mean_encoded) -
                             backend.exp(self.z_log_var_encoded), axis=-1)
     return backend.mean(xent_loss + (backend.get_value(self.beta) * kl_loss))
Example #28
_conv_1 = conv_1(_reshape_1)
up_1 = UpSampling2D((2, 2))
_up_1 = up_1(_conv_1)
conv_2 = Conv2D(16, (3, 3), activation='relu', padding='same')
_conv_2 = conv_2(_up_1)
up_2 = UpSampling2D((2, 2))
_up_2 = up_2(_conv_2)
out = Conv2D(1, (3, 3), activation='sigmoid', padding='same')
outputs = out(_up_2)

vae = Model(inputs, outputs, name='vae')

###########################################################

# Compute VAE loss
xent_loss = original_dim * metrics.binary_crossentropy(K.flatten(inputs),
                                                       K.flatten(outputs))
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()

# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape(
    (len(x_train), input_shape[0], input_shape[1], input_shape[2]))
Example #29
                             activation='sigmoid')

hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)

# instantiate VAE model
vae = Model(x, x_decoded_mean_squash)

# Compute VAE loss
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(
    K.flatten(x),
    K.flatten(x_decoded_mean_squash))
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae.add_loss(vae_loss)

vae.compile(optimizer='rmsprop')
vae.summary()

# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
Example #30

z = Lambda(sampling, output_shape=(latent_dim, ))([z_mean, z_log_var])

# We instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
# x_decoded_mean is the final reconstructed x
x_decoded_mean = decoder_mean(h_decoded)

# Instantiate VAE model
vae = Model(x, x_decoded_mean)

# Compute VAE loss
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = -0.5 * k.sum(1 + z_log_var - k.square(z_mean) - k.exp(z_log_var),
                       axis=-1)
vae_loss = k.mean(xent_loss + kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()

# Train the VAE on Mnist digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
# np.prod() gives the product of all the elements (the flattened image size)
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
Example #31
 def log_p_x_given_z(x, x_decoded):
     # The cross-entropy loss here is also -log(p(x|z)).
     xent_loss = -data_size * metrics.binary_crossentropy(x, x_decoded)
     return K.mean(xent_loss)
Example #32
 def log_px_l(x, y):
     return -data_size * metrics.binary_crossentropy(
         K.reshape(sym_x_l, (K.shape(sym_x_l)[0], -1)),
         K.reshape(mux_train_l, (K.shape(sym_x_l)[0], -1)))
Example #33
 def vae_loss(self, x_input, x_decoded):
     reconstruction_loss = original_dim * metrics.binary_crossentropy(x_input, x_decoded)
     kl_loss = - 0.5 * K.sum(1 + l_log_var_dense_linear - K.square(l_mean_dense_linear) -
                             K.exp(l_log_var_dense_linear), axis=-1)
     return K.mean(reconstruction_loss + (K.get_value(beta) * kl_loss))
Example #34
                             padding='valid',
                             activation='sigmoid')

hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)

# instantiate VAE model
vae = Model(x, x_decoded_mean_squash)

# Compute VAE loss
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(
    K.flatten(x), K.flatten(x_decoded_mean_squash))
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
if K.backend() == 'mxnet':
    raise NotImplementedError(
        "MXNet Backend: Custom loss is not supported yet.")
vae.add_loss(vae_loss)

vae.compile(optimizer='rmsprop')
vae.summary()

# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
Example #35
 def log_px_given_z_u(x, y):
     return -data_size * metrics.binary_crossentropy(
         K.reshape(x_u, (K.shape(x_u)[0], -1)),
         K.reshape(mux_train, (K.shape(x_u)[0], -1)))
Example #36
 def vae_loss(self, x, x_decoded_mean):
     xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
     kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return K.mean(xent_loss + kl_loss)
Example #37
def vae_loss(x, x_decoded_mean):
    xent_loss = metrics.binary_crossentropy(x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(
        1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    print(xent_loss)
    return xent_loss + kl_loss
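
Unlike the add_loss variants above, a loss closure like this one is meant to be passed to compile directly, with the input tensor doubling as the training target. A minimal sketch, assuming vae, x_train, x_test, batch_size and epochs are defined as in the earlier examples:

vae.compile(optimizer='rmsprop', loss=vae_loss)
vae.fit(x_train, x_train,
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, x_test))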