Example #1
    def design_and_compile_full_model(self):
        self.encoder = self.design_and_compile_encoder()
        self.sampler = self.design_and_compile_sampler()
        self.decoder = self.design_and_compile_decoder()
        # Encoder input: fixed-length one-hot sequences.
        encoder_input = Input(shape=(self.max_sequence_length,
                                     self.vocabulary_size),
                              name="encoder_input")
        # Decoder input keeps a None time dimension so sequences of any
        # length (down to a single timestep) can be fed at inference time.
        decoder_input = Input(shape=(None, self.vocabulary_size),
                              name="decoder_input")

        z_mean, z_log_var = self.encoder(encoder_input)
        z = self.sampler([z_mean, z_log_var])
        rnn_state_last, x_decoded_mean = self.decoder([z, decoder_input])
        self.model = Model([encoder_input, decoder_input], x_decoded_mean)

        # Reconstruction term: binary cross-entropy over the flattened
        # sequence, rescaled by the input dimensionality (Keras averages
        # over elements).
        x = encoder_input
        xent_loss = self.original_dim * metrics.binary_crossentropy(
            K.flatten(x), K.flatten(x_decoded_mean))
        # Closed-form KL between N(z_mean, exp(z_log_var)) and N(0, I).
        kl_loss = -0.5 * K.sum(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        vae_loss = K.mean(xent_loss + kl_loss)

        self.model.add_loss(vae_loss)
        self.model.compile(optimizer="adam")
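The snippet assumes `design_and_compile_sampler` returns a layer that applies the reparameterization trick. A minimal sketch of such a sampler (the function name and layer construction are assumptions, not taken from the original):

from keras import backend as K
from keras.layers import Lambda

def sampling(args):
    # Reparameterization trick: z = mean + exp(0.5 * log_var) * eps,
    # with eps drawn from a standard normal.
    z_mean, z_log_var = args
    eps = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * eps

# A layer compatible with `self.sampler([z_mean, z_log_var])` above.
sampler = Lambda(sampling, name="sampler")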
Example #2
    def compile(self):
        x = Input(shape=(self.input_dim, ), name="input")
        latent_dim = self.layer_sizes[-1]
        encoder = self.make_encoder()
        decoder = self.make_decoder()
        sampling_layer = Lambda(sampling_func,
                                output_shape=(latent_dim, ),
                                name="sampling_layer")

        z_mean, z_log_var = encoder(x)
        z = sampling_layer([z_mean, z_log_var])
        x_decoded_mean = decoder(z)
        model = Model(inputs=x, outputs=x_decoded_mean)

        if self.recons_type == "xent":
            recons_loss = self.input_dim * metrics.binary_crossentropy(
                Flatten()(x),
                Flatten()(x_decoded_mean))
        elif self.recons_type == "mse":
            recons_loss = metrics.mse(x, x_decoded_mean)
        else:
            # Fail fast instead of leaving recons_loss undefined.
            raise ValueError("unknown recons_type: %r" % self.recons_type)

        kl_loss = -0.5 * tf.reduce_sum(
            1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), axis=-1)
        vae_loss = tf.reduce_mean(recons_loss + self.beta * kl_loss)

        model.add_loss(vae_loss)
        adam = Adam(learning_rate=self.learning_rate)
        model.compile(optimizer=adam)
        self.model = model
        self.encoder = encoder
        self.decoder = decoder
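Since the loss is attached via `add_loss`, `compile` takes no `loss` argument and `fit` needs no targets. A possible call, assuming the surrounding class is instantiated as `vae` (the names and hyperparameters are placeholders):

vae.compile()  # builds vae.model with the VAE loss already attached
vae.model.fit(x_train, epochs=50, batch_size=128)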
Example #3
    def call(self, x):
        z_mean, z_log_var = self.encoder(x)
        z = sampling((z_mean, z_log_var))
        y_pred = self.decoder(z)
        # binary_crossentropy averages over elements, so rescale by
        # original_dim (a module-level constant here) to get a sum.
        xent_loss = original_dim * metrics.binary_crossentropy(x, y_pred)
        kl_loss = -0.5 * K.sum(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        loss = xent_loss + kl_loss
        return loss
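All of these snippets use the same closed-form KL term: with `z_mean` holding $\mu$ and `z_log_var` holding $\log\sigma^2$, the KL divergence from the approximate posterior to the standard normal prior is

$$
D_{\mathrm{KL}}\big(\mathcal{N}(\mu, \sigma^2 I) \,\|\, \mathcal{N}(0, I)\big)
= -\frac{1}{2} \sum_{j=1}^{d} \big(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\big),
$$

which is exactly the `kl_loss` expression above.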
Example #4
def vae_loss(x, x_decoded_mean):
    # Reconstruction term: per-pixel log loss (binary cross-entropy),
    # rescaled by original_dim since Keras averages over elements.
    xent_loss = original_dim * metrics.binary_crossentropy(x,
                                                           x_decoded_mean)

    # z_mean and z_log_var are captured from the enclosing scope.
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)

    return xent_loss + kl_loss
Example #5
def vae_loss(inputs, outputs):
    # self, z_mean and z_log_var are captured from the enclosing scope
    # (this function is defined inside a method of the model class).
    xent_loss = metrics.binary_crossentropy(K.flatten(inputs),
                                            K.flatten(outputs))
    xent_loss *= self.image_size * self.image_size
    # Here z_log_var holds log(sigma), not log(sigma^2), hence the
    # factor of 2: log(sigma^2) = 2 * log(sigma).
    kl_loss = 1 + z_log_var * 2 - K.square(z_mean) - K.exp(z_log_var * 2)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(xent_loss + kl_loss)
    return vae_loss
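When the encoder predicts $\log\sigma$ rather than $\log\sigma^2$ (as this snippet appears to assume), the same KL term is obtained by substituting $\log\sigma^2 = 2\log\sigma$:

$$
\mathrm{KL} = -\frac{1}{2} \sum_{j} \big(1 + 2\log\sigma_j - \mu_j^2 - e^{2\log\sigma_j}\big)
$$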
Example #6
    def vae_loss(self, x_in, x_out):
        # --- reconstruction loss
        x_in_flat = K.flatten(x_in)
        x_out_flat = K.flatten(x_out)
        recon_loss = binary_crossentropy(x_in_flat, x_out_flat)
        # recon_loss = mse(x_in_flat, x_out_flat)  # MSE alternative
        recon_loss = recon_loss * original_dim
        # --- KL loss
        KL_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        KL_loss = (-0.5) * K.sum(KL_loss, axis=-1)
        # --- total loss
        return K.mean(recon_loss + KL_loss)
Example #7
def vae_loss_function(true, pred):
    # --- reconstruction loss
    true_flat = K.flatten(true)
    pred_flat = K.flatten(pred)
    # Flattening is required so the reconstruction loss can be combined
    # with the per-sample KL loss.
    # recon_loss = mse(true_flat, pred_flat)  # alternative: MSE instead of BCE
    recon_loss = binary_crossentropy(true_flat, pred_flat)
    # Rescale by original_dim to undo the per-element averaging introduced
    # by flattening; alternatively, down-scale the KL term instead.
    recon_loss = recon_loss * original_dim
    # --- KL loss
    KL_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    KL_loss = (-0.5) * K.sum(KL_loss, axis=-1)
    # --- total loss
    vae_loss = K.mean(KL_loss + recon_loss)
    return vae_loss
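`z_mean`, `z_log_var`, and `original_dim` must be in scope when this loss runs. One way to make that explicit is a small factory that closes over them; a sketch (`make_vae_loss` is a hypothetical helper, and this closure style works in graph-mode Keras where the captured tensors belong to the same graph as the model):

def make_vae_loss(z_mean, z_log_var, original_dim):
    # Returns a Keras-compatible loss(y_true, y_pred) that closes over
    # the latent tensors of one particular model instance.
    def vae_loss_function(true, pred):
        recon_loss = original_dim * binary_crossentropy(
            K.flatten(true), K.flatten(pred))
        kl_loss = -0.5 * K.sum(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(recon_loss + kl_loss)
    return vae_loss_function

model.compile(optimizer="adam",
              loss=make_vae_loss(z_mean, z_log_var, original_dim))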
Example #8
def obj(X, X_mu):
    # X, X_mu: true and reconstructed pixels; Z_mu, Z_lsgms, Y, Y_mu and
    # Y_lsgms are captured from the enclosing scope.
    X = backend.flatten(X)
    X_mu = backend.flatten(X_mu)

    # Negative KL term (note: mean over latent dims rather than sum).
    Lp = 0.5 * backend.mean(
        1 + Z_lsgms - backend.square(Z_mu) - backend.exp(Z_lsgms), axis=-1)

    # Pixels have a Bernoulli distribution.
    Lx = -metrics.binary_crossentropy(X, X_mu)

    # Voxels have a Gaussian distribution.
    Ly = Y_normal_logpdf(Y, Y_mu, Y_lsgms)

    # Weighted evidence lower bound; the pixel term is heavily up-weighted.
    lower_bound = backend.mean(Lp + 10000 * Lx + Ly)

    # Minimize the negative lower bound.
    cost = -lower_bound

    return cost
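`Y_normal_logpdf` is not shown in the snippet; a plausible definition, assuming `lsgms` holds the log-variance, is the Gaussian log-density summed over the feature axis:

import math

def Y_normal_logpdf(y, mu, lsgms):
    # log N(y; mu, exp(lsgms)), summed over the last axis
    return backend.sum(
        -0.5 * math.log(2 * math.pi) - 0.5 * lsgms
        - 0.5 * backend.square(y - mu) / backend.exp(lsgms),
        axis=-1)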
Example #9
import numpy as np
import tensorflow as tf
from keras import backend, metrics
from sklearn.metrics import confusion_matrix, precision_score, recall_score


def show_metrics(Y, prediction):
    Y = np.squeeze(Y)
    prediction = np.squeeze(prediction)

    # Threshold the predicted probabilities at 0.5 for the hard metrics.
    print(confusion_matrix(Y, prediction > .5))
    print("recall", recall_score(Y, prediction > .5))
    print("precision", precision_score(Y, prediction > .5))

    # Keras placeholders for a one-off log-loss evaluation in the backend
    # session (TF1-style); log loss uses the raw probabilities.
    labels_k = backend.placeholder([None], dtype=tf.float32)
    preds_k = backend.placeholder([None], dtype=tf.float32)

    val_loss_op = backend.mean(metrics.binary_crossentropy(labels_k, preds_k))

    loss, = backend.get_session().run([val_loss_op],
                                      feed_dict={
                                          labels_k: Y,
                                          preds_k: prediction
                                      })
    print("log loss", loss)
Example #10
    def design_and_compile_full_model(self):
        self.encoder = self.design_and_compile_encoder()
        self.sampler = self.design_and_compile_sampler()
        self.decoder = self.design_and_compile_decoder()
        # Encoder input; its shape is inferred from the training data.
        x = Input(shape=self.X_tr.shape[1:], name="input")
        z_mean, z_log_var = self.encoder(x)
        z = self.sampler([z_mean, z_log_var])
        # Unlike Example #1, the decoder is conditioned on z alone
        # (no separate decoder input / teacher forcing).
        x_decoded_mean = self.decoder(z)
        self.model = Model(x, x_decoded_mean)

        xent_loss = self.original_dim * metrics.binary_crossentropy(
            K.flatten(x), K.flatten(x_decoded_mean)
        )
        kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        vae_loss = K.mean(xent_loss + kl_loss)

        self.model.add_loss(vae_loss)
        self.model.compile(optimizer='adam')
Example #11
def xent(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred)


def bce_logdice_loss(y_true, y_pred):
    # BCE plus a -log(dice coefficient) term: since dice_loss = 1 - dice,
    # 1. - dice_loss(y_true, y_pred) is the dice coefficient itself.
    return binary_crossentropy(y_true,
                               y_pred) - K.log(1. - dice_loss(y_true, y_pred))
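`dice_loss` is assumed to be defined elsewhere; a common soft-dice formulation (the smoothing constant is an assumption) looks like:

def dice_loss(y_true, y_pred, smooth=1.0):
    # Soft dice loss: 1 - dice coefficient, computed on flattened tensors.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    dice = (2. * intersection + smooth) / (
        K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return 1. - dice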
Example #13
        # (tail of the Decoder's call method)
        activation = self.hidden_layer(z)
        output_layer = self.output_layer(activation)
        return output_layer


encoder = Encoder(intermediate_dim, latent_dim)
decoder = Decoder(intermediate_dim, original_dim)

inputs = Input(batch_shape=(batch_size, original_dim))
z_mean, z_log_var = encoder(inputs)
z = Lambda(sampling, output_shape=(latent_dim, ))([z_mean, z_log_var])
y_pred = decoder(z)
autoencoder = Model(inputs, y_pred, name='autoencoder')

# reconstruction loss
xent_loss = original_dim * metrics.binary_crossentropy(inputs, y_pred)
# KL loss
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)
# vae_loss = K.mean(xent_loss + kl_loss)  # scalar variant for add_loss

vae_loss = xent_loss + kl_loss

# Works: register the symbolic loss tensor on the model, then compile
# without a `loss` argument.
# autoencoder.add_loss(vae_loss)
# autoencoder.compile(optimizer='rmsprop')

# Does not work: `loss=` expects a callable loss(y_true, y_pred) or a
# registered loss name, not a tensor.
# autoencoder.compile(optimizer='rmsprop', loss=vae_loss)
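Putting the working option together; `add_loss` wants a scalar, hence the `K.mean` (`x_train` and the epoch count are placeholders):

autoencoder.add_loss(K.mean(vae_loss))
autoencoder.compile(optimizer='rmsprop')
autoencoder.fit(x_train, epochs=50, batch_size=batch_size)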
Example #14
def logx_loss(y_true, y_pred):
    y_true_flat = K.flatten(y_true)
    y_pred_flat = K.flatten(y_pred)
    # 28 * 28 = 784 is original_dim for MNIST-sized images; it rescales
    # the per-pixel average back to a per-image sum.
    xent_loss = 28 * 28 * metrics.binary_crossentropy(y_true_flat, y_pred_flat)
    return xent_loss
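Because this is a plain reconstruction loss with no free tensors, it can be passed directly to `compile` (the model and data names are placeholders):

model.compile(optimizer='adam', loss=logx_loss)
model.fit(x_train, x_train, epochs=10, batch_size=128)  # autoencoder: targets = inputs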