Code Example #1
 def __init__(self, latent_dim=100, sparsity_weight=1, sparsity_objective=0.1, decay_positive_weights=0, decay_negative_weights=1, decay_weight=1):
     """
     Create a sparse shallow AE with the custom KL divergence regularizer, enforcing weight non-negativity with an asymmetric weight decay.
     Arguments:
         sparsity_weight: positive float - the weight of the sparsity cost.
         sparsity_objective: float between 0 and 1 - the sparsity parameter.
         decay_positive_weights: positive float - the weight decay parameter for the positive weights.
         decay_negative_weights: positive float - the weight decay parameter for the negative weights.
         decay_weight: positive float - the weight of the whole non negativity cost.
     """
     self.latent_dim = latent_dim
     self.sparsity_weight = sparsity_weight
     self.sparsity_objective = sparsity_objective
     self.decay_positive_weights = decay_positive_weights        
     self.decay_negative_weights = decay_negative_weights
     self.decay_weight = decay_weight
     input_img = Input(shape=(28, 28, 1))  # adapt this if using `channels_first` image data format
     x = Flatten()(input_img)
     encoded = Dense(latent_dim, activation='sigmoid',
                     activity_regularizer=custom_regularizers.KL_divergence(beta=self.sparsity_weight,
                                                                            rho=self.sparsity_objective),
                     kernel_regularizer=custom_regularizers.asymmetric_weight_decay(alpha=self.decay_positive_weights,
                                                                                    beta=self.decay_negative_weights,
                                                                                    lam=self.decay_weight))(x)
     self.encoder = Model(input_img, encoded, name='encoder')
     encoded_img = Input(shape=(self.latent_dim,))  
     x = Dense(28*28)(encoded_img)
     x = LeakyReLU(alpha=0.1)(x)
     decoded = Reshape((28,28,1))(x)
     self.decoder = Model(encoded_img, decoded, name='decoder')
     encoded = self.encoder(input_img)
     decoded = self.decoder(encoded)
     self.autoencoder = Model(input_img, decoded)
     self.autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
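
The examples rely on a project-local custom_regularizers module that is not shown. A minimal sketch of what KL_divergence and asymmetric_weight_decay might look like as Keras regularizers follows; only the class names and keyword arguments are taken from the snippets, while the bodies assume the standard sparse-autoencoder KL penalty and an asymmetric L2 weight decay.

# Hypothetical sketch of the custom_regularizers module used above; the
# implementations are assumptions, not the project's actual code.
import keras.backend as K
from keras.regularizers import Regularizer


class KL_divergence(Regularizer):
    """Activity regularizer: KL divergence between the mean activation
    of each latent unit (rho_hat) and a target sparsity level rho."""

    def __init__(self, beta=1.0, rho=0.1):
        self.beta = beta  # weight of the sparsity cost
        self.rho = rho    # target mean activation

    def __call__(self, activations):
        rho_hat = K.mean(activations, axis=0)  # mean activation per latent unit
        kl = (self.rho * K.log(self.rho / (rho_hat + K.epsilon()))
              + (1. - self.rho) * K.log((1. - self.rho) / (1. - rho_hat + K.epsilon())))
        return self.beta * K.sum(kl)

    def get_config(self):
        return {'beta': self.beta, 'rho': self.rho}


class asymmetric_weight_decay(Regularizer):
    """Kernel regularizer: L2 decay with separate coefficients for
    positive (alpha) and negative (beta) weights, scaled by lam."""

    def __init__(self, alpha=0.0, beta=1.0, lam=1.0):
        self.alpha = alpha
        self.beta = beta
        self.lam = lam

    def __call__(self, weights):
        pos = K.sum(K.square(K.relu(weights)))   # positive part of the kernel
        neg = K.sum(K.square(K.relu(-weights)))  # negative part of the kernel
        return self.lam * (self.alpha * pos + self.beta * neg)

    def get_config(self):
        return {'alpha': self.alpha, 'beta': self.beta, 'lam': self.lam}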
Code Example #2
 def __init__(self, latent_dim=100, beta=1, rho=0.1):
     """
     Create a sparse shallow AE with the custom KL divergence regularizer.
     Arguments:
         beta: positive float - the weight of the sparsity cost.
         rho: float between 0 and 1 - the sparsity parameter.
     """
     self.latent_dim = latent_dim
     self.beta = beta
     self.rho = rho
     input_img = Input(shape=(28, 28, 1))  # adapt this if using `channels_first` image data format
     x = Flatten()(input_img)
     encoded = Dense(latent_dim,
                     activation='sigmoid',
                     activity_regularizer=custom_regularizers.KL_divergence(
                         beta=self.beta, rho=self.rho))(x)
     self.encoder = Model(input_img, encoded, name='encoder')
     encoded_img = Input(shape=(self.latent_dim, ))
     x = Dense(28 * 28)(encoded_img)
     x = LeakyReLU(alpha=0.1)(x)
     decoded = Reshape((28, 28, 1))(x)
     self.decoder = Model(encoded_img, decoded, name='decoder')
     encoded = self.encoder(input_img)
     decoded = self.decoder(encoded)
     self.autoencoder = Model(input_img, decoded)
     self.autoencoder.compile(optimizer='adadelta',
                              loss='mean_squared_error')
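
The snippets only show __init__ methods, so the enclosing class name is not visible. Assuming it is something like SparseShallowAE, typical usage of the model from code example #2 on MNIST could look like the sketch below; the class name and data preparation are illustrative assumptions.

# Hypothetical usage sketch; SparseShallowAE stands in for the class whose
# __init__ is shown in code example #2.
import numpy as np
from keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()
x_train = np.expand_dims(x_train.astype('float32') / 255., -1)  # (N, 28, 28, 1)
x_test = np.expand_dims(x_test.astype('float32') / 255., -1)

ae = SparseShallowAE(latent_dim=100, beta=1, rho=0.1)
ae.autoencoder.fit(x_train, x_train,
                   epochs=50, batch_size=128, shuffle=True,
                   validation_data=(x_test, x_test))

codes = ae.encoder.predict(x_test)           # sparse latent codes
reconstructions = ae.decoder.predict(codes)  # decoded images, shape (N, 28, 28, 1)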
Code Example #3
 def __init__(self, latent_dim=100, nb_rows=28, nb_columns=28, nb_input_channels=1, one_channel_output=True, 
                 sparsity_weight=1, sparsity_objective=0.1):
     """
     Create a sparse shallow AE with the custom KL divergence regularizer, enforcing weight non-negativity with the Keras NonNeg constraint.
     Arguments:
         sparsity_weight: positive float - the weight of the sparsity cost.
         sparsity_objective: float between 0 and 1 - the sparsity parameter.
     """
     self.latent_dim = latent_dim
     self.nb_rows=nb_rows
     self.nb_columns=nb_columns
     self.nb_input_channels=nb_input_channels
     if one_channel_output:
         self.nb_output_channels=1
     else:
         self.nb_output_channels=nb_input_channels
     self.sparsity_weight = sparsity_weight
     self.sparsity_objective = sparsity_objective
     input_img = Input(shape=(self.nb_rows, self.nb_columns, nb_input_channels))  # adapt this if using `channels_first` image data format
     x = Flatten()(input_img)
     encoded = Dense(latent_dim, activation='sigmoid', 
                         activity_regularizer=custom_regularizers.KL_divergence(beta=self.sparsity_weight,  
                                                                                 rho=self.sparsity_objective))(x)
     self.encoder = Model(input_img, encoded, name='encoder')
     encoded_img = Input(shape=(self.latent_dim,))  
     x = Dense(self.nb_rows*self.nb_columns*self.nb_output_channels, 
                         kernel_constraint=constraints.non_neg())(encoded_img)
     x = LeakyReLU(alpha=0.1)(x)
     decoded = Reshape((self.nb_rows,self.nb_columns,self.nb_output_channels))(x)
     self.decoder = Model(encoded_img, decoded, name='decoder')
     encoded = self.encoder(input_img)
     decoded = self.decoder(encoded)
     self.autoencoder = Model(input_img, decoded)
     self.autoencoder.compile(optimizer='adadelta', loss='mean_squared_error', metrics=['mse'])
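
None of the snippets show their imports. They presumably look roughly like the block below (Keras 2.x module paths assumed; custom_regularizers is the project-local module sketched after code example #1).

# Imports the snippets appear to rely on (assumed, Keras 2.x paths).
from keras.layers import Input, Flatten, Dense, Reshape, LeakyReLU
from keras.models import Model
from keras import constraints

import custom_regularizers  # project-local module providing KL_divergence, asymmetric_weight_decay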
Code Example #4
 def __init__(self, latent_dim=100, nb_rows=28, nb_columns=28, nb_input_channels=1, one_channel_output=True, 
                                    sparsity_weight=1, sparsity_objective=0.1):
     """
     Create a sparse shallow AE with the custom KL divergence regularizer. The training objective combines the reconstruction error with a KL divergence penalty on each code coefficient (KL_div + rec_err).
     """
     self.latent_dim = latent_dim
     self.nb_input_channels=nb_input_channels
     self.nb_rows=nb_rows
     self.nb_columns=nb_columns
     if one_channel_output:
         self.nb_output_channels=1
     else:
         self.nb_output_channels=nb_input_channels
     self.sparsity_weight = sparsity_weight
     self.sparsity_objective = sparsity_objective
     input_img = Input(shape=(self.nb_rows, self.nb_columns, self.nb_input_channels))  # adapt this if using `channels_first` image data format
     x = Flatten()(input_img)
     encoded = Dense(latent_dim, activation='sigmoid', 
                     activity_regularizer=custom_regularizers.KL_divergence(beta=self.sparsity_weight, 
                                                                             rho=self.sparsity_objective))(x)
     self.encoder = Model(input_img, encoded, name='encoder')
     encoded_img = Input(shape=(self.latent_dim,))  
     x = Dense(self.nb_rows*self.nb_columns*self.nb_output_channels)(encoded_img)
     x = LeakyReLU(alpha=0.1)(x)
     decoded = Reshape((self.nb_rows,self.nb_columns,self.nb_output_channels))(x)
     self.decoder = Model(encoded_img, decoded, name='decoder')
     encoded = self.encoder(input_img)
     decoded = self.decoder(encoded)
     self.autoencoder = Model(input_img, decoded)
     self.autoencoder.compile(optimizer='adadelta', loss='mean_squared_error', metrics=['mse'])
Code Example #5
def createAndTrain(x_train,
                   y_train,
                   x_test,
                   y_test,
                   latent_dim,
                   strDate,
                   sparse=False,
                   save=False):

    print('\n\n **************** Code size: ', latent_dim,
          ' **************** \n')

    input_img = Input(shape=(28, 28, 1))  # adapt this if using `channels_first` image data format
    x = Flatten()(input_img)

    if sparse:
        encoded = Dense(
            latent_dim,
            activation='sigmoid',
            activity_regularizer=custom_regularizers.KL_divergence())(x)
    else:
        encoded = Dense(latent_dim, activation='sigmoid')(x)

    encoder = Model(input_img, encoded, name='encoder')
    encoder.summary()

    encoded_img = Input(shape=(latent_dim,))  # input to the decoder is the latent code

    x = Dense(28 * 28)(encoded_img)
    x = LeakyReLU(alpha=0.1)(x)
    decoded = Reshape((28, 28, 1))(x)

    decoder = Model(encoded_img, decoded, name='decoder')

    # assemble the autoencoder from the encoder and decoder sub-models
    encoded = encoder(input_img)
    decoded = decoder(encoded)
    autoencoder = Model(input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

    t0 = time.time()

    autoencoder.fit(
        x_train,
        x_train,
        epochs=200,
        verbose=2,
        batch_size=128,
        shuffle=True,
        validation_data=(x_test, x_test),
    )

    t1 = time.time()

    if latent_dim % 50 == 0 or save:
        # `dir` is expected to be a module-level output-directory prefix defined elsewhere
        model_path = dir + '_AEinfoGAN_' + str(latent_dim) + '.h5'
        autoencoder.save(model_path)
    training_time = t1 - t0
    training_error = autoencoder.evaluate(x_train, x_train, verbose=0)
    test_error = autoencoder.evaluate(x_test, x_test, verbose=0)

    return training_time, training_error, test_error
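
createAndTrain expects the data to be preprocessed as in the usage sketch above and, when saving, a module-level dir prefix to exist. A hedged driver loop for sweeping the code size might look like this; the latent_dim values and the date format are illustrative assumptions.

# Hypothetical driver for createAndTrain; assumes x_train/y_train, x_test/y_test
# and (for the save branch) `dir` are prepared elsewhere.
import time

strDate = time.strftime('%Y-%m-%d')
results = {}
for latent_dim in (25, 50, 100, 200):
    results[latent_dim] = createAndTrain(x_train, y_train, x_test, y_test,
                                         latent_dim, strDate,
                                         sparse=True, save=False)

for latent_dim, (train_time, train_err, test_err) in results.items():
    print('code size %d: %.1fs, train loss %.4f, test loss %.4f'
          % (latent_dim, train_time, train_err, test_err))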