Example #1
import numpy as np

from keras.layers import containers, core
from keras.models import Sequential


def test_autoencoder_advanced():
    encoder = containers.Sequential([core.Dense(5, input_shape=(10,))])
    decoder = containers.Sequential([core.Dense(10, input_shape=(5,))])
    X_train = np.random.random((100, 10))
    X_test = np.random.random((100, 10))

    model = Sequential()
    model.add(core.Dense(output_dim=10, input_dim=10))
    autoencoder = core.AutoEncoder(encoder=encoder, decoder=decoder,
                                   output_reconstruction=True)
    model.add(autoencoder)

    # training the autoencoder:
    model.compile(optimizer='sgd', loss='mse')
    assert autoencoder.output_reconstruction

    model.fit(X_train, X_train, nb_epoch=1, batch_size=32)

    # predicting compressed representations of inputs:
    autoencoder.output_reconstruction = False  # the autoencoder has to be recompiled after modifying this property
    assert not autoencoder.output_reconstruction
    model.compile(optimizer='sgd', loss='mse')
    representations = model.predict(X_test)
    assert representations.shape == (100, 5)

    # the model is still trainable, although it now expects compressed representations as targets:
    model.fit(X_test, representations, nb_epoch=1, batch_size=32)

    # to keep training against the original inputs, just switch back output_reconstruction to True:
    autoencoder.output_reconstruction = True
    model.compile(optimizer='sgd', loss='mse')
    model.fit(X_train, X_train, nb_epoch=1)

    reconstructions = model.predict(X_test)
    assert reconstructions.shape == (100, 10)
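
The extra Dense layer in front is not required for the toggle to work. Below is a minimal, self-contained sketch of the same recompile-and-predict pattern on a bare AutoEncoder; the data shapes are illustrative assumptions:

import numpy as np

from keras.layers import containers, core
from keras.models import Sequential

X = np.random.random((100, 10))

model = Sequential()
model.add(core.AutoEncoder(
    encoder=containers.Sequential([core.Dense(5, input_shape=(10,))]),
    decoder=containers.Sequential([core.Dense(10, input_shape=(5,))]),
    output_reconstruction=False))
model.compile(optimizer='sgd', loss='mse')

# With output_reconstruction=False, predict returns the 5-dim codes.
codes = model.predict(X)
assert codes.shape == (100, 5)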
Example #2
from keras.layers import core
from keras.models import Sequential


def test_autoencoder_second_layer():
    # regression test for issue #1275
    encoder = core.Dense(input_dim=10, output_dim=2)
    decoder = core.Dense(input_dim=2, output_dim=10)
    model = Sequential()
    model.add(core.Dense(input_dim=20, output_dim=10))
    model.add(core.AutoEncoder(encoder=encoder, decoder=decoder,
                               output_reconstruction=False))
    model.compile(loss='mse', optimizer='sgd')
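
Since this model compiles with output_reconstruction=False, its predictions are the 2-dim codes rather than reconstructions. A self-contained sketch of that behavior, with the data shapes and assertion assumed by analogy with Example #1:

import numpy as np

from keras.layers import core
from keras.models import Sequential

model = Sequential()
model.add(core.Dense(input_dim=20, output_dim=10))
model.add(core.AutoEncoder(encoder=core.Dense(input_dim=10, output_dim=2),
                           decoder=core.Dense(input_dim=2, output_dim=10),
                           output_reconstruction=False))
model.compile(loss='mse', optimizer='sgd')

X = np.random.random((100, 20))
codes = model.predict(X)
assert codes.shape == (100, 2)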
Example #3
    def _pre_train(self, X):
        """Greedy layer-wise pre-training: train one single-layer autoencoder
        per hidden layer and return the trained encoders."""
        logger.info(u"Pre-training the network")

        encoders = []

        layers = self._hidden_layers[:]  # Copy the hidden layers list
        layers.insert(0, self.input_dim)

        for i, (n_in, n_out) in enumerate(zip(layers[:-1], layers[1:]),
                                          start=1):
            logger.info(u"Training layer {}: Input {} -> Output {}".format(
                i, n_in, n_out))

            autoencoder = models.Sequential()

            encoder = containers.Sequential()
            encoder.add(core.Dropout(self._dropout_ratio,
                                     input_shape=(n_in, )))
            encoder.add(
                core.Dense(input_dim=n_in,
                           output_dim=n_out,
                           activation=self._activation,
                           init=self._weight_init))

            decoder = containers.Sequential()
            decoder.add(
                core.Dense(input_dim=n_out,
                           output_dim=n_in,
                           activation=self._activation,
                           init=self._weight_init))

            autoencoder.add(
                core.AutoEncoder(encoder=encoder,
                                 decoder=decoder,
                                 output_reconstruction=False))

            logger.info(u"Compiling the autoencoder")
            autoencoder.compile(optimizer=self._optimizer,
                                loss='mean_squared_error')

            logger.info(u"Fitting the data")
            autoencoder.fit(X,
                            X,
                            batch_size=self._batch_size,
                            nb_epoch=self._pre_train_epochs)

            # Store the trained encoder; since output_reconstruction=False,
            # predict() returns the codes, which become the next layer's input
            encoders.append(autoencoder.layers[0].encoder)
            X = autoencoder.predict(X)

        return encoders
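
A common follow-up to this kind of pre-training is to stack the returned encoder containers into a single Sequential for supervised fine-tuning. The sketch below assumes names that are not part of the original class: `encoders` is the return value of `_pre_train`, while `X`, `y`, `n_hidden_last`, and `n_classes` are placeholders.

from keras import models
from keras.layers import core

model = models.Sequential()
for encoder in encoders:  # each encoder is a containers.Sequential, usable as a layer
    model.add(encoder)
# n_hidden_last is the output size of the last encoder; n_classes the label count
model.add(core.Dense(input_dim=n_hidden_last, output_dim=n_classes,
                     activation='softmax'))
model.compile(optimizer='sgd', loss='categorical_crossentropy')
model.fit(X, y, nb_epoch=10, batch_size=128)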
Example #4
    def new_encdecs(self, compile=True, use_dropout=None, use_noise=None):
        """Build one single-layer (optionally denoising) autoencoder per pair
        of adjacent layer sizes."""
        self.enc_decs = []
        if use_dropout is not None:
            self.enc_use_drop = self.drop_rate > 0 and use_dropout
        if use_noise is not None:
            self.enc_use_noise = self.sigma_base > 0 and use_noise
        if self.l1 != 0 or self.l2 != 0:
            regularizer = WeightRegularizer(l1=self.l1, l2=self.l2)
        else:
            regularizer = None
        for i, (n_in, n_out) in enumerate(
                zip(self.layer_sizes[:-1], self.layer_sizes[1:])):
            ae = Sequential()
            enc_l = []
            if self.enc_use_noise:
                # Corrupt the input; the noise level decays by sigma_fact per layer
                enc_l.append(
                    noise.GaussianNoise(self.sigma_base * (self.sigma_fact ** -i),
                                        input_shape=(n_in,)))
            enc_l.append(
                core.Dense(input_dim=n_in,
                           output_dim=n_out,
                           activation='sigmoid',
                           W_regularizer=regularizer))
            if self.enc_use_drop:
                enc_l.append(core.Dropout(self.drop_rate))
            enc = containers.Sequential(enc_l)
            dec = containers.Sequential([
                core.Dense(input_dim=n_out,
                           output_dim=n_in,
                           activation='sigmoid')
            ])
            ae.add(
                core.AutoEncoder(encoder=enc,
                                 decoder=dec,
                                 output_reconstruction=True))
            if compile:
                ae.compile(loss='mse', optimizer=self.enc_opt)
            self.enc_decs.append(ae)
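
One way these per-layer autoencoders could then be pre-trained greedily is sketched below, reusing the output_reconstruction toggle from Example #1 to produce each next layer's input; `sdae`, `X`, and the epoch/batch settings are assumptions, not part of the original class:

X_layer = X
for ae in sdae.enc_decs:
    # Train this layer's autoencoder to reconstruct its own input
    ae.fit(X_layer, X_layer, nb_epoch=5, batch_size=128)
    # Temporarily switch to code output (recompiling, as in Example #1)
    # to generate the training data for the next layer
    ae.layers[0].output_reconstruction = False
    ae.compile(loss='mse', optimizer=sdae.enc_opt)
    X_layer = ae.predict(X_layer)
    ae.layers[0].output_reconstruction = True
    ae.compile(loss='mse', optimizer=sdae.enc_opt)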
Example #5
def test_autoencoder():
    # Bare core.Layer instances pass their input through unchanged, so they
    # serve as a minimal encoder/decoder pair
    layer_1 = core.Layer()
    layer_2 = core.Layer()

    layer = core.AutoEncoder(layer_1, layer_2)
    _runner(layer)  # shared helper from the test module that exercises the layer