# NOTE: these constructor snippets assume the usual Keras imports, e.g.
#   import inspect
#   import keras
#   from keras.layers import (Input, Dense, Dropout, BatchNormalization,
#                             Lambda, LSTM, RepeatVector, convolutional, pooling)
#   from keras.models import Model
# plus a project-specific LossHistory callback (sketched further below).
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 filters=None,
                 kernel_size=None,
                 strides=None,
                 pool_size=None,
                 denoising=None):
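        # Capture every constructor argument and expose it as an attribute of the same name.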
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

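        # Callbacks: record per-batch losses, stop early when val_loss stalls,
        # and cut the learning rate to 10% of its value on longer plateaus.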
        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

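        # ENCODER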
        for i in range(self.encoder_layers):
            if i == 0:
                self.input_data = Input(shape=self.input_shape)
                self.encoded = BatchNormalization()(self.input_data)
                self.encoded = keras.layers.Conv1D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i > 0 and i < self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = keras.layers.Conv1D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i == self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = keras.layers.Conv1D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.encoded)

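        # BOTTLENECK: pool along the time axis, convolve once more, then upsample back.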
        self.encoded = keras.layers.MaxPooling1D(strides=self.pool_size,
                                                 padding="valid")(self.encoded)
        self.encoded = BatchNormalization()(self.encoded)
        self.encoded = keras.layers.Conv1D(filters=self.filters,
                                           kernel_size=self.kernel_size,
                                           strides=self.strides,
                                           activation="elu",
                                           padding="same")(self.encoded)
        self.decoded = keras.layers.UpSampling1D(size=self.pool_size)(
            self.encoded)

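        # DECODER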
        for i in range(self.decoder_layers):
            if i < self.decoder_layers - 1:
                self.decoded = BatchNormalization()(self.decoded)
                self.decoded = keras.layers.Conv1D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.decoded)
                self.decoded = Dropout(rate=0.5)(self.decoded)
            else:
                self.decoded = BatchNormalization()(self.decoded)
                self.decoded = keras.layers.Conv1D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.decoded)

        # 3D tensor with shape: (batch_size, new_steps, filters).
        # Think of each observation as a 2D lattice:
        # rows represent time, columns represent quantities of interest that evolve over time.
        self.decoded = BatchNormalization()(self.decoded)
        self.decoded = keras.layers.Conv1D(filters=self.input_shape[1],
                                           kernel_size=self.kernel_size,
                                           strides=self.strides,
                                           activation="sigmoid",
                                           padding="same")(self.decoded)

        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
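
# A rough usage sketch for the 1D-convolutional constructor above. The class
# name (Conv1DAutoEncoder) and the data are hypothetical; only the constructor
# arguments, self.autoencoder and self.callbacks_list come from the snippet.
import numpy as np

x_train = np.random.rand(256, 128, 4)  # (samples, timesteps, channels), values in [0, 1]

cae = Conv1DAutoEncoder(input_shape=(128, 4),
                        n_epoch=100,
                        batch_size=32,
                        encoder_layers=3,
                        decoder_layers=3,
                        filters=32,
                        kernel_size=3,
                        strides=1,
                        pool_size=2,
                        denoising=False)

# An autoencoder is trained to reconstruct its own input.
cae.autoencoder.fit(x_train, x_train,
                    epochs=cae.n_epoch,
                    batch_size=cae.batch_size,
                    validation_split=0.2,
                    callbacks=cae.callbacks_list)
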
# Example 2
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 filters=None,
                 kernel_size=None,
                 strides=None,
                 pool_size=None,
                 denoising=None):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        # INPUTS
        self.input_data = Input(shape=self.input_shape)

        # ENCODER
        self.encoded = BatchNormalization()(self.input_data)
        for i in range(self.encoder_layers):
            self.encoded = BatchNormalization()(self.encoded)
            self.encoded = convolutional.Conv2D(filters=self.filters,
                                                kernel_size=self.kernel_size,
                                                strides=self.strides,
                                                activation="elu",
                                                padding="same")(self.encoded)
            self.encoded = Dropout(rate=0.5)(self.encoded)
            self.encoded = pooling.MaxPooling2D(strides=self.pool_size,
                                                padding="same")(self.encoded)

        # DECODER
        self.decoded = BatchNormalization()(self.encoded)
        for i in range(self.decoder_layers):
            self.decoded = BatchNormalization()(self.decoded)
            self.decoded = convolutional.Conv2D(filters=self.filters,
                                                kernel_size=self.kernel_size,
                                                strides=self.strides,
                                                activation="elu",
                                                padding="same")(self.decoded)
            self.decoded = Dropout(rate=0.5)(self.decoded)
            self.decoded = convolutional.UpSampling2D(size=self.pool_size)(
                self.decoded)

        # ACTIVATION
        self.decoded = convolutional.Conv2D(filters=self.input_shape[2],
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="sigmoid",
                                            padding="same")(self.decoded)
        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
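
# Every constructor above instantiates LossHistory(), which is not part of
# these snippets. A minimal sketch of what such a Keras callback typically
# looks like (the attribute names are assumptions, not the original code):
import keras


class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        # Start a fresh list of per-batch training losses.
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        # "loss" is reported by Keras after every batch.
        self.losses.append(logs.get("loss"))
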
    def __init__(self,
                 n_feat=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 n_hidden_units=None,
                 encoding_dim=None,
                 denoising=None):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

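        # ENCODER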
        for i in range(self.encoder_layers):
            if i == 0:
                self.input_data = Input(shape=(self.n_feat, ))
                self.encoded = BatchNormalization()(self.input_data)
                self.encoded = Dense(units=self.n_hidden_units,
                                     activation="elu")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i > 0 and i < self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = Dense(units=self.n_hidden_units,
                                     activation="elu")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i == self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = Dense(units=self.n_hidden_units,
                                     activation="elu")(self.encoded)

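        # Latent Gaussian parameters; z is sampled from them via the reparameterization trick in sample_z.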
        self.mu = Dense(units=self.encoding_dim,
                        activation="linear")(self.encoded)
        self.log_sigma = Dense(units=self.encoding_dim,
                               activation="linear")(self.encoded)
        z = Lambda(self.sample_z, output_shape=(self.encoding_dim, ))(
            [self.mu, self.log_sigma])

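        # DECODER: layers are kept in a dict, presumably so they can be reapplied
        # later, e.g. to decode samples drawn directly from the latent space.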
        self.decoded_layers_dict = {}

        decoder_counter = 0

        for i in range(self.decoder_layers):
            if i == 0:
                self.decoded_layers_dict[decoder_counter] = BatchNormalization(
                )
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dense(
                    units=self.n_hidden_units, activation="elu")
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dropout(rate=0.5)

                self.decoded = self.decoded_layers_dict[decoder_counter - 2](z)
                self.decoded = self.decoded_layers_dict[decoder_counter - 1](
                    self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter](
                    self.decoded)

                decoder_counter += 1
            elif i > 0 and i < self.decoder_layers - 1:
                self.decoded_layers_dict[decoder_counter] = BatchNormalization(
                )
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dense(
                    units=self.n_hidden_units, activation="elu")
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dropout(rate=0.5)

                self.decoded = self.decoded_layers_dict[decoder_counter - 2](
                    self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter - 1](
                    self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter](
                    self.decoded)

                decoder_counter += 1
            elif i == self.decoder_layers - 1:
                self.decoded_layers_dict[decoder_counter] = BatchNormalization(
                )
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dense(
                    units=self.n_hidden_units, activation="elu")

                self.decoded = self.decoded_layers_dict[decoder_counter - 1](
                    self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter](
                    self.decoded)
                decoder_counter += 1

        # Output would have shape: (batch_size, n_feat).
        self.decoded_layers_dict[decoder_counter] = Dense(units=self.n_feat,
                                                          activation="sigmoid")
        self.decoded = self.decoded_layers_dict[decoder_counter](self.decoded)

        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss=self.vae_loss)
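
    # The variational constructor above calls self.sample_z and self.vae_loss,
    # which must be defined elsewhere on the class. The methods below are only
    # sketches of the standard formulations, not the original code (assuming
    # the usual `from keras import backend as K`):
    def sample_z(self, args):
        mu, log_sigma = args
        # Reparameterization trick: z = mu + sigma * epsilon with epsilon ~ N(0, I).
        epsilon = K.random_normal(shape=K.shape(mu), mean=0.0, stddev=1.0)
        return mu + K.exp(log_sigma / 2) * epsilon

    def vae_loss(self, y_true, y_pred):
        # Reconstruction term plus the KL divergence between q(z|x) and N(0, I),
        # treating log_sigma as the log-variance of the latent Gaussian.
        recon = K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)
        kl = 0.5 * K.sum(K.exp(self.log_sigma) + K.square(self.mu)
                         - 1.0 - self.log_sigma, axis=-1)
        return recon + kl
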
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 n_hidden_units=None,
                 encoding_dim=None,
                 stateful=None,
                 denoising=None):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        # Each observation is a 2D lattice: rows index time and columns index space.
        if self.stateful is True:
            self.input_data = Input(batch_shape=self.input_shape)
            self.n_rows = self.input_shape[1]
            self.n_cols = self.input_shape[2]
        else:
            self.input_data = Input(shape=self.input_shape)
            self.n_rows = self.input_shape[0]
            self.n_cols = self.input_shape[1]

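        # ENCODER: stacked LSTMs that return the full sequence at every layer.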
        for i in range(self.encoder_layers):
            if i == 0:
                # Returns a sequence of n_rows vectors of dimension n_hidden_units.
                self.encoded = LSTM(units=self.n_hidden_units,
                                    return_sequences=True,
                                    stateful=self.stateful)(self.input_data)
            else:
                self.encoded = LSTM(units=self.n_hidden_units,
                                    return_sequences=True,
                                    stateful=self.stateful)(self.encoded)

        # Returns 1 vector of dimension encoding_dim.
        self.encoded = LSTM(units=self.encoding_dim,
                            return_sequences=False,
                            stateful=self.stateful)(self.encoded)

        # Returns a sequence containing n_rows vectors where each vector is of dimension encoding_dim.
        # output_shape: (None, n_rows, encoding_dim).
        self.decoded = RepeatVector(self.n_rows)(self.encoded)

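        # DECODER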
        for i in range(self.decoder_layers):
            self.decoded = LSTM(units=self.n_hidden_units,
                                return_sequences=True,
                                stateful=self.stateful)(self.decoded)

        # If return_sequences is True: 3D tensor with shape (batch_size, timesteps, units).
        # Else: 2D tensor with shape (batch_size, units).
        # Note that n_rows here is timesteps and n_cols here is units.
        # If return_state is True: a list of tensors.
        # The first tensor is the output. The remaining tensors are the last states, each with shape (batch_size, units).
        # If stateful is True: the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch.
        # If unroll is True, the network will be unrolled; otherwise a symbolic loop is used. Unrolling can speed up an RNN, although it tends to be more memory-intensive, and it is only suitable for short sequences.
        self.decoded = LSTM(units=self.n_cols,
                            return_sequences=True,
                            stateful=self.stateful)(self.decoded)

        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
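
# A rough usage sketch for the sequence-to-sequence constructor above. The
# class name (LSTMAutoEncoder) and the data are hypothetical. With
# stateful=False, input_shape is (n_rows, n_cols); with stateful=True it must
# be a full batch_shape of (batch_size, n_rows, n_cols) and the number of
# samples must be a multiple of batch_size.
import numpy as np

x_train = np.random.rand(512, 30, 8)  # (samples, timesteps=n_rows, features=n_cols)

sae = LSTMAutoEncoder(input_shape=(30, 8),
                      n_epoch=50,
                      batch_size=64,
                      encoder_layers=2,
                      decoder_layers=2,
                      n_hidden_units=64,
                      encoding_dim=16,
                      stateful=False,
                      denoising=False)

sae.autoencoder.fit(x_train, x_train,
                    epochs=sae.n_epoch,
                    batch_size=sae.batch_size,
                    validation_split=0.2,
                    callbacks=sae.callbacks_list)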