def get_model(hyperparameters, predictors=None):
    """Build and compile a 3-layer stacked-LSTM regressor.

    Args:
        hyperparameters: dict providing 'learning_rate',
            'input_sequence_length' and 'output_sequence_length'.
        predictors: sequence of predictor names; its length defines the
            per-timestep feature dimension.  Defaults to an empty list.

    Returns:
        A compiled Keras Sequential model (MSE loss, Adam optimizer).
    """
    # BUG FIX: avoid a mutable default argument shared across calls.
    if predictors is None:
        predictors = []

    # Initialising the RNN
    model = Sequential()
    regularizer = l2(0.01)
    optimizer = Adam(lr=hyperparameters['learning_rate'])

    model.add(
        LSTM(units=30,
             input_shape=(hyperparameters['input_sequence_length'],
                          len(predictors)),
             return_sequences=True,
             kernel_regularizer=regularizer))
    model.add(GaussianNoise(1e-4))

    model.add(
        LSTM(units=20, return_sequences=True, kernel_regularizer=regularizer))

    model.add(
        LSTM(units=10, kernel_regularizer=regularizer, return_sequences=False))
    model.add(GaussianNoise(1e-4))

    # Output head sized to the forecast horizon.
    model.add(
        Dense(hyperparameters['output_sequence_length'], activation='relu'))

    model.compile(optimizer=optimizer, loss='mean_squared_error')

    print(model.summary())

    return model
예제 #2
0
def cnn_model(labels):
    """Xception backbone with a small trainable classification head.

    Args:
        labels: sequence of class labels; its length sets the output size.

    Returns:
        A Keras Model; all but the last 50 backbone layers are frozen.
    """
    # include_top=False strips the stock fully connected classifier.
    image_input = Input(shape=(299, 299, 3))
    backbone = Xception(include_top=False, pooling='avg', input_tensor=image_input)

    # Replacement fully connected head.
    head = Sequential()
    head.add(GaussianNoise(stddev=0.2, input_shape=backbone.output_shape[1:]))
    head.add(Dropout(0.3))
    head.add(Dense(len(labels), input_shape=backbone.output_shape[1:]))
    head.add(Activation("softmax"))

    # Join the headless backbone with the new head.
    model = Model(inputs=backbone.input, outputs=head(backbone.output))

    # Freeze everything except the last 50 backbone layers.
    for layer in backbone.layers[:-50]:
        layer.trainable = False
    return model
예제 #3
0
def DeepLabV3PlusUNet(input_shape, classes=66, *args, **kwargs):
    """Segmentation model: ResNet101V2 encoder, ASPP, U-Net-style decoder.

    Args:
        input_shape: shape of the input image tensor (without batch dim).
        classes: number of output channels in the softmax head.

    Returns:
        A Keras Model named 'DeepLabV3_Plus'.
    """
    inputs = tf.keras.Input(shape=input_shape)
    noisy = GaussianNoise(0.1)(inputs)
    backbone = ResNet101V2(input_tensor=noisy, include_top=False)

    # Encoder feature maps reused as decoder skip connections, shallow -> deep.
    skips = [
        backbone.get_layer('conv1_conv').output,        # (None, 128, 128, 64)
        backbone.get_layer('conv2_block2_out').output,  # (None, 64, 64, 256)
        backbone.get_layer('conv3_block3_out').output,  # (None, 32, 32, 512)
        backbone.get_layer('conv4_block22_out').output,  # (None, 16, 16, 1024)
    ]

    deep_features = backbone.output           # (None, 8, 8, 2048)
    aspp_features = ASPP(deep_features)       # (None, 8, 8, 2048)
    merged = Concatenate()([deep_features, aspp_features])

    # Upsample and fuse with skips, deepest skip first.
    for channels in (1024, 512, 256, 64):
        a = upsample_by_cnn(merged, channels)
        b = skips.pop()
        print(a.name, a.shape)
        print(b.name, b.shape)
        merged = Concatenate()([a, b])

    head = upsample_by_cnn(merged, 32)
    head = Conv2D(classes, (1, 1), name='output_layer')(head)
    head = Activation('softmax', dtype='float32')(head)

    model = Model(inputs=inputs, outputs=head, name='DeepLabV3_Plus')
    print(f'*** Output_Shape => {model.output_shape} ***')
    return model
    def encode_embedding_input(input_layer,
                               type='subject',
                               reduce_size=False,
                               reduce_size_n=32
                               ):
        """Encode one embedding input into a flat vector.

        Applies optional noise/normalization/size reduction (controlled by
        enclosing-scope flags and the arguments), then one conv/pool block
        for 'subject'/'object' inputs or two blocks otherwise.
        """
        conv1_size = CONV_SIZES[type][0]

        # Optional regularization of the raw embedding.
        if noise:
            input_layer = GaussianNoise(stddev=.001)(input_layer)

        if normalization:
            input_layer = BatchNormalization()(input_layer)

        if reduce_size:
            input_layer = Dense(reduce_size_n, activation="sigmoid")(input_layer)

        encoded = Conv1D(conv1_size, (2,), activation=mish, padding='same')(input_layer)
        encoded = MaxPooling1D((2,), padding='same')(encoded)

        # Subjects and objects get only a single conv block.
        if type in ['subject', 'object']:
            return Flatten()(encoded)

        encoded = Conv1D(CONV_SIZES[type][1], (2,), activation=mish, padding='same')(encoded)
        encoded = MaxPooling1D((2,), padding='same')(encoded)
        return Flatten()(encoded)
예제 #5
0
    def build_discriminator(self, model_yaml, is_training=True):
        """Build the convolutional discriminator described by `model_yaml`.

        Args:
            model_yaml: dict of architecture options — activation choice,
                optional input noise, per-layer conv parameters
                ('dict_discri_archi' keyed by 'layer_key'), padding mode
                and final activation.
            is_training: forwarded as `trainable` to BatchNormalization.

        Returns:
            A Keras Model named "discriminator" mapping a (256, 256, 12)
            input to the (optionally sigmoid-activated) conv stack output.
        """
        discri_input = Input(shape=tuple([256, 256, 12]), name="d_input")
        # Activation is either a configurable leaky ReLU or a plain
        # activation name taken verbatim from the config.
        if model_yaml["d_activation"] == "lrelu":
            d_activation = lambda x: tf.nn.leaky_relu(
                x, alpha=model_yaml["lrelu_alpha"])
        else:
            d_activation = model_yaml["d_activation"]

        # Optionally perturb the discriminator input with Gaussian noise.
        if model_yaml["add_discri_noise"]:
            x = GaussianNoise(self.sigma_val,
                              input_shape=self.model_yaml["dim_gt_image"],
                              name="d_GaussianNoise")(discri_input)
        else:
            x = discri_input
        for i, layer_index in enumerate(model_yaml["dict_discri_archi"]):
            # Zip the config's key list with this layer's value tuple to get
            # a {param_name: value} dict for the layer.
            layer_val = model_yaml["dict_discri_archi"][layer_index]
            layer_key = model_yaml["layer_key"]
            layer_param = dict(zip(layer_key, layer_val))
            pad = layer_param["padding"]
            # Pad only the two spatial dimensions.
            vpadding = tf.constant([[0, 0], [pad, pad], [pad, pad],
                                    [0, 0]])  # the last dimension is 12
            x = tf.pad(
                x,
                vpadding,
                model_yaml["discri_opt_padding"],
                name="{}_padding_{}".format(
                    model_yaml["discri_opt_padding"],
                    layer_index))  # the type of padding is defined in the yaml,
            # more information in https://www.tensorflow.org/api_docs/python/tf/pad
            #
            # x = ZeroPadding2D(
            #   padding=(layer_param["padding"], layer_param["padding"]), name="d_pad_{}".format(layer_index))(x)
            x = Conv2D(layer_param["nfilter"],
                       layer_param["kernel"],
                       padding="valid",
                       activation=d_activation,
                       strides=(layer_param["stride"], layer_param["stride"]),
                       name="d_conv{}".format(layer_index))(x)
            # No batch norm after the first conv block.
            if i > 0:
                x = BatchNormalization(momentum=model_yaml["bn_momentum"],
                                       trainable=is_training,
                                       name="d_bn{}".format(layer_index))(x)

        # x = Flatten(name="flatten")(x)
        # for i, dlayer_idx in enumerate(model_yaml["discri_dense_archi"]):
        #    dense_layer = model_yaml["discri_dense_archi"][dlayer_idx]
        #    x = Dense(dense_layer, activation=d_activation, name="dense_{}".format(dlayer_idx))(x)

        if model_yaml["d_last_activ"] == "sigmoid":
            x_final = tf.keras.layers.Activation('sigmoid',
                                                 name="d_last_activ")(x)
        else:
            x_final = x
        model_discri = Model(discri_input, x_final, name="discriminator")
        model_discri.summary()
        return model_discri
    def encode_embedding_input(input_layer):
        """Two conv/pool blocks over a noised embedding, flattened to a vector."""
        first_size, second_size = (CONV_SIZES[2], CONV_SIZES[1])

        noised = GaussianNoise(stddev=.1)(input_layer)
        encoded = Conv1D(first_size, (2,), activation='mish', padding='same')(noised)
        encoded = MaxPooling1D((2,), padding='same')(encoded)
        encoded = Conv1D(second_size, (2,), activation='mish', padding='same')(encoded)
        encoded = MaxPooling1D((2,), padding='same')(encoded)
        return Flatten()(encoded)
예제 #7
0
    def build_model(self):
        """
        Function to build the seq2seq model used.

        The encoder GRU, decoder GRU, attention layer and output dense layer
        are shared between the training model and the two inference models,
        so the inference models reuse the trained weights.

        :return: Encoder model, decoder model (used for predicting) and full model (used for training).
        """
        # Define model inputs for the encoder/decoder stack
        x_enc = Input(shape=(self.seq_len_in, self.input_feature_amount), name="x_enc")
        x_dec = Input(shape=(self.seq_len_out, self.output_feature_amount), name="x_dec")

        # Add noise to the decoder inputs
        x_dec_t = GaussianNoise(0.2)(x_dec)

        # Define the encoder GRU, which only has to return a state
        encoder_gru = GRU(self.state_size, return_sequences=True, return_state=True, name="encoder_gru")
        encoder_out, encoder_state = encoder_gru(x_enc)

        # Decoder GRU, initialised from the encoder's final state
        decoder_gru = GRU(self.state_size, return_state=True, return_sequences=True,
                          name="decoder_gru")
        # Use these definitions to calculate the outputs of out encoder/decoder stack
        dec_intermediates, decoder_state = decoder_gru(x_dec_t, initial_state=encoder_state)

        # Define the attention layer
        attn_layer = AttentionLayer(name="attention_layer")
        attn_out, attn_states = attn_layer([encoder_out, dec_intermediates])

        # Concatenate decoder and attn out
        decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([dec_intermediates, attn_out])

        # Define the dense output layer, applied at every timestep
        dense = Dense(self.output_feature_amount, activation='linear', name='output_layer')
        dense_time = TimeDistributed(dense, name='time_distributed_layer')
        decoder_pred = dense_time(decoder_concat_input)

        # Define the encoder/decoder stack model
        encdecmodel = tsModel(inputs=[x_enc, x_dec], outputs=decoder_pred)

        # Define the separate encoder model for inferencing
        encoder_inf_inputs = Input(shape=(self.seq_len_in, self.input_feature_amount), name="encoder_inf_inputs")
        encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)
        encoder_model = tsModel(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])

        # Define the separate decoder model for inferencing (one step at a time)
        decoder_inf_inputs = Input(shape=(1, self.output_feature_amount), name="decoder_inputs")
        encoder_inf_states = Input(shape=(self.seq_len_in, self.state_size), name="encoder_inf_states")
        decoder_init_state = Input(shape=(self.state_size,), name="decoder_init")

        decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)
        attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])
        decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])
        decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)
        decoder_model = tsModel(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],
                                outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])

        return encoder_model, decoder_model, encdecmodel
예제 #8
0
def create_model(input_width, input_height):
    """Build and compile a small CNN binary classifier.

    Args:
        input_width: input image width in pixels.
        input_height: input image height in pixels.

    Returns:
        A compiled tf.keras Sequential model expecting inputs of shape
        (input_width, input_height, 6) and producing a 2-way softmax.
    """
    m = tf.keras.models.Sequential()
    m.add(InputLayer(input_shape=(input_width, input_height, 6)))
    # Six identical conv -> noise -> pool stages; only the filter count
    # varies, so express them as a loop instead of six copy-pasted blocks.
    for filters in (32, 48, 64, 64, 64, 64):
        m.add(Conv2D(filters=filters, kernel_size=(4, 4), activation='relu'))
        m.add(GaussianNoise(0.01))
        m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Flatten())
    m.add(Dense(128, activation='relu'))
    m.add(Dropout(0.1))
    m.add(Dense(2, activation='softmax'))
    m.compile(optimizer='adam', loss='categorical_crossentropy')
    return m
예제 #9
0
 def produce_noisy_input(self, input, sigma_val):
     """Optionally add Gaussian noise (and a tanh squash) to a GT image batch.

     Controlled by self.model_yaml flags; when disabled, the input is
     returned unchanged.
     """
     if self.model_yaml["add_discri_white_noise"]:
         # print("[INFO] On each batch GT label we add Gaussian Noise before training discri on labelled image")
         new_gt = GaussianNoise(sigma_val,
                                input_shape=self.model_yaml["dim_gt_image"],
                                name="d_inputGN")(input)
         # NOTE(review): the flag is named "add_relu_after_noise" but the
         # activation applied here is tanh — confirm this is intentional.
         if self.model_yaml["add_relu_after_noise"]:
             new_gt = tf.keras.layers.Activation(
                 lambda x: tf.keras.activations.tanh(x),
                 name="d_before_activ")(new_gt)
     else:
         new_gt = input
     return new_gt
    def encode_embedding_input(input_layer, large=False):
        """Encode an embedding with two conv/pool blocks.

        `large` selects the bigger pair of filter counts from CONV_SIZES;
        noise/normalization flags come from the enclosing scope.
        """
        if large:
            first_size, second_size = CONV_SIZES[2], CONV_SIZES[1]
        else:
            first_size, second_size = CONV_SIZES[1], CONV_SIZES[0]

        if noise:
            input_layer = GaussianNoise(stddev=.001)(input_layer)

        if normalization:
            input_layer = BatchNormalization()(input_layer)

        encoded = Conv1D(first_size, (2,), activation='mish', padding='same')(input_layer)
        encoded = MaxPooling1D((2,), padding='same')(encoded)
        encoded = Conv1D(second_size, (2,), activation='mish', padding='same')(encoded)
        encoded = MaxPooling1D((2,), padding='same')(encoded)
        return Flatten()(encoded)
예제 #11
0
def build_model(train_set: pd.DataFrame, lr: float) -> Model:
    """Build and compile a small dense autoencoder over `train_set`'s features.

    Args:
        train_set: training data; only its column count is used here.
        lr: Adam learning rate.

    Returns:
        A compiled Keras Model mapping features -> reconstructed features
        (MAE loss), with Gaussian input noise for denoising behaviour.
    """
    act_func = 'elu'
    encoding_dim = 20
    features = train_set.shape[1]
    inp = Input(shape=(features, ))
    x = GaussianNoise(stddev=0.3)(inp)
    x = Dense(encoding_dim * 2, activation=act_func)(x)
    # BUG FIX: the second hidden layer was previously applied to the first
    # layer's output and then immediately discarded (both Dense calls took
    # `x` and rebound `out`); chain the layers instead.
    x = Dense(encoding_dim * 2, activation=act_func)(x)
    out = Dense(features, activation=act_func)(x)
    model = Model(inputs=[inp], outputs=[out])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                  loss="mae")
    model.summary()
    return model
def restore_obj(input_shape):
    """ mask object embedding and try to restore it alone

    Encodes the (noise-free) subject and relation embeddings, compresses
    them into a latent vector, and decodes an object embedding from it.
    The noised subject/relation inputs are returned as extra model outputs
    (presumably used as reconstruction targets by the training code —
    TODO confirm against the caller).

    Args:
        input_shape: sequence of three (length, dim) shapes for the
            subject, object and relation inputs, in that order.

    Returns:
        A Keras Model with inputs [subject, object, relation] and outputs
        [noised subject, decoded object, noised relation].
    """

    # Width of the latent bottleneck.
    INNER_SIZE = 50

    def encode_embedding_input(input_layer):
        # Two conv/pool blocks flattened to a vector.
        conv1 = Conv1D(128, (2,), activation='relu', padding='same')(input_layer)
        pool1 = MaxPooling1D((2,), padding='same')(conv1)
        conv2 = Conv1D(32, (2,), activation='relu', padding='same')(pool1)
        pool2 = MaxPooling1D((2,), padding='same')(conv2)
        return Flatten()(pool2)

    def decode_embedding_input(latent, name):
        # Expand the latent vector back to an embedding-shaped sequence.
        latent = Reshape((1, INNER_SIZE))(latent)
        conv1 = Conv1D(128, (1,), activation='relu', padding='same', name=name + '_conv1')(latent)
        up1 = UpSampling1D(input_shape[0], name=name + '_up1')(conv1)
        conv2 = Conv1D(input_shape[1], (6,), activation='relu', padding='same', name=name + '_conv2')(up1)
        return conv2

    input_subject = Input(shape=input_shape[0], name='input_subject')
    input_sub_noised = GaussianNoise(stddev=.001)(input_subject)
    # NOTE(review): input_object is declared as a model input but is never
    # encoded or otherwise consumed below — verify this is intended.
    input_object = Input(shape=input_shape[1], name='input_object')
    input_rel = Input(shape=input_shape[2], name='input_rel')
    input_rel_noised = GaussianNoise(stddev=.001)(input_rel)

    encode_subject = encode_embedding_input(input_subject)
    encode_rel = encode_embedding_input(input_rel)

    x = concatenate([encode_subject, encode_rel])
    latent = Dense(INNER_SIZE, activation='sigmoid', name='embedding')(x)

    output_object = decode_embedding_input(latent, 'output_object')

    model = Model(inputs=[input_subject, input_object, input_rel],
                  outputs=[input_sub_noised, output_object, input_rel_noised])

    return model
예제 #13
0
def test_delete_channels_noise(channel_index, data_format):
    """Channel deletion must work through every noise/dropout layer type."""
    for noise_layer in (GaussianNoise(0.5),
                        GaussianDropout(0.5),
                        AlphaDropout(0.5)):
        layer_test_helper_flatten_2d(noise_layer, channel_index, data_format)
예제 #14
0
def create_keras_model(inputShape,
                       nClasses,
                       scale=2,
                       noise=1e-3,
                       depth=5,
                       activation='relu',
                       n_filters=64,
                       l2_reg=1e-4):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """
    def pad_conv(t, filters):
        # Reflection padding compensates for the 'valid' convolution so the
        # spatial size is preserved.
        t = ReflectionPadding2D()(t)
        return Conv2D(filters, (3, 3),
                      padding='valid',
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg))(t)

    def residual(inputs, filters):
        # Two conv/BN stages with an additive skip connection.
        t = pad_conv(inputs, filters)
        t = BatchNormalization()(t)
        t = Activation(activation)(t)
        t = pad_conv(t, filters)
        t = BatchNormalization()(t)
        return Add()([t, inputs])

    inputs = Input(shape=inputShape)
    t = GaussianNoise(noise)(inputs)

    t = pad_conv(t, n_filters)
    t0 = Activation(activation)(t)

    # `depth` residual blocks in total.
    t = residual(t0, n_filters)
    for _ in range(depth - 1):
        t = residual(t, n_filters)

    t = pad_conv(t, n_filters)
    t = BatchNormalization()(t)
    t = Add()([t, t0])

    # Upsampling for super-resolution
    t = UpSampling2D(size=(scale, scale))(t)

    t = pad_conv(t, n_filters)
    t = Activation(activation)(t)

    outputs = Conv2D(nClasses, (1, 1),
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_reg))(t)

    return Model(inputs=inputs, outputs=outputs, name='enhance')
예제 #15
0

def add_noise(img):
    '''Return `img` with zero-mean Gaussian noise added, clipped to [0, 255].

    The noise standard deviation is drawn uniformly from [0, VARIABILITY)
    on each call, so repeated calls vary in noise strength.
    '''
    VARIABILITY = 50
    deviation = VARIABILITY * random.random()
    noise = np.random.normal(0, deviation, img.shape)
    # BUG FIX: np.clip returns the clipped array — the original discarded
    # the result and returned the unclipped image.  Using `img + noise`
    # (not `+=`) also avoids mutating the caller's array in place and
    # avoids the in-place float->int cast error on integer-typed inputs.
    img = np.clip(img + noise, 0., 255.)
    return img


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Load the raw training photos from disk into a single array.
    train_photos = np.array(load_files(train_data_path))
    # NOTE(review): `sample` is created but not used in the visible code —
    # presumably applied to image batches further down; confirm.
    sample = GaussianNoise(20, dtype=tf.float64)

    # Image transformer: mild geometric augmentation plus a random hue shift.
    datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=[0.75, 1],
        rotation_range=5,
        horizontal_flip=True,
        dtype='uint8',
        preprocessing_function=lambda x: random_hue(x, seed=9, max_delta=0.05))
    # datagen = ImageDataGenerator(
    #     shear_range=0.0,
    #     zoom_range=0.0,
    #     rotation_range=0,
    #     horizontal_flip=False,
    #     dtype='uint8'
예제 #16
0
def create_model(noise=True,
                 first_kernel_size=(7, 7),
                 n_filters=64,
                 n_covul_layers=5,
                 activation='swish',
                 dense_neurons=1024,
                 dropout=0.5,
                 lr=0.0001):
    """Build and compile a deep CNN classifier for 300x300 RGB images.

    Args:
        noise: when True, apply GaussianNoise(0.1) right after the input.
        first_kernel_size: kernel size of the first convolution.
        n_filters: base filter count; deeper stages use multiples of it.
        n_covul_layers: controls how many doubled conv stages are stacked.
        activation: activation name for conv and dense layers.
        dense_neurons: width of the first dense layer in the head.
        dropout: dropout rate in the dense head.
        lr: Adam learning rate.

    Returns:
        A compiled Keras Model with a softmax head over `classes`.
    """
    kernel = (3, 3)
    n_classes = len(classes)

    # BUG FIX: keep the Input layer distinct from the noised tensor.  The
    # original rebound `input_layer` to the GaussianNoise output and then
    # passed that tensor to Model(), which is not a valid model input when
    # noise=True.
    input_layer = Input(shape=(300, 300, 3))
    x = GaussianNoise(0.1)(input_layer) if noise else input_layer

    model = BatchNormalization(axis=[1, 2])(x)

    model = Conv2D(filters=n_filters,
                   kernel_size=first_kernel_size,
                   activation=activation)(model)
    model = BatchNormalization(axis=[1, 2])(model)
    model = MaxPooling2D((2, 2))(model)

    # Progressively wider conv stages with pooling between them.
    for i in range(2, n_covul_layers):
        model = Conv2D(filters=n_filters * i,
                       kernel_size=kernel,
                       activation=activation)(model)
        model = Conv2D(filters=n_filters * i,
                       kernel_size=kernel,
                       activation=activation,
                       padding='same')(model)
        model = BatchNormalization(axis=[1, 2])(model)
        model = MaxPooling2D((2, 2))(model)

    model = Conv2D(filters=n_filters * (n_covul_layers + 1),
                   kernel_size=kernel,
                   activation=activation,
                   padding='same')(model)
    model = Conv2D(filters=n_filters * (n_covul_layers + 1),
                   kernel_size=kernel,
                   activation=activation,
                   padding='same')(model)
    model = BatchNormalization(axis=[1, 2])(model)
    model = MaxPooling2D((2, 2))(model)

    model = Flatten()(model)
    model = Dense(dense_neurons, activation=activation)(model)
    model = BatchNormalization()(model)
    model = Dropout(dropout)(model)

    # BUG FIX: Dense units must be an integer; `/` produced a float.
    model = Dense(dense_neurons // 2, activation=activation)(model)
    model = BatchNormalization()(model)
    model = Dropout(dropout)(model)

    output = Dense(n_classes, activation="softmax")(model)

    model = Model(input_layer, output)
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr),
                  metrics=["accuracy"])

    return model
    def train(self, data):
        """Pretrain the latent layers of the model.

        Builds a denoising variational autoencoder whose dense layers are
        initialised from `self.pretrain` (consumed in order via
        `layer_num`), then fits it on `data` with checkpointing and
        TensorBoard logging.

        Args:
            data: 2-D array-like of shape (samples, features).
        """
        # network parameters
        original_dim = data.shape[1]
        input_shape = (original_dim, )
        batch_size = params_training.batch_size
        latent_dim = params_training.num_latent
        epochs = params_training.num_epochs
        layer_num = 0  # running index into the pretrained weight list

        # build encoder model
        inputs = Input(shape=input_shape, name='encoder_input')
        # Corrupt the input so the autoencoder learns a denoising mapping.
        inputs_noisy = GaussianNoise(stddev=0.1)(inputs)
        hidden = inputs_noisy
        for i, hidden_dim in enumerate(self.hidden_dim, 1):
            hidden_layer = Dense(hidden_dim,
                                 activation='sigmoid',
                                 name='hidden_e_{}'.format(i),
                                 weights=self.pretrain[layer_num])
            hidden = hidden_layer(hidden)
            layer_num += 1
            logger.debug("Hooked up hidden layer with %d neurons" % hidden_dim)
        # Latent Gaussian parameters (mean and log-variance heads).
        z_mean = Dense(latent_dim,
                       activation=None,
                       name='z_mean',
                       weights=self.pretrain[layer_num])(hidden)
        layer_num += 1
        z_log_sigma = Dense(latent_dim,
                            activation=None,
                            name='z_log_sigma',
                            weights=self.pretrain[layer_num])(hidden)
        layer_num += 1
        # Sample z via the reparameterization trick (self.sampling).
        z = Lambda(self.sampling, output_shape=(latent_dim, ),
                   name='z')([z_mean, z_log_sigma])
        encoder = Model(inputs, [z_mean, z_log_sigma, z], name='encoder')

        # build decoder model
        latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
        hidden = latent_inputs
        for i, hidden_dim in enumerate(self.hidden_dim[::-1],
                                       1):  # Reverse because decoder.
            hidden = Dense(hidden_dim,
                           activation='sigmoid',
                           name='hidden_d_{}'.format(i),
                           weights=self.pretrain[layer_num])(hidden)
            layer_num += 1
            logger.debug("Hooked up hidden layer with %d neurons" % hidden_dim)
        outputs = Dense(original_dim, activation='sigmoid')(hidden)
        decoder = Model(latent_inputs, outputs, name='decoder')

        # Build the DAE: decode the sampled latent (index 2 of encoder output).
        outputs = decoder(encoder(inputs)[2])
        sdae = Model(inputs, outputs, name='vae_mlp')

        # VAE loss = reconstruction term + KL divergence to the unit Gaussian.
        reconstruction_loss = binary_crossentropy(inputs,
                                                  outputs) * original_dim
        kl_loss = 1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)

        sdae.add_loss(vae_loss)
        sdae.compile(optimizer='adam')
        saver = ModelCheckpoint(check_path(TEMPORARY_SDAE_PATH),
                                save_weights_only=True,
                                verbose=1)
        tensorboard_config = TensorBoard(
            log_dir=check_path(TEMPORARY_SDAE_PATH))
        logger.info("Checkpoint has been saved for SDAE.")
        # train the autoencoder
        logger.warning("Pretraining started, Don't interrupt.")
        sdae.fit(data,
                 epochs=epochs,
                 batch_size=batch_size,
                 callbacks=[saver, tensorboard_config])
        logger.info("Model has been pretrained successfully.")