Example #1
    def _go_cached(self):
        """ Generate the model needed to use cached batches (i.e. drop the non-trainable steps) """
        print("Using cached mode (skipping non-trainable steps)")
        first_trainable = None
        if self.model_trainable is None:
            print("Trainable model not found. Building.")
            mdl = Sequential()
            for i in range(len(self.model.layers)):
                if self.model.layers[i].trainable:
                    if first_trainable is None:
                        first_trainable = self.model.layers[i]
                        # no-op Dropout pins the input shape expected for the cached features
                        mdl.add(Dropout(0, input_shape=(14, 14, 512)))
                    mdl.add(self.model.layers[i])

            if first_trainable is None:
                print(
                    "All layers non-trainable. Don't know what to cut. Exiting"
                )
                return

            self.model_trainable = mdl
            self.model_trainable.compile(optimizer=Nadam(),
                                         loss='categorical_crossentropy',
                                         metrics=['accuracy'])
            self.model_trainable.summary()
Example #2
def build_model(seq_len: int, word_embedding_dim: int, vocab_size: int,
                hidden_state_dim: int, learning_rate: float):
    sequence_input = Input(shape=(seq_len, ), dtype='int32')
    embeddings = Embedding(vocab_size,
                           word_embedding_dim,
                           input_length=seq_len)(sequence_input)
    lstm = Bidirectional(
        LSTM(hidden_state_dim,
             return_sequences=True,
             return_state=True,
             dropout=.5,
             recurrent_dropout=.4))(embeddings)
    lstm, forward_h, forward_c, backward_h, backward_c = Bidirectional(
        LSTM(hidden_state_dim,
             return_sequences=True,
             return_state=True,
             dropout=0.5,
             recurrent_dropout=.4))(lstm)
    state_h = Add()([forward_h, backward_h])
    attention = Attention(hidden_state_dim)
    context_vector, attention_weights = attention(lstm, state_h)
    dense = Dense(100, activation='relu')(context_vector)
    dropout = Dropout(rate=.3)(dense)
    output = Dense(1, activation='sigmoid')(dropout)
    model = Model(inputs=sequence_input, outputs=output, name="TweetsModel")

    model.summary()  # summary() prints the architecture table itself

    model.compile(optimizer=Nadam(lr=learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
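
Note: the `Attention(hidden_state_dim)` layer used above is not the built-in keras.layers.Attention (which has a different call signature) and is not defined in this snippet. A minimal sketch of an additive (Bahdanau-style) attention layer with a matching signature, offered only as an assumed stand-in for whatever the author used:

import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense

class Attention(Layer):
    """Additive attention; an assumed stand-in for the custom layer used in build_model."""
    def __init__(self, units):
        super().__init__()
        self.W1 = Dense(units)   # projects the LSTM outputs (values)
        self.W2 = Dense(units)   # projects the summed hidden state (query)
        self.V = Dense(1)        # scores each time step

    def call(self, values, query):
        # query: (batch, hidden) -> (batch, 1, hidden) so it broadcasts over time steps
        query_with_time_axis = tf.expand_dims(query, 1)
        score = self.V(tf.nn.tanh(self.W1(values) + self.W2(query_with_time_axis)))
        attention_weights = tf.nn.softmax(score, axis=1)
        context_vector = tf.reduce_sum(attention_weights * values, axis=1)
        return context_vector, attention_weights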
Example #3
def boosting_rank_net(input_shape, hns=[8, 6, 4, 4], classes=2):
    """
    """
    res_model = boosting_res_net((input_shape[1], ),
                                 hns,
                                 out_layer_name='proba')
    res_model = Model(res_model.input,
                      res_model.get_layer('pre_sigmoid').output)

    inputs = Input(input_shape)
    minor_inputs = Lambda(lambda x: x[:, 0], name='minor_input')(inputs)
    pred_minor = res_model(minor_inputs)
    minor_out_proba = Lambda(lambda x: x, name='minor_out_proba')(pred_minor)
    major_inputs = Lambda(lambda x: x[:, 1], name='major_input')(inputs)
    pred_major = res_model(major_inputs)
    major_out_proba = Lambda(lambda x: x, name='major_out_proba')(pred_major)

    sub = Subtract()([major_out_proba, minor_out_proba])
    sub = Lambda(lambda x: x * RANK_SCALE, name='rank_scale_layer')(sub)
    proba = Activation('sigmoid')(sub)

    model = Model(inputs, proba)
    model.compile(optimizer=Nadam(lr=0.001), loss=min_pred)

    return model
Example #4
def _model(train, test, target):
    optimizer = Nadam(lr=0.0002)
    reshape_X_train, reshape_X_test, reshape_y_train, reshape_y_test, reshape_scaled_train, validation = selfprocessing(
        train, test, target)

    num = train.shape[1]
    print(num)
    model = Sequential()
    model.add(
        LSTM(11,
             activation='linear',
             return_sequences=True,
             input_shape=(1, num)))
    model.add(Dropout(0.4))
    model.add(
        LSTM(22,
             activation='linear',
             return_sequences=True,
             input_shape=(1, num)))
    model.add(Dropout(0.4))
    model.add(
        LSTM(44,
             activation='linear',
             return_sequences=True,
             input_shape=(1, num)))
    model.add(Dropout(0.4))
    model.add(LSTM(88, activation='linear', input_shape=(1, num)))
    model.add(Dropout(0.1))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='mae', optimizer=optimizer, metrics=['mse'])
    result = model.fit(reshape_X_train,
                       reshape_y_train,
                       epochs=30000,
                       shuffle=False,
                       callbacks=[
                           EarlyStopping(monitor='loss',
                                         min_delta=0.001,
                                         mode='min',
                                         patience=30)
                       ],
                       batch_size=1280)
    evaluate = model.evaluate(reshape_X_test, reshape_y_test)
    pred = model.predict(validation)
    old = model.predict(reshape_scaled_train)
    print(minmax.inverse_transform(pred))
    print(evaluate)

    plt.figure(1)
    plt.plot(result.history['loss'])
    plt.title('loss' + str(round(result.history['loss'][-1], 6)))
    plt.show()
    plt.figure(2)
    plt.plot(minmax.inverse_transform(old))
    plt.plot(minmax.inverse_transform(target))
    plt.show()
    plt.figure(3)
    plt.plot(minmax.inverse_transform(pred))
    plt.show()
Example #5
    def compile(self):
        optimizer = Nadam(lr=self.lr,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          schedule_decay=0.004)
        # optimizer = SGD()
        self.probabilityNetwork = make_parallel(self.probabilityNetwork, 2)
        self.probabilityNetwork.compile(optimizer=optimizer,
                                        loss='mse',
                                        metrics=['acc', 'mse'])
Example #6
    def __init__(self,
                 input_shape=(778, 576, 1),
                 num_categories=5,
                 parallel_mode=False,
                 verbose=False):
        """
        https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
        https://keras.io/getting-started/functional-api-guide/#shared-layers
        https://blog.keras.io/building-autoencoders-in-keras.html
        https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder_deconv.py
        
        """

        self.num_categories = num_categories

        filters = 128

        img_rows, img_cols, img_chns = input_shape
        input_img = Input(shape=input_shape, name="main_input")

        if verbose:
            print("Network input shape is", input_img.get_shape())

        x = Conv2D(filters, (3, 3),
                   padding='same',
                   activity_regularizer=l2(10e-8))(input_img)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)

        x = Conv2D(filters, (3, 3),
                   padding='same',
                   activity_regularizer=l2(10e-8))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)

        x = Conv2D(filters, (3, 3),
                   padding='same',
                   activity_regularizer=l2(10e-8))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)

        self.framer = Model(input_img, x)
        optimizer = Nadam(lr=0.0002,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          schedule_decay=0.004,
                          clipnorm=0.618)

        self.framer.compile(optimizer=optimizer,
                            loss='categorical_crossentropy',
                            metrics=['acc'])
Example #7
    def compile(self):
        # print("LR: ",self.lr)
        # self.lr = 10**np.random.uniform(-2.2, -3.8)
        optimizer = Nadam(lr=self.lr,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          schedule_decay=0.004)
        # optimizer = SGD()
        # self.probabilityNetwork = make_parallel(self.probabilityNetwork, 2)
        self.probabilityNetwork.compile(
            optimizer=optimizer,
            loss='categorical_crossentropy',
            metrics=['acc', 'mse', 'categorical_crossentropy'])
Example #8
def res_net(input_shape, hns=[8, 6, 4, 4], classes=2):
    """
    """
    inputs = Input(shape=input_shape)
    x = BatchNormalization()(inputs)
    x = identity_block(x, hns[0], name='block0', dropout=False)
    x = identity_block(x, hns[1], name='block1', dropout=False)
    x = identity_block(x, hns[2], name='block2', dropout=False)
    #x = identity_block(x, hns[3], name = 'block3', dropout = True)
    x = Dense(1, name='pre_sigmoid')(x)
    x = BatchNormalization()(x)
    proba = Activation('sigmoid')(x)
    model = Model(inputs, proba)  # expose the sigmoid probability as the model output
    model.compile(optimizer=Nadam(lr=0.001), loss='binary_crossentropy')

    return model
Example #9
def boosting_parallel_res_net(input_shape, hns=[8, 6, 4, 7], classes=2):
    """
    """
    boost_input = Input(shape=(1,))
    # res_module
    res_shape = (input_shape[0] - 1,)
    boost_res_net_model = boosting_res_net(input_shape)
    res_inputs = Input(shape=res_shape)

    boost_res_net_out_list = [boost_res_net_model([res_inputs, boost_input]) for _ in range(8)]
    boost_res_net_out = concatenate(boost_res_net_out_list, axis=1)

    x = Dense(4, activation='sigmoid')(boost_res_net_out)
    proba = Dense(1, activation='sigmoid')(x)
    model = Model([res_inputs, boost_input], proba)
    model.compile(optimizer=Nadam(lr=0.001), loss='binary_crossentropy')

    return model
Example #10
def boosting_dnn(input_shape, hns=[8, 6, 4, 7], classes=2):
    """
    """
    inputs = Input(input_shape)
    boost_input = Lambda(lambda x: x[:, -1])(inputs)
    # dnn_input = Lambda(lambda x: x[:, :-1])(inputs)
    dnn_input = inputs
    #dnn_module
    # dnn_model = create_dnn((input_shape[0] - 1,), hns)
    dnn_model = create_dnn((input_shape[0],), hns)
    dnn_pre_sigmoid = Model(dnn_model.input, dnn_model.get_layer('pre_sigmoid').output)(dnn_input)
    # boost
    pre_sigmoid = Add(name='pre_sigmoid')([dnn_pre_sigmoid, boost_input])
    proba = Activation('sigmoid')(pre_sigmoid)

    model = Model(inputs, proba)
    model.compile(optimizer=Nadam(lr=0.001), loss='binary_crossentropy')

    return model
Example #11
def boosting_res_net(input_shape, hns=[128, 64, 16, 4], classes=2, out_layer_name=None):
    """
    """
    inputs = Input(input_shape)
    boost_input = Lambda(lambda x: x[:, -1])(inputs)
    # res_module
    res_inputs = Lambda(lambda x: x[:, :-1])(inputs)
    res_model = res_net((input_shape[0] - 1, ), hns)
    #res_inputs = inputs
    #res_model = res_net(input_shape, hns)
    res_pre_sigmoid = Model(res_model.input, res_model.get_layer('pre_sigmoid').output)(res_inputs)
    # boost
    pre_sigmoid = Add(name='pre_sigmoid')([res_pre_sigmoid, boost_input])
    proba = Activation('sigmoid', name=out_layer_name)(pre_sigmoid)

    model = Model(inputs, proba)
    model.compile(optimizer=Nadam(lr=0.001), loss='binary_crossentropy')

    return model
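
The boosting_* examples above share one wiring pattern: the last input column is treated as a precomputed boost logit and added to the network's 'pre_sigmoid' output before the final activation. A self-contained sketch of that pattern using only standard tf.keras (the layer sizes and feature count are illustrative, not the original hns values, and the boost slice is kept two-dimensional unlike the `x[:, -1]` in the originals):

from tensorflow.keras.layers import Input, Dense, Lambda, Add, Activation
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Nadam

n_features = 10                                    # illustrative feature count
inputs = Input(shape=(n_features + 1,))            # features plus a trailing boost logit
boost = Lambda(lambda x: x[:, -1:])(inputs)        # keep a (batch, 1) shape for Add
feats = Lambda(lambda x: x[:, :-1])(inputs)
hidden = Dense(16, activation='relu')(feats)
pre_sigmoid = Dense(1, name='pre_sigmoid_demo')(hidden)
boosted = Add()([pre_sigmoid, boost])              # the boost term shifts the logit
proba = Activation('sigmoid')(boosted)
model = Model(inputs, proba)
model.compile(optimizer=Nadam(learning_rate=0.001), loss='binary_crossentropy')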
Example #12
    def _generate_precache_model(self, batches):
        if self.model_nontrainable is None:
            last_non_trainable = None
            mdl = Sequential()
            for i in range(len(self.model.layers)):
                if self.model.layers[i].trainable:
                    break
                last_non_trainable = self.model.layers[i]
                mdl.add(self.model.layers[i])

            if last_non_trainable is None:
                print(
                    "All layers trainable. Don't know what to cache. Exiting")
                return None

            print("Cached model - using output of", last_non_trainable.name)
            mdl.compile(optimizer=Nadam(),
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])
            self.model_nontrainable = mdl

        cnt = batches.full_step_count()
        batch_data = batches.iter.next()
        data = []
        label = []
        for i in range(cnt):
            batch_data = batches.iter.next()
            if i % 10 == 0:
                print("Loaded {0} datapoints\r".format(i * batches.batch_size),
                      end='',
                      flush=True)
            data.append(batch_data[0])
            label.append(batch_data[1])
        imgs = np.concatenate(data)
        labels = np.concatenate(label)
        y = self.model_nontrainable.predict(imgs, verbose=1)
        print(y.shape)
        return {'data': y, 'labels': labels}
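
Examples #1 and #12 together implement feature caching: the frozen, non-trainable stem is run once over the data, and only the trainable head is fit on the cached activations. A minimal self-contained sketch of that idea with stand-in models and random placeholder data (the shapes are illustrative only):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Flatten, Dense
from tensorflow.keras.optimizers import Nadam

# stand-in for the frozen stem producing 14x14x512 feature maps
frozen_stem = Sequential([Input(shape=(14, 14, 512)), Flatten()])

# stand-in for the trainable head that consumes the cached activations
head = Sequential([Input(shape=(14 * 14 * 512,)), Dense(10, activation='softmax')])
head.compile(optimizer=Nadam(), loss='categorical_crossentropy', metrics=['accuracy'])

imgs = np.random.rand(8, 14, 14, 512).astype('float32')      # placeholder batch
labels = np.eye(10)[np.random.randint(0, 10, size=8)]        # placeholder one-hot labels

cached = frozen_stem.predict(imgs, verbose=0)                 # run the frozen part once
head.fit(cached, labels, epochs=1, verbose=0)                 # train only the head on cached features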
Example #13
    x = GlobalAveragePooling2D()(x)
    x = Dense(units=53, activation='softmax', name='output_predictions')(x)
    model = Model(inputs=input_image, outputs=x, name='Classifier')

# In[3]:

parallel_model = multi_gpu_model(model=model, gpus=4)
tb = TensorBoard(log_dir='logs', write_graph=True)
mc = ModelCheckpoint(filepath='models/top_weights.h5',
                     monitor='val_acc',
                     save_best_only=True,
                     save_weights_only=True,
                     verbose=1)
es = EarlyStopping(monitor='val_loss', patience=15, verbose=1)
rlr = ReduceLROnPlateau()
callbacks = [tb, mc, es, rlr]
nadam = Nadam(lr=1e-3)
parallel_model.compile(optimizer=nadam,
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])

# In[ ]:

parallel_model.fit_generator(train_generator,
                             steps_per_epoch=train_steps,
                             epochs=epochs,
                             validation_data=validation_generator,
                             validation_steps=val_steps,
                             workers=8,
                             callbacks=callbacks)
Example #14
#!/usr/bin/env python3
import tensorflow as tf
from tensorflow.python.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.python.keras.optimizers import Nadam

from neural_network_solver.dataset import dataset
from neural_network_solver.model import autoencoder

dataset_generator = dataset()

model = autoencoder()
model.compile(optimizer=Nadam(lr=0.001),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=['accuracy'])

history = model.fit(dataset_generator,
                    validation_split=0.2,
                    batch_size=128,
                    epochs=999,
                    verbose=1,
                    callbacks=[
                        EarlyStopping(patience=8, verbose=1),
                        ReduceLROnPlateau(patience=4, min_lr=1e-05),
                        ModelCheckpoint(
                            './neural_network_solver/models/autoencoder.hdfs',
                            monitor='val_loss',
                            verbose=False,
                            save_best_only=True,
                            save_weights_only=False,
                            mode='auto',
                        )
                    ])
Example #15
            y_i = [0, 1]

    except Exception as e:
        print(e)
        break

    X.append(x_i)
    Y.append(y_i)

X = [(np.array(x) - np.mean(x)) / np.std(x)
     for x in X]  # comment it to remove normalization
X, Y = np.array(X), np.array(Y)

X_train, X_test, Y_train, Y_test = create_Xt_Yt(X, Y)

opt = Nadam(lr=0.001)

model = Sequential()

model.add(Dense(64, input_dim=30, activity_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(LeakyReLU())

model.add(Dropout(0.5))

model.add(Dense(16, activity_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(LeakyReLU())

model.add(Dense(2))
model.add(Activation('softmax'))
Example #16
import common_function
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.optimizers import Nadam
import os
import asyncio
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.layers import Dropout, Flatten, Dense, Activation
from tensorflow.python.keras.models import load_model, Model

# Just disables the warning, doesn't enable AVX/FMA (no GPU)
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

epochs = 40
l_rate = 1.0e-4
decay = l_rate / epochs
nadam = Nadam(lr=l_rate, beta_1=0.9, beta_2=0.999)
batch_size = 32
img_width, img_height = 24, 24
path_data_set = './ytd'
input_img, merged = model_3_branch.get_model(img_width, img_height)
num_train_images = 424961  # training images: 424961  # total images: 605855
file_path = 'tbe_cnn_ytd_nadam.h5'

datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)

Example #17
    def __init__(self,
                 input_shape=(64, 64, 1),
                 num_categories=5,
                 parallel_mode=False,
                 verbose=False):
        """
        https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
        https://keras.io/getting-started/functional-api-guide/#shared-layers
        https://blog.keras.io/building-autoencoders-in-keras.html
        https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder_deconv.py
        
        """

        self.num_categories = num_categories

        # number of convolution filters
        filters = 128

        latent_dim = 64
        epsilon_std = 1.0
        noise_std = .01

        img_rows, img_cols, img_chns = input_shape
        input_img = Input(shape=input_shape, name="main_input")

        if verbose:
            print("Network input shape is", input_img.get_shape())

        x = Conv2D(filters, (3, 3),
                   padding='same',
                   activity_regularizer=l2(10e-8))(input_img)
        x = LeakyReLU(alpha=0.05)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(filters, (3, 3),
                   padding='same',
                   activity_regularizer=l2(10e-8))(x)
        x = LeakyReLU(alpha=0.05)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(filters, (3, 3),
                   padding='same',
                   activity_regularizer=l2(10e-8))(x)
        x = LeakyReLU(alpha=0.05)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((2, 2), padding='same', name="encoded")(x)

        conv_output_shape = K.int_shape(x)

        intermediate_dim = (conv_output_shape[1] * conv_output_shape[2] *
                            conv_output_shape[3])

        if verbose:
            print("Convolution output shape is", conv_output_shape)

        x = Flatten()(x)

        hidden = Dense(intermediate_dim, activation='selu')(x)

        z_mean = Dense(latent_dim)(hidden)
        z_log_var = Dense(latent_dim)(hidden)

        def sampling(args):
            z_mean, z_log_var = args
            epsilon = K.random_normal(shape=K.shape(z_log_var), mean=0., stddev=noise_std)
            return z_mean + K.exp(0.5 * z_log_var) * epsilon

        z = Lambda(sampling)([z_mean, z_log_var])

        encoding_shape = K.int_shape(z)

        if verbose:
            print("Encoding shape is", encoding_shape, "(", latent_dim,
                  "dimensions )")

        # this model maps an input to its encoded representation
        encoder = Model(input_img, z)

        # next, declare the auto_encoding output side

        n = 0  # counts the decoder layers so they can be re-wired below via autoencoder.layers[-n:]
        n += 1
        ae = Dense(intermediate_dim, activation='selu')(z)
        n += 1
        ae = Reshape(conv_output_shape[1:])(ae)
        n += 1
        ae = Conv2DTranspose(filters, (3, 3), padding='same')(ae)
        n += 1
        ae = LeakyReLU(alpha=0.05)(ae)
        n += 1
        ae = BatchNormalization()(ae)
        n += 1
        ae = UpSampling2D((2, 2))(ae)
        n += 1
        ae = Conv2DTranspose(filters, (3, 3), padding='same')(ae)
        n += 1
        ae = LeakyReLU(alpha=0.05)(ae)
        n += 1
        ae = BatchNormalization()(ae)
        n += 1
        ae = UpSampling2D((2, 2))(ae)
        n += 1
        ae = Conv2DTranspose(filters, (3, 3), padding='same')(ae)
        n += 1
        ae = LeakyReLU(alpha=0.05)(ae)
        n += 1
        ae = UpSampling2D((2, 2))(ae)
        n += 1
        decoded = Conv2D(1, (2, 2),
                         padding='same')(ae)  # activation='sigmoid',

        if verbose:
            print("Decoder output shape is", decoded.get_shape())

        # this is a pipe from the input image to the reconstructed output
        autoencoder = Model(input_img, decoded)

        # use right side of architecture encoded input to construct an image
        encoded_input = Input(shape=encoding_shape[1:])
        deco = encoded_input
        for l in range(-n, 0):
            deco = autoencoder.layers[l](deco)
        decoder = Model(encoded_input, deco)

        # and then, the classifier
        n = 0  # counts the classifier layers so they can be re-wired onto encoded inputs below
        n += 1
        cl = Dense(filters, activation='selu')(z)
        n += 1
        cl = AlphaDropout(0.1)(cl)
        n += 1
        cl = Dense(filters, activation='selu')(cl)
        n += 1
        cl = AlphaDropout(0.1)(cl)
        n += 1
        cl = Dense(filters, activation='selu')(cl)
        n += 1
        classified = Dense(num_categories, activation='softmax')(cl)

        if verbose:
            print("Classifier output shape is", classified.get_shape())

        # provide classification on images
        imageclassifier = Model(input_img, classified)
        # imageclassifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

        # and classifications on encoded representations
        encoded_input = Input(shape=(encoding_shape[1:]))
        fc = encoded_input
        for l in range(-n, 0):
            fc = imageclassifier.layers[l](fc)
        featureclassifier = Model(encoded_input, fc)

        # featureclassifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

        # Some KL loss
        def vae_objective(x, x_decoded):
            kl_loss = -0.5 * K.sum(
                1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            base_loss = tf.reduce_sum(metrics.binary_crossentropy(x, x_decoded),
                                      axis=[-1, -2])
            return base_loss + kl_loss

        # Finally, compile the full model (1 input, 2 outputs)
        classycoder = Model(inputs=[input_img], outputs=[decoded, classified])
        #           = make_parallel(classycoder, 2) # Todo?
        optimizer = Nadam(lr=0.0002,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          schedule_decay=0.004,
                          clipnorm=0.618)
        #         = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.0)

        classycoder.compile(optimizer=optimizer,
                            loss=[vae_objective, 'categorical_crossentropy'],
                            loss_weights=[0.1, 0.9],
                            metrics=['acc'])

        # API mapping

        self.encoding_dims = latent_dim
        self.encoding_shape = encoding_shape

        # Receive an image and encode it into its latent-space representation
        if parallel_mode: self.encoder = make_parallel(encoder, config.GPUs)
        else: self.encoder = encoder

        # reconstructs an image from its encoded representation
        if parallel_mode: self.decoder = make_parallel(decoder, config.GPUs)
        else: self.decoder = decoder

        # direct pipe from input_image to its classification
        if parallel_mode:
            self.imageclassifier = make_parallel(imageclassifier, config.GPUs)
        else:
            self.imageclassifier = imageclassifier

        # direct pipe from encoded representation to classification
        if parallel_mode:
            self.featureclassifier = make_parallel(featureclassifier,
                                                   config.GPUs)
        else:
            self.featureclassifier = featureclassifier

        # multiple output model, train this for the rest to work. # todo: make_parallel
        self.classycoder = classycoder
Example #18
    def compile(self):
        self.model.compile(optimizer=Nadam(),
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
        print("Model compiled")
Example #19
    x = Flatten()(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(2, activation='softmax')(x)  # UNSW-NB15 is 2 and 10, CICIDS2017 is 15
    model = Model(inputs=input_singal, outputs=x)
    return model


model = build_model()
model.summary()

time_start = time.time()

reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                              factor=0.1,
                                              patience=10)
nadam = Nadam(lr=0.008, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
model.compile(loss="categorical_crossentropy", optimizer=nadam, metrics=["accuracy"])  # use the Nadam instance configured above

save_dir = os.path.join(os.getcwd(), 'model_save')
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filename = "model_{epoch:02d}.hdf5"
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
    os.path.join(save_dir, filename), monitor='val_acc'
    , verbose=1, save_best_only=False)
# checkpoint = tf.train.Checkpoint(model=model)
# checkpoint.restore(tf.train.latest_checkpoint("../dataset/SGM-CNN/classification decision/model_save"))
history = model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=batch_size,
                    verbose=2,