Example #1
from tensorflow.keras.optimizers import Adam

from cnn import cnn
from model_preprocessor import Preprocessor

# Define Deep Learning Model
model = cnn()

x_train, x_test, y_train, y_test = Preprocessor.load_data_binary(10000)

''' Training phase '''
epochs = 15
batch_size = 64
adam = Adam(learning_rate=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])

model.fit(x_train,
          y_train,
          epochs=epochs,
          batch_size=batch_size,
          validation_split=0.11)
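
The snippet splits off x_test and y_test but never uses them. A minimal follow-up sketch, assuming the model and variables defined above, that scores the trained binary classifier on the held-out set:

# Score the trained model on the held-out test split
loss, accuracy = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
print(f"Test loss: {loss:.4f}, test accuracy: {accuracy:.4f}")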
Example #2
        model.add(Dropout(0.2))
        # return_sequences=True keeps per-timestep outputs so the
        # attention layer below can weight them
        model.add(LSTM(units=128, return_sequences=True))
        model.add(Dropout(0.5))
        # Self-attention over the LSTM output sequence (keras-self-attention)
        model.add(SeqSelfAttention(attention_activation='relu'))
        model.add(Flatten())
        model.add(Dense(9344, activation='relu'))
        model.add(Dropout(0.5))
        # 21-way softmax output layer
        model.add(Dense(21, activation='softmax'))

        return model


with tf.device("/GPU:0"):

    # Load data using model preprocessor
    x_train, x_test, y_train, y_test = Preprocessor.load_data()

    # Define Deep Learning Model
    model_name = "LSTM_ATT"
    model = lstm_att()

    # Define early stopping and checkpoint callbacks
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
    mc = ModelCheckpoint(filepath='./trained_models/' + model_name + '.hdf5',
                         monitor='val_loss',
                         mode='min',
                         save_best_only=True,
                         verbose=1)
    ''' Training phase '''
    epochs = 10
    batch_size = 64
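
The excerpt stops just before the training call it is setting up. A hedged sketch of how the pieces above would typically be wired together, assuming the 21-class model from lstm_att(); the compile settings and validation_split are illustrative, not from the original:

from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam

# Hedged: the excerpt omits the compile call; a categorical setup
# matching the 21-way softmax output is assumed here
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          epochs=epochs,
          batch_size=batch_size,
          validation_split=0.11,   # illustrative, not from the original
          callbacks=[es, mc])

# Reloading the checkpointed .hdf5 later requires registering the custom
# attention layer; keras-self-attention provides get_custom_objects() for this
best = load_model('./trained_models/' + model_name + '.hdf5',
                  custom_objects=SeqSelfAttention.get_custom_objects())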
Example #3
        hidden2 = BatchNormalization()(hidden2)  # 'mode' was a Keras 1 argument, removed in Keras 2
        hidden2 = Dropout(0.5)(hidden2)

        # Output layer (last fully connected layer)
        output = Dense(4, activation='softmax', name='output')(hidden2)

        # Build the functional model ('inputs'/'outputs' keywords per Keras 2+)
        model = Model(inputs=[main_input], outputs=[output])

        return model


with tf.device("/GPU:0"):

    # Load data using model preprocessor
    x_train, x_test, y_train, y_test = Preprocessor.load_data_multi()

    # Define Deep Learning Model
    model_name = "ENSEMBLE_CNN_BILSTM_MULTI"
    model = cnn_bilstm_att()

    # Define early stopping and checkpoint callbacks
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
    mc = ModelCheckpoint(filepath='./trained_models/' + model_name + '.hdf5',
                         monitor='val_loss',
                         mode='min',
                         save_best_only=True,
                         verbose=1)
    ''' Training phase '''
    epochs = 10
    batch_size = 64
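
As in Example #2, the excerpt ends before training starts. A minimal sketch of the remaining steps under the same assumptions (compile settings and validation_split are illustrative; the other names are as defined above):

from tensorflow.keras.optimizers import Adam

# Hedged: the compile call is not shown in the excerpt; a categorical
# setup matching the 4-way softmax output is assumed here
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          epochs=epochs,
          batch_size=batch_size,
          validation_split=0.11,   # illustrative, not from the original
          callbacks=[es, mc])

# Score the multi-class model on the held-out split
loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"{model_name}: test accuracy {accuracy:.4f}")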