# --- Prepare the held-out test set ---------------------------------------
# NOTE(review): `dataset` is presumably a pandas DataFrame with 'Statement'
# (raw text) and 'Label' (int-like class) columns — confirm against loader.
texts = dataset['Statement']
texts = texts.map(lambda x: clean_text(x))

# Labels -> ints -> encoded column vector of shape (n_samples, 1).
label = dataset['Label'].astype(int).values.tolist()
labelEncoder = LabelEncoder()
encoded_label = labelEncoder.fit_transform(label)
y_test = np.reshape(encoded_label, (-1, 1))

# Tokenize with the tokenizer fitted on the TRAINING corpus, then pad/truncate
# every sequence to `time_step` tokens (post-padding with zeros).
encoded_test = tokenizer_train.texts_to_sequences(texts=texts)
X_test = sequence.pad_sequences(encoded_test, maxlen=time_step, padding='post')

# Vocabulary size is fixed by the pretrained embedding matrix (rows = tokens).
vocab_size = embedding_matrix.shape[0]

###############################################################################
# Build five identically-configured ensemble members in one place instead of
# five duplicated calls; reuse `vocab_size` rather than re-reading the shape.
model_1, model_2, model_3, model_4, model_5 = (
    create_model(vocabulary_size=vocab_size,
                 embedding_size=100,
                 embedding_matrix=embedding_matrix)
    for _ in range(5)
)
models = []
print('Fold: ', Fold)

# Slice this fold's train/validation partitions out of the full training set.
X_train_train = X_train[train]
X_train_val = X_train[val]
y_train_train = y_train[train]
y_train_val = y_train[val]

print("Initializing Callback :/...")
# Checkpoint path for the callback-saved weights of this fold.
model_name = (
    f'Models/Bi_LSTM/Cross_Validation/Callbacks/FR/'
    f'Model_cv_bi_lstm_FR_1_Callbacks_kfold_{Fold}.h5'
)
cb = callback(model_name=model_name)

# create model
print("Creating and Fitting Model...")
model = create_model(
    vocabulary_size=vocab_size,
    embedding_size=embedding_size,
    embedding_matrix=embedding_matrix,
)
history = model.fit(
    X_train_train,
    y_train_train,
    validation_data=(X_train_val, y_train_val),
    epochs=10,
    batch_size=128,
    shuffle=True,
    callbacks=cb,
)

# Save each fold model
print("Saving Model...")
model_name = (
    f'Models/Bi_LSTM/Cross_Validation/FR/'
    f'Model_cv_bi_lstm_FR_1_kfold_{Fold}.h5'
)
model.save(model_name)