# Compile the Keras model for (multi-class) classification.
# NOTE(review): 'categorical_crossentropy' expects one-hot targets, yet the
# prediction code below rounds a single output column — confirm the output
# layer / loss combination is intentional.
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# Fit the Keras model on the dataset.
model.fit(x_train, y_train, epochs=150, batch_size=10)

# Evaluate the Keras model.
# This evaluates on the TRAINING data, so the reported number is train
# accuracy (the original label said "Test Accuracy", which was misleading).
_, accuracy = model.evaluate(x_train, y_train)
print('Train Accuracy: %.2f' % (accuracy * 100))

# Make probability predictions with the model.
predictions = model.predict(x_test)
# Round probabilities to hard 0/1 class labels.
rounded = [round(x[0]) for x in predictions]

# Make class predictions with the model.
# `Sequential.predict_classes` was removed in TensorFlow 2.6; for a
# single-unit sigmoid output, thresholding the probabilities at 0.5 is the
# documented equivalent.
predictions = (model.predict(x_test) > 0.5).astype('int32')

# In[89]:
# Plot accuracy comparisons between scenario 1 (assumed features) and
# scenario 2 (features from analysis).
plot_data = pd.read_excel(
    r'C:\Users\Life\Desktop\GMU\CS-504\dataset\AccuracyResults.xlsx')
plot_data

# In[98]:
# Plot accuracy comparisons between scenario 1 (assumed features) and
# scenario 2 (features from analysis):
# Logistic Regression, SVM, Naive Bayes.
# NOTE(review): this re-reads the same workbook as the cell above — a
# notebook-cell artifact; kept so each exported cell remains self-contained.
plot_data = pd.read_excel(
    r'C:\Users\Life\Desktop\GMU\CS-504\dataset\AccuracyResults.xlsx')
# Output layer: a single sigmoid unit for binary classification.
model.add(Dense(1, activation='sigmoid'))

# Compile the Keras model (binary classification).
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# Fit the Keras model.
model.fit(np.array(predictor), np.array(y_train), epochs=10, batch_size=10)

# Plot the per-epoch training history (loss/accuracy curves).
model_loss = pd.DataFrame(model.history.history)
model_loss.plot()

# Evaluate Keras model accuracy.
# NOTE(review): the model was fit on `predictor` but evaluated on `pred_dt` —
# confirm these refer to the same training matrix.
_, accuracy = model.evaluate(pred_dt, y_train)
print('Accuracy: %.2f' % (accuracy * 100))

# `Sequential.predict_classes` was removed in TensorFlow 2.6; for a
# single-unit sigmoid model, thresholding predict() at 0.5 is equivalent.
train_pred = (model.predict(predictor) > 0.5).astype('int32')
print(classification_report(y_train, train_pred))
print(confusion_matrix(y_train, train_pred))
# [[286 1]

# Building KNN model -----------------------------------------------
from sklearn.neighbors import KNeighborsClassifier as KNC

knn = KNC(n_neighbors=7)
knn.fit(predictor, y_train.values.ravel())
y_train_pred = knn.predict(predictor)
knn.score(predictor, y_train)  # 81.86% on training data

# Check prediction accuracy on the training data and classification errors.
print(confusion_matrix(y_train, y_train_pred))
print(classification_report(y_train, y_train_pred))  # accuracy ~ 82%