# Plot a confusion matrix for a fitted classifier on held-out test data.
from sklearn.metrics import confusion_matrix as cf
import matplotlib.pyplot as plt


def plot_confusion(classifier, test_pts, test_labels):
    classes = ['STANDING', 'SITTING', 'LYING', 'WALKING',
               'WALKING_DOWNSTAIRS', 'WALKING_UPSTAIRS']
    # Shortened names so the tick labels fit on the axes
    cl = ['STANDING', 'SITTING', 'LYING', 'WALK', 'WALK_DOWN', 'WALK_UP']
    pred_label = classifier.predict(test_pts)
    result = cf(test_labels, pred_label, labels=classes)
    # Row-normalized variant from an earlier draft (see the vectorized
    # sketch below):
    # res_nor = np.ndarray((6, 6), dtype=float)
    # for i in range(0, 6):
    #     s = result[i].sum()
    #     for j in range(0, 6):
    #         res_nor[i][j] = float(result[i][j] / s)
    print(result)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(result)
    fig.colorbar(cax)
    # matshow places an extra tick before the first cell, hence the leading ''
    ax.set_xticklabels([''] + cl)
    ax.set_yticklabels([''] + cl)
    plt.xlabel("Predicted Label")
    plt.ylabel("True Label")
    plt.show()
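# A vectorized sketch (plain NumPy; not from the original) of the row
# normalization drafted in the commented-out loop above: dividing each row
# by its sum turns counts into per-true-class fractions. The matrix below
# is an illustrative example, not real data.
import numpy as np

result = np.array([[50, 2, 1],
                   [4, 40, 3],
                   [0, 5, 45]])  # example 3-class count matrix
res_nor = result / result.sum(axis=1, keepdims=True)
print(np.round(res_nor, 2))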
import pandas as pd
import seaborn as sns


def print_out(name, one, two):
    # Save a labelled confusion-matrix heatmap to <name>.png;
    # `one` is the true labels, `two` the predictions, and `labels`
    # is assumed to be defined at module scope.
    plt.figure()
    df = pd.DataFrame(cf(one, two), index=labels, columns=labels)
    sns.heatmap(df, annot=True, cmap='Blues', fmt='g')
    plt.savefig(name + '.png')
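# Hypothetical usage of print_out (the names below are illustrative, not
# from the source). Note that cf() orders its rows and columns by sorted
# label values, so `labels` should be sorted the same way for the heatmap
# axes to line up.
labels = ['no', 'yes']                     # assumed module-level label list
y_true = ['yes', 'no', 'yes', 'yes', 'no']
y_pred = ['yes', 'no', 'no', 'yes', 'no']
print_out('heatmap_demo', y_true, y_pred)  # writes heatmap_demo.png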
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier


def DTtest(trainData, trainLabel, size):
    train_data, test_data, train_label, test_label = \
        train_test_split(trainData, trainLabel, test_size=size)
    dt = DecisionTreeClassifier()
    dt.fit(train_data, train_label)
    predict = dt.predict(test_data)  # predict() takes only the features

    # number of mismatched labels
    error = sum(p != t for p, t in zip(predict, test_label))

    # confusion matrix
    print(cf(test_label, predict))

    # accuracy
    print(1 - (error / len(predict)))
    return error
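# The manual error count above is just 1 - accuracy; a hedged equivalent
# using scikit-learn's accuracy_score, assuming `test_label` and `predict`
# as bound inside DTtest:
from sklearn.metrics import accuracy_score

print(accuracy_score(test_label, predict))  # same value as 1 - error / len(predict)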
# Despite its name, this helper returns a confusion matrix (not sklearn's
# text classification report) for a PyTorch model over a test DataLoader.
import torch


def classification_report(model, testloader, device=None):
    if device is not None:
        model.to(device)
    y_true = []
    y_pred = []
    with torch.no_grad():  # inference only; skip building autograd graphs
        for images, labels in testloader:
            if device is not None:
                images = images.to(device)
                labels = labels.to(device)
            y_true.append(labels)
            y_pred.append(model(images))
    y_true = torch.cat(y_true)
    y_pred = torch.cat(y_pred).argmax(dim=1)  # class with the highest score
    return cf(y_true.cpu().numpy(), y_pred.cpu().numpy())
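# A self-contained toy call of classification_report. The data, sizes, and
# the linear stand-in "model" below are illustrative assumptions, not from
# the source.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

X_demo = torch.randn(100, 8)          # 100 samples, 8 features
y_demo = torch.randint(0, 3, (100,))  # 3 classes
loader = DataLoader(TensorDataset(X_demo, y_demo), batch_size=16)
net = nn.Linear(8, 3)                 # stands in for a real network

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(classification_report(net, loader, device=device))  # 3x3 count matrix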
# Same report, but every batch is perturbed by a PGD adversarial attack
# (pgd is assumed to be defined elsewhere in the project) before prediction.
# No torch.no_grad() here: the attack itself needs gradients.
import torch.nn as nn


def classification_report_pgd(model, testloader, device=None, pgd_params=None):
    if pgd_params is None:  # avoid a shared mutable default argument
        pgd_params = {}
    if device is not None:
        model.to(device)
    y_true = []
    y_pred = []
    for images, labels in testloader:
        if device is not None:
            images = images.to(device)
            labels = labels.to(device)
        attacked_images = pgd(model, nn.CrossEntropyLoss(), images, labels,
                              device=device, **pgd_params)
        y_true.append(labels)
        y_pred.append(model(attacked_images))
    y_true = torch.cat(y_true)
    y_pred = torch.cat(y_pred).argmax(dim=1)
    return cf(y_true.cpu().numpy(), y_pred.cpu().numpy())
# Predicted classes from X
Ynewpred = lr.predict(X)
bank["Y Predicted"] = Ynewpred  # add the predictions as a new column of the bank dataframe
YProbability = pd.DataFrame(lr.predict_proba(X))  # per-class predicted probabilities
banknew = pd.concat([bank, YProbability], axis=1)  # dataframe with the probability columns attached

# Confusion matrix between the original y and the y predicted by the model
from sklearn.metrics import confusion_matrix as cf
cf1 = cf(Y, Ynewpred)
print(cf1)
#        y      n
# y [39162    760]
# n [ 4226   1063]
# accuracy = (39162 + 1063) / (39162 + 760 + 4226 + 1063) ≈ 0.89

from sklearn.metrics import roc_curve as rocc
from sklearn.metrics import roc_auc_score as rocarea

fpr, tpr, thresholds = rocc(Y, Ynewpred)
auc = rocarea(Y, Ynewpred)

import matplotlib.pyplot as plt
plt.plot(fpr, tpr, color='red', label='ROC')
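# Note: roc_curve on hard 0/1 predictions yields only a single operating
# point. A hedged sketch using the positive-class probabilities instead,
# assuming lr, X, Y as above and that the positive class is lr.classes_[1]
# (the column order used by predict_proba):
probs = lr.predict_proba(X)[:, 1]
fpr, tpr, thresholds = rocc(Y, probs, pos_label=lr.classes_[1])
auc = rocarea(Y, probs)
plt.plot(fpr, tpr, color='red', label='ROC (AUC = %.2f)' % auc)
plt.plot([0, 1], [0, 1], linestyle='--', color='grey')  # chance line
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='best')
plt.show()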
    # (tail of the fold-construction loop)
    test_indices, y_test = filter_indices(test_i, labels_reduced)
    cv.append((train_indices, labels, test_indices, y_test))

y = []                        # result list, one entry per CV fold
confusion = np.zeros((5, 5))  # accumulated confusion matrix
for train_indices, labels, test_indices, y_test in cv:
    Rferns = FernEnsemble(ps, fn, fs, dist=dist)
    Rferns.train(polsar_reduced, train_indices, labels)
    y_pred = Rferns.predict(polsar_reduced, test_indices, prediction='maximum')
    y.append((y_pred, y_test))       # save each fold's prediction and true labels
    confusion += cf(y_test, y_pred)  # add this fold's results to the matrix

name = str(dist) + '_ps' + str(ps) + '_fn' + str(fn) + '_fs' + str(fs)
pickle.dump(y, open(name + '.p', "wb"))  # save the results as a pickle file for later inspection

################################################################################################
####################### Visualization of Output ################################################
################################################################################################
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
from matplotlib import gridspec
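# A hedged caveat on the accumulation above: cf() infers the label set from
# each fold, so a fold that happens to miss one of the 5 classes returns a
# matrix smaller than 5x5 and the += fails. Passing labels= pins the shape
# (class ids 0..4 are an assumption here). Standalone demonstration:
from sklearn.metrics import confusion_matrix as cf

print(cf([0, 0, 1], [0, 1, 1]).shape)                          # (2, 2)
print(cf([0, 0, 1], [0, 1, 1], labels=[0, 1, 2, 3, 4]).shape)  # (5, 5)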
    # Dropout layer for regularization
    X = Dropout(0.5)(X)
    # Sigmoid output squashes the activations into a probability
    X = Dense(1, activation='sigmoid')(X)
    model = Model(inputs=X_input, outputs=X, name="CNN")
    return model


model = model_nn(input_shape=(64, 64, 1))
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])

# Load the dataset
X_train_orig, X_test_orig, Y_train, Y_test = load_dataset()

# Normalize image vectors
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.

model.fit(X_train, Y_train, epochs=15, batch_size=32)
y_pred = model.predict(X_test)
# The model outputs real numbers in [0, 1]; values above 0.5 count as class 1
y_pred = (y_pred > 0.5)

# Confusion matrix
print(cf(Y_test, y_pred))

# Save the model for further use
model.save('models/CNN_Model.h5', overwrite=True)
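# For a binary task the 2x2 matrix unpacks neatly with ravel(); a hedged
# sketch assuming Y_test and the thresholded y_pred from above (flattened in
# case they are column vectors):
tn, fp, fn, tp = cf(Y_test.ravel(), y_pred.ravel().astype(int)).ravel()
print('TN=%d  FP=%d  FN=%d  TP=%d' % (tn, fp, fn, tp))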
def confusion_matrix(self):
    # Confusion matrix between the held-out labels and the final predictions
    assert self.y_test.shape == self.final_pred.shape
    matrix = cf(self.y_test, self.final_pred)
    print(matrix)