def train_mlp():
    """Train the module-level MLP classifier on the CSV dataset and report scores.

    Reads ``file_name`` (module-level path), shuffles the rows, scales the
    first 14 feature columns, splits 80/20, trains the module-level ``mlp``,
    prints timing/accuracy, and plots a confusion matrix via ``helper``.

    Side effects: mutates the global ``mlp``, sets numpy print precision,
    and opens a matplotlib figure.
    """
    dataset = pd.read_csv(file_name, header=0)
    data = dataset.iloc[:, :].values
    np.random.shuffle(data)

    # Vectorized scaling instead of the original per-row loop; dividing the
    # slice (rather than in-place `/=`) also yields a float array even when
    # the CSV columns parse as integers.
    X = data[:, :14] / 100
    y = data[:, 14:]

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.2, random_state=42)

    print("vai")
    start = time.time()
    # BUG FIX: the original called mlp.fit(X, y.ravel()), training on the
    # ENTIRE dataset — including the 20% test split created just above —
    # which leaks test data and inflates the reported test score.
    mlp.fit(X_train, y_train.ravel())
    end = time.time()

    print("Iter number: " + str(mlp.n_iter_))
    print("Tempo de treinamento: " + str(round(end - start, 2)) + " segundos")

    y_pred = mlp.predict(X_test)
    print("Training set score: %f" % mlp.score(X_train, y_train))
    print("Test set score: %f" % mlp.score(X_test, y_test))

    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
    plt.figure()
    helper.plot_confusion_matrix(cnf_matrix, classes=class_names,
                                 title='Matriz de confusão')
    plt.show()
def print_test_accuracy(show_example_errors=False, show_confusion_matrix=False):
    """Print test-set classification accuracy; optionally plot diagnostics.

    Runs the module-level ``predict_cls_test``/``cls_accuracy`` helpers and
    prints the accuracy ratio.  When requested, also plots example
    misclassifications and the confusion matrix via ``helper``.
    """
    correct, cls_pred = predict_cls_test()
    acc, n_correct = cls_accuracy(correct)
    n_images = len(correct)

    print('Accuracy on Test-set: {0:.2%} ({1}/{2})'.format(acc, n_correct, n_images))

    if show_example_errors:
        print('Example Errors:')
        helper.plot_example_errors(data, cls_pred, correct)

    if show_confusion_matrix:
        print('Confusion Matrix:')
        helper.plot_confusion_matrix(cls_pred, data.test.cls, num_classes)
def main():
    """Full training pipeline: load data, train the RNN, test, and plot results."""
    # Show the run's hyperparameters up front.
    print("================ Hyperparameters ====================")
    statprinter()

    # Build transforms and the three dataset splits.
    tfm = dataTransforms()
    train_ds = trainProcess(tfm)
    test_ds = testProcess(tfm)
    valid_ds, input_dimension = validProcess(tfm)

    print("================LOADING DATA====================")
    train_dl, test_dl, valid_dl = getLoaders(train_ds, test_ds, valid_ds)
    printDataStats(train_ds, test_ds, valid_ds)

    # NOTE(review): `input_dimension` from validProcess is never used — the
    # model is built from the global `input_size` instead; confirm intended.
    net = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
    criterion, optimizer = setCriteria(net)

    print("================TRAINING MODEL====================")
    train_losses, val_losses = trainModel(train_dl, test_dl, net,
                                          criterion, optimizer)

    # Save the model checkpoint.
    torch.save(net.state_dict(), 'lstm_model.ckpt')

    print("================TEST MODEL====================")
    correct, total, actual, pred = testModel(test_dl, net, len(test_ds))
    acc = 100 * correct / total
    plot(train_losses, val_losses, acc)

    cm = confusion_matrix(actual, pred)
    print("...................................")
    print("Confusion Matrix:", cm)
    print("...................................")
    plt.figure()
    plot_confusion_matrix(cm, classes=classes_str1, normalize=True,
                          title='Normalized confusion matrix')
    perClassAccuracy(correct, total, test_dl, net)
# helper.getPCASIFT() implements PCA-SIFT analysis on a given image.
# Extract descriptors for every test image (appends to the pre-existing
# `des_list` defined earlier in the file).
for image_path in image_paths:
    des_list.append((image_path, helper.getPCASIFT(image_path)))

# Stack all descriptors vertically into one (N, dim) array.
# BUG FIX: the original iterated des_list[0:], which stacked the FIRST
# image's descriptors a second time on top of the seed row — skewing the
# bag-of-words histograms; start from index 1 instead.
descriptors = des_list[0][1]
for image_path, descriptor in des_list[1:]:
    descriptors = np.vstack((descriptors, descriptor))

# Bag-of-features histogram: count vocabulary-word occurrences per image.
test_features = np.zeros((len(image_paths), num_clusters), "float32")
for i in range(len(image_paths)):
    words, distance = vq(des_list[i][1], voc)
    for w in words:
        test_features[i][w] += 1

# Classify (prediction) with the k-NN model.
retval, results, neigh_resp, dists = clf.findNearest(test_features,
                                                     k=num_neighbors)

# Accuracy over the labelled test images.
correct_count = 0
for i in range(len(results)):
    if results[i] == image_classes[i]:
        correct_count += 1
accuracy = round(correct_count / len(results), 2)

cnf_matrix = confusion_matrix(y_true=image_classes, y_pred=results)

# Plot normalized confusion matrix
helper.plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                             title=str(num_clusters) + ' clusters, ' +
                             str(num_neighbors) + ' neighbors, accuracy=' +
                             str(accuracy))
# NOTE(review): fragment — the opening of this statement (apparently
# `nn = MLPClassifier(`) lies before this view; the keyword arguments below
# close that constructor call.  Python 2 print-statement syntax throughout.
activation='tanh', hidden_layer_sizes=(11, 5, 3, 2, 10),
    learning_rate="constant", early_stopping=True, max_iter=200)

# Train the neural network and report hold-out accuracy.
nn.fit(X_train, y_train)
nn_predictions = nn.predict(X_test)
nn_accuracy = accuracy_score(y_test, nn_predictions)
print 'Wine Quality Neural Network Accuracy:', nn_accuracy
print '-----------------'

# Confusion matrix for the neural-network predictions.
cnf_matrix = confusion_matrix(y_test, nn_predictions)
plt.figure()
class_names = sorted(y_test.unique())
plot_confusion_matrix(cnf_matrix, classes=class_names,
                      title='Wine Quality Neural Network Confusion Matrix')
plt.show()

# Single decision tree baseline for comparison.
singleDT = DecisionTreeClassifier(max_depth=43, max_features=5)
singleDT.fit(X_train, y_train)
DT_predictions = singleDT.predict(X_test)
DT_accuracy = accuracy_score(y_test, DT_predictions)
print 'Wine Quality Decision Tree Accuracy:', DT_accuracy
print '-----------------'

# 10-split shuffle CV for the learning curve.
cv = ShuffleSplit(n_splits=10, test_size=0.3, random_state=0)
# NOTE(review): this call continues past the visible view (trailing comma).
plot_learning_curve(singleDT, 'Wine Quality Decision Tree Learning Curve', X,
# NOTE(review): fragment — the opening of this statement (apparently
# `nn = MLPClassifier(`) lies before this view; the keyword arguments below
# close that constructor call.  Python 2 print-statement syntax throughout.
hidden_layer_sizes=(11, 5, 3, 2, 10),
    learning_rate="constant", early_stopping=True, max_iter=200)

# Train the neural network and report hold-out accuracy.
nn.fit(X_train, y_train)
nn_predictions = nn.predict(X_test)
nn_accuracy = accuracy_score(y_test, nn_predictions)
print 'Pen Digit Neural Network Accuracy:', nn_accuracy
print '-----------------'

# Confusion matrix for the neural-network predictions.
cnf_matrix = confusion_matrix(y_test, nn_predictions)
plt.figure()
class_names = sorted(y_test.unique())
plot_confusion_matrix(cnf_matrix, classes=class_names,
                      title='Pen Digits Neural Network Confusion Matrix')
plt.show()

# Single decision tree baseline for comparison.
singleDT = DecisionTreeClassifier(max_depth=22, max_features=5)
singleDT.fit(X_train, y_train)
DT_predictions = singleDT.predict(X_test)
DT_accuracy = accuracy_score(y_test, DT_predictions)
print 'Pen Digit Decision Tree Accuracy:', DT_accuracy
print '-----------------'

# 10-split shuffle CV for the learning curve.
cv = ShuffleSplit(n_splits=10, test_size=0.3, random_state=0)
# NOTE(review): this call continues past the visible view (trailing comma).
plot_learning_curve(singleDT, 'Pen Digit Decision Tree Learning Curve', X,