# Test classifier
testSet = DataUtils.read_dataset(directory + "Set-" + setNo + "-validate.csv")
predictionsRaw = classifier.predict(testSet.features)

# Outbox labels
predictions = []
predictionsExpl = []
for row in range(len(predictionsRaw)):
    # Convert float to int
    predictions.append(int(predictionsRaw[row]))
    # Split int into 3 values
    order = predictions[row] / 100
    sat = (predictions[row] / 10) % 10
    gran = predictions[row] % 10
    predictionsExpl.append([order, sat, gran])

# Write results to csv file
results = DataUtils.ResultSet(testSet.id, predictionsExpl, testSet.labels)
DataUtils.write_resultset(results, directory + "Result-" + setNo + ".csv")

# Metrics of classifier
import Metrics
matrix_complete = Metrics.ConfusionMatrix(predictions, inboxLabels(testSet.labels))

# Readable printing
#Metrics.printConfusionMatrix("COMPLETE", matrix_complete)

# Simple printing
Metrics.printConfusionMatrixCompact(matrix_complete)
print ""
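# Illustration (not from the original code): the raw prediction above is assumed
# to be a three-digit label packed as order*100 + sat*10 + gran, which the
# division/modulo steps in the loop unpack again. A minimal round-trip sketch
# under that assumption:
def pack_label(order, sat, gran):
    return order * 100 + sat * 10 + gran

def unpack_label(label):
    return [label / 100, (label / 10) % 10, label % 10]

assert unpack_label(pack_label(2, 3, 1)) == [2, 3, 1]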
print DataUtils.removeMultipleRows(range(len(names)), missingColumns), "\t",

# Cropped training and validation data
cropFeatures = DataUtils.removeMultipleColumns(features, missingColumns)
cropValidation = DataUtils.removeMultipleColumns(testSet.features, missingColumns)

# Select classifier
classifier = svm.SVC()

# Train classifier
classifier = classifier.fit(cropFeatures, labels)

# Test classifier
predictionsRaw = classifier.predict(cropValidation)

# Outbox labels
predictions = []
for row in range(len(predictionsRaw)):
    # Convert float to int
    predictions.append(int(predictionsRaw[row]))

# Metrics of classifier
matrix_complete = Metrics.ConfusionMatrix(predictions, testLabels)

# Simple printing
Metrics.printConfusionMatrixCompact(matrix_complete)
print ""
#print cropFeatures[0]
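# DataUtils is not listed here; removeMultipleColumns is assumed to drop the
# given column indices from every feature row. A rough sketch under that
# assumption (name and behaviour hypothetical):
def remove_multiple_columns(rows, columns):
    drop = set(columns)
    return [[value for idx, value in enumerate(row) if idx not in drop]
            for row in rows]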
import Metrics
import pandas as pd
import numpy as np
import torch

confusion_matrix = Metrics.ConfusionMatrix(14)
confusion_matrix.count_predicted(1, 1)

v = torch.max(outputs.data, 1)[1]

unique, counts = np.unique(labels_test, return_counts=True)
dict(zip(unique, counts))

for i in range(len(list(model.parameters()))):
    print(list(model.parameters())[i].size())

# Number of parameters in our model -------------------------------------------
p = 1
s = 0
for i in range(len(parameters)):
    p = 1
    for j in range(len(parameters[i].size())):
        p = parameters[i].size(j) * p
    s = s + p

if ((epoch - (epoch % 100)) // 100) % 2 == 0:
    old_ratio[epoch % 100] = loss.data[0]
else:
    new_ratio[epoch % 100] = loss.data[0]

if (epoch % 200 == 0) and (epoch > 200):
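# Note: `parameters` above is assumed to be list(model.parameters()). The nested
# loop multiplies the dimensions of each weight tensor and sums the products,
# i.e. it counts the trainable parameters. An equivalent, shorter sketch using
# torch's numel():
def count_parameters(model):
    # Sum of element counts over all parameter tensors
    return sum(p.numel() for p in model.parameters())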
str_p = "" str_a = "" for col in range(dim): p_single = int(p[col]) a_single = int(a[col]) str_p += str(p_single) + " " str_a += str(a_single) + " " predict_single[col].append(p_single) actual_single[col].append(a_single) str_p = str_p[0:-1] str_a = str_a[0:-1] predict_complete.append(str_p) actual_complete.append(str_a) import Metrics matrix_complete = Metrics.ConfusionMatrix(predict_complete, actual_complete) matrix_single = [] for col in range(dim): matrix_single.append( Metrics.ConfusionMatrix(predict_single[col], actual_single[col])) # Hardcoded for better formatting #Metrics.printConfusionMatrix("COMPLETE", matrix_complete) #Metrics.printConfusionMatrix("EXPLORATION ORDER", matrix_single[0]) #Metrics.printConfusionMatrix("SATURATION STRATEGY", matrix_single[1]) #Metrics.printConfusionMatrix("SATURATION GRANULARITY", matrix_single[2]) # Simple printing Metrics.printConfusionMatrixCompact(matrix_complete) for col in range(dim):
c = ["Cat"] d = ["Dog"] r = ["Rabbit"] import Metrics predict = 7 * c + 8 * d + 12 * r actual = 5 * c + 2 * d + 3 * c + 3 * d + 2 * r + d + 11 * r confMatrix = Metrics.ConfusionMatrix(predict, actual) # Basic stats: print "Matrix:" print confMatrix.matrix print "Groups:", confMatrix.index print "Number of groups:", confMatrix.size print "Number of tests:", confMatrix.quantity # Advanced stats: confMatrix.createSubMatrices() print "" print "Recall (macro) :", confMatrix.recallMacro() print "Precision (macro):", confMatrix.precisionMacro() print "Accuracy (macro) :", confMatrix.accuracyMacro() print "Accuracy (micro) :", confMatrix.accuracyMicro()