def evaluation_multiclassovrevaluation_modular(traindat, label_traindat, testdat, label_testdat):
    """Train a multiclass LibLinear SVM on the training data and score it
    with one-vs-rest (OVR) evaluation.

    Two OVR metrics are computed on the training set itself:
    the mean per-class ROC (area under curve) and the mean per-class
    contingency-table accuracy.

    Returns a (mean_roc, mean_accuracy) tuple.
    NOTE(review): testdat / label_testdat are accepted but unused here;
    evaluation happens on the training data.
    """
    from shogun.Features import MulticlassLabels
    from shogun.Evaluation import MulticlassOVREvaluation, ROCEvaluation
    from modshogun import MulticlassLibLinear, RealFeatures, ContingencyTableEvaluation, ACCURACY
    from shogun.Mathematics import Math

    # Fixed seed so the run is reproducible.
    Math.init_random(1)

    truth = MulticlassLabels(label_traindat)

    # C=1.0 multiclass linear SVM trained on the raw training features.
    classifier = MulticlassLibLinear(1.0, RealFeatures(traindat), MulticlassLabels(label_traindat))
    classifier.train()
    preds = classifier.apply()

    # Mean one-vs-rest ROC over all classes.
    ovr_roc = MulticlassOVREvaluation(ROCEvaluation())
    mean_roc = ovr_roc.evaluate(preds, truth)
    #print mean_roc

    # Mean one-vs-rest accuracy over all classes.
    ovr_acc = MulticlassOVREvaluation(ContingencyTableEvaluation(ACCURACY))
    mean_accuracy = ovr_acc.evaluate(preds, truth)
    #print mean_accuracy

    return mean_roc, mean_accuracy
def evaluation_rocevaluation_modular(ground_truth, predicted):
    """Compute the ROC curve and its area for a set of predictions.

    Wraps both label vectors in shogun ``Labels`` objects, runs
    ``ROCEvaluation``, and returns the tuple ``(ROC points, auROC)``.
    """
    from shogun.Features import Labels
    from shogun.Evaluation import ROCEvaluation

    truth = Labels(ground_truth)
    scores = Labels(predicted)

    # evaluate() must run before get_ROC()/get_auROC() are meaningful.
    roc_eval = ROCEvaluation()
    roc_eval.evaluate(scores, truth)

    return roc_eval.get_ROC(), roc_eval.get_auROC()
# # labels_predict = svm.apply(features_test) # train_pred = svm.apply(features_train) # alphas = svm.get_alphas() # b = svm.get_bias() # eval = AccuracyMeasure() # train_eval = AccuracyMeasure() # # accuracy = eval.evaluate(labels_predict.get_labels(), labels_test) # # print(accuracy) # train_eval.evaluate(train_pred, labels_train) # train_accuracy = train_eval.get_accuracy() * 100 eval.evaluate(labels_predict, labels_test) accuracy = eval.get_accuracy() * 100 roc = ROCEvaluation() roc_pred = roc.evaluate(labels_predict, labels_test) test = roc.get_ROC() plt.plot(test[0], test[1], marker='.') plt.show() print('Concatenated') print('Alphas:', alpha) print('Beta:', beta) print('AUC:', roc_pred) # # print('Train Accuracy(%):', train_accuracy) print('Accuracy(%):', accuracy)
# Script fragment (Python 2 — note the bare `print` statement): train a
# Gaussian-kernel LibSVM and an LDA classifier on the same data, scatter-plot
# the two classes, then draw the SVM's ROC curve. `features`, `labels`, `pos`,
# and `neg` are defined earlier in the file (not visible in this chunk); the
# matching LDA ROC plot continues past the end of this chunk.
gk = GaussianKernel(features, features, 1.0)
# Large regularization constant C=1000 for the SVM.
svm = LibSVM(1000.0, gk, labels)
svm.train()
lda = LDA(1, features, labels)
lda.train()
## plot points
subplot(211)
# Positive class in red, negative class in blue; rows 0/1 are presumably
# the x/y coordinates of each 2-D point — TODO confirm against the data setup.
plot(pos[0, :], pos[1, :], "r.")
plot(neg[0, :], neg[1, :], "b.")
grid(True)
title('Data', size=10)
# plot ROC for SVM
subplot(223)
# evaluate() must run before get_ROC()/get_auROC(); roc[0] = FPR, roc[1] = TPR.
ROC_evaluation = ROCEvaluation()
ROC_evaluation.evaluate(svm.apply(), labels)
roc = ROC_evaluation.get_ROC()
print roc
plot(roc[0], roc[1])
# Shade the area under the curve and annotate it with the auROC value.
fill_between(roc[0], roc[1], 0, alpha=0.1)
text(
    mean(roc[0]) / 2,
    mean(roc[1]) / 2,
    'auROC = %.5f' % ROC_evaluation.get_auROC())
grid(True)
xlabel('FPR')
ylabel('TPR')
title('LibSVM (Gaussian kernel, C=%.3f) ROC curve' % svm.get_C1(), size=10)
# plot ROC for LDA
subplot(224)