def evaluation_prcevaluation_modular (ground_truth, predicted):
	"""Compute the PRC curve and the area under it (auPRC).

	Parameters are raw label vectors; both are wrapped in shogun
	BinaryLabels before evaluation.  Returns a (PRC, auPRC) tuple.
	"""
	from modshogun import BinaryLabels
	from modshogun import PRCEvaluation

	# Wrap the raw vectors in shogun's binary-label containers.
	truth = BinaryLabels(ground_truth)
	preds = BinaryLabels(predicted)

	# evaluate() must run first so get_PRC()/get_auPRC() have data.
	prc = PRCEvaluation()
	prc.evaluate(preds, truth)

	return prc.get_PRC(), prc.get_auPRC()
def evaluate(predicted_labels, labels, prefix="Results"):
	"""Evaluate predictions against ground truth and print a summary line
	with auPRC, auROC, accuracy and the misclassification percentage.

	Parameters
	----------
	predicted_labels : shogun Labels produced by a classifier
	labels : shogun Labels holding the ground truth
	prefix : str, tag prepended to the printed summary (default "Results")
	"""
	from modshogun import PRCEvaluation, ROCEvaluation, AccuracyMeasure

	prc_evaluator = PRCEvaluation()
	roc_evaluator = ROCEvaluation()
	acc_evaluator = AccuracyMeasure()

	auPRC = prc_evaluator.evaluate(predicted_labels, labels)
	auROC = roc_evaluator.evaluate(predicted_labels, labels)
	acc = acc_evaluator.evaluate(predicted_labels, labels)

	# Bug fix: the original chained .format() onto the *result* of
	# print(), which is None in Python 3, raising AttributeError before
	# anything was printed.  Format the message first, then print it.
	print(('{0}: auPRC = {1:.5f}, auROC = {2:.5f}, acc = {3:.5f} ' +
			'({4}% incorrectly classified)').format(
			prefix, auPRC, auROC, acc, (1 - acc) * 100))
def evaluate(predicted_labels, labels, prefix="Results"):
    """Evaluate predictions against ground truth and print a summary line
    with auPRC, auROC, accuracy and the misclassification percentage.

    Parameters
    ----------
    predicted_labels : shogun Labels produced by a classifier
    labels : shogun Labels holding the ground truth
    prefix : str, tag prepended to the printed summary (default "Results")
    """
    from modshogun import PRCEvaluation, ROCEvaluation, AccuracyMeasure

    prc_evaluator = PRCEvaluation()
    roc_evaluator = ROCEvaluation()
    acc_evaluator = AccuracyMeasure()

    auPRC = prc_evaluator.evaluate(predicted_labels, labels)
    auROC = roc_evaluator.evaluate(predicted_labels, labels)
    acc = acc_evaluator.evaluate(predicted_labels, labels)

    # Bug fix: the original applied .format() to print()'s return value,
    # which is None in Python 3, raising AttributeError before anything
    # was printed.  Format the message first, then print it.
    print(('{0}: auPRC = {1:.5f}, auROC = {2:.5f}, acc = {3:.5f} ' +
           '({4}% incorrectly classified)').format(prefix, auPRC, auROC, acc,
                                                   (1 - acc) * 100))
# Exemplo n.º 4
# 0
def evaluation_prcevaluation_modular(ground_truth, predicted):
    """Return the PRC curve and the area under it (auPRC).

    Both inputs are raw label vectors; they are wrapped in shogun
    BinaryLabels before the PRC evaluation runs.
    """
    from modshogun import BinaryLabels
    from modshogun import PRCEvaluation

    evaluator = PRCEvaluation()
    # evaluate() populates the internal state read by the getters below.
    evaluator.evaluate(BinaryLabels(predicted), BinaryLabels(ground_truth))
    return evaluator.get_PRC(), evaluator.get_auPRC()
# Exemplo n.º 5
# 0
# Train two classifiers on the same data: an SVM with a Gaussian kernel
# (C=1000, kernel width 1.0) and an LDA classifier.
# NOTE(review): `features`, `labels`, `pos`, `neg` and the pylab/modshogun
# names used below are defined outside this chunk — verify against the
# surrounding file.
gk=GaussianKernel(features, features, 1.0)
svm = LibSVM(1000.0, gk, labels)
svm.train()
lda=LDA(1,features,labels)
lda.train()

## plot points
# Scatter the positive (red) and negative (blue) 2-D training points.
subplot(211)
plot(pos[0,:], pos[1,:], "r.")
plot(neg[0,:], neg[1,:], "b.")
grid(True)
title('Data',size=10)

# plot PRC for SVM
subplot(223)
PRC_evaluation=PRCEvaluation()
# evaluate() must run before get_PRC()/get_auPRC() return curve data.
PRC_evaluation.evaluate(svm.apply(),labels)
PRC = PRC_evaluation.get_PRC()
# PRC[0]/PRC[1] hold the two curve coordinate arrays; shade the area
# under the curve and annotate it with the auPRC value.
plot(PRC[0], PRC[1])
fill_between(PRC[0],PRC[1],0,alpha=0.1)
text(0.55,mean(PRC[1])/3,'auPRC = %.5f' % PRC_evaluation.get_auPRC())
grid(True)
xlabel('Precision')
ylabel('Recall')
title('LibSVM (Gaussian kernel, C=%.3f) PRC curve' % svm.get_C1(),size=10)

# plot PRC for LDA
# Re-uses the same evaluator instance for the LDA predictions.
subplot(224)
PRC_evaluation.evaluate(lda.apply(),labels)
PRC = PRC_evaluation.get_PRC()
plot(PRC[0], PRC[1])
# Exemplo n.º 6
# 0
# Train two classifiers on the same data: an SVM with a Gaussian kernel
# (C=1000, kernel width 1.0) and an LDA classifier.
# NOTE(review): `features`, `labels`, `pos`, `neg` and the pylab/modshogun
# names used below are defined outside this chunk — verify against the
# surrounding file.
gk = GaussianKernel(features, features, 1.0)
svm = LibSVM(1000.0, gk, labels)
svm.train()
lda = LDA(1, features, labels)
lda.train()

## plot points
# Scatter the positive (red) and negative (blue) 2-D training points.
subplot(211)
plot(pos[0, :], pos[1, :], "r.")
plot(neg[0, :], neg[1, :], "b.")
grid(True)
title('Data', size=10)

# plot PRC for SVM
subplot(223)
PRC_evaluation = PRCEvaluation()
# evaluate() must run before get_PRC()/get_auPRC() return curve data.
PRC_evaluation.evaluate(svm.apply(), labels)
PRC = PRC_evaluation.get_PRC()
# PRC[0]/PRC[1] hold the two curve coordinate arrays; shade the area
# under the curve and annotate it with the auPRC value.
plot(PRC[0], PRC[1])
fill_between(PRC[0], PRC[1], 0, alpha=0.1)
text(0.55, mean(PRC[1]) / 3, 'auPRC = %.5f' % PRC_evaluation.get_auPRC())
grid(True)
xlabel('Precision')
ylabel('Recall')
title('LibSVM (Gaussian kernel, C=%.3f) PRC curve' % svm.get_C1(), size=10)

# plot PRC for LDA
# Re-uses the same evaluator instance for the LDA predictions.
subplot(224)
PRC_evaluation.evaluate(lda.apply(), labels)
PRC = PRC_evaluation.get_PRC()
plot(PRC[0], PRC[1])