Example #1
def evaluation_thresholds_modular(index):
    from modshogun import Labels, ROCEvaluation
    import numpy
    numpy.random.seed(17)

    # Synthetic scores: a weak linear trend plus uniform noise.
    output = numpy.arange(-1, 1, 0.001)
    output = (0.3 * output + 0.7 * (numpy.random.rand(len(output)) - 0.5))

    # Ground truth: first half negative, second half positive.
    label = [-1.0] * (len(output) // 2)
    label.extend([1.0] * (len(output) // 2))
    label = numpy.array(label)

    pred = Labels(output)
    truth = Labels(label)

    # Compute the ROC curve for the scores against the ground truth.
    evaluator = ROCEvaluation()
    evaluator.evaluate(pred, truth)

    [fp, tp] = evaluator.get_ROC()

    # Threshold associated with the requested point on the curve.
    thresh = evaluator.get_thresholds()
    b = thresh[index]

    #print("tpr", numpy.mean(output[label>0]>b), tp[index])
    #print("fpr", numpy.mean(output[label<0]>b), fp[index])

    # Return Shogun's TPR/FPR at `index` alongside the same rates
    # recomputed directly from the raw scores, so they can be compared.
    return tp[index], fp[index], numpy.mean(output[label > 0] > b), numpy.mean(
        output[label < 0] > b)
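
A quick call sketch (assumed, not part of the original page): pick a point on the curve and check that Shogun's rates match the ones recomputed from the raw scores.

# Hypothetical usage of the example above:
tpr, fpr, tpr_check, fpr_check = evaluation_thresholds_modular(1000)
print('tpr:', tpr, tpr_check)   # the pair should agree
print('fpr:', fpr, fpr_check)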
Example #4
def evaluation_rocevaluation_modular(ground_truth, predicted):
    from modshogun import BinaryLabels
    from modshogun import ROCEvaluation

    # Wrap the +/-1 truth and the real-valued scores as Shogun labels.
    ground_truth_labels = BinaryLabels(ground_truth)
    predicted_labels = BinaryLabels(predicted)

    # evaluate() computes the curve; get_ROC()/get_auROC() then read it off.
    evaluator = ROCEvaluation()
    evaluator.evaluate(predicted_labels, ground_truth_labels)

    return evaluator.get_ROC(), evaluator.get_auROC()
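
A minimal call sketch (assumed, not from the original page): generate noisy scores for +/-1 ground truth, then inspect the curve and its area.

# Hypothetical usage of the example above:
import numpy
numpy.random.seed(0)
ground_truth = numpy.sign(numpy.random.randn(1000))        # labels in {-1, +1}
predicted = ground_truth + 0.8 * numpy.random.randn(1000)  # noisy scores
roc, auc = evaluation_rocevaluation_modular(ground_truth, predicted)
print('auROC = %.5f' % auc)   # closer to 1.0 means better ranking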
Example #5
# (fragment: `svm`, `features`, `labels` and the point clouds `pos`, `neg`
# are defined in an earlier, omitted part of the script)
from modshogun import LDA, ROCEvaluation
from pylab import *   # subplot, plot, fill_between, text, mean, ...

svm.train()
lda = LDA(1, features, labels)
lda.train()

# plot points
subplot(211)
plot(pos[0, :], pos[1, :], "r.")
plot(neg[0, :], neg[1, :], "b.")
grid(True)
title('Data', size=10)

# plot ROC for SVM
subplot(223)
ROC_evaluation = ROCEvaluation()
ROC_evaluation.evaluate(svm.apply(), labels)
roc = ROC_evaluation.get_ROC()
print(roc)
plot(roc[0], roc[1])
fill_between(roc[0], roc[1], 0, alpha=0.1)
text(mean(roc[0]) / 2, mean(roc[1]) / 2, 'auROC = %.5f' % ROC_evaluation.get_auROC())
grid(True)
xlabel('FPR')
ylabel('TPR')
title('LibSVM (Gaussian kernel, C=%.3f) ROC curve' % svm.get_C1(), size=10)

# plot ROC for LDA
subplot(224)
ROC_evaluation.evaluate(lda.apply(), labels)
roc = ROC_evaluation.get_ROC()
plot(roc[0], roc[1])
fill_between(roc[0], roc[1], 0, alpha=0.1)
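
The fragment above relies on training data and a trained SVM built earlier in the script. A minimal sketch of that omitted setup, where every name and parameter value is an assumption rather than the original script's:

# Hypothetical setup for the fragment above -- all names and parameter
# values here are assumptions, not taken from the original source:
from numpy import concatenate, ones, random
from modshogun import RealFeatures, BinaryLabels, GaussianKernel, LibSVM

num = 100
pos = random.randn(2, num) + 1      # positive cluster, shifted up/right
neg = random.randn(2, num) - 1      # negative cluster, shifted down/left
features = RealFeatures(concatenate((pos, neg), axis=1))
labels = BinaryLabels(concatenate((ones(num), -ones(num))))
kernel = GaussianKernel(features, features, 1.0)
svm = LibSVM(1.0, kernel, labels)   # C=1.0; svm.train() is then called above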