def eval():
     start = time.clock()    
     print "Calculating predictions of ",len(testing_labels)," labels' sets..."
     predicted_labels = jrs_multilabel_classifier.classify_multilabel(testing2training_distances, training_labels, multilabel_classifier)            
     accuracy, precision, recall, hammingloss, subset01loss, fmeasure =  jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels)
     print " accuracy:", accuracy,"\n precision:", precision,"\n recall:", recall,"\n fmeasure:", fmeasure                
     lcount = [len(ll) for ll in predicted_labels]
     print " avg labels in predicted:", float(sum(lcount))/(len(lcount))
     print " done in", (time.clock() - start), "sec..."
     return predicted_labels
# Example #2 (score: 0)
 def eval():
     start = time.clock()
     print "Calculating predictions of ", len(
         testing_labels), " labels' sets..."
     predicted_labels = jrs_multilabel_classifier.classify_multilabel(
         testing2training_distances, training_labels,
         multilabel_classifier)
     accuracy, precision, recall, hammingloss, subset01loss, fmeasure = jrs_evaluation.jrs_evaluate(
         testing_labels, predicted_labels)
     print " accuracy:", accuracy, "\n precision:", precision, "\n recall:", recall, "\n fmeasure:", fmeasure
     lcount = [len(ll) for ll in predicted_labels]
     print " avg labels in predicted:", float(
         sum(lcount)) / (len(lcount))
     print " done in", (time.clock() - start), "sec..."
     return predicted_labels
# Example #3 (score: 0)
 def eval():
     start = time.clock()    
     print "Calculating predictions of ",len(testing_labels)," labels' sets..."
     predicted_labels = jrs_multilabel_classifier.classify_multilabel(testing2training_distances, training_labels, multilabel_classifier)
     try:
         print testing_labels[:10],"\n", predicted_labels[:10]                                    
         accuracy, precision, recall, hammingloss, subset01loss, fmeasure =  jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels)
     except:
         print "[knn] Error in jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels):",testing_labels, predicted_labels
     print " accuracy:", accuracy,"\n precision:", precision,"\n recall:", recall,"\n fmeasure:", fmeasure                
     lcount = [len(ll) for ll in predicted_labels]
     #print " avg labels in predicted:", float(sum(lcount))/(len(lcount))            
     #accuracy, precision, recall, hammingloss, subset01loss, fmeasure =  jrs_evaluation.jrs_evaluate(filter_out_labels(testing_labels), filter_out_labels(predicted_labels))
     #print " postfiltering-accuracy:", accuracy,"\n postfiltering-precision:", precision,"\n postfiltering-recall:", recall,"\n postfiltering-fmeasure:", fmeasure
     print " done in", (time.clock() - start), "sec..."
     return predicted_labels
# Example #4 (score: 0)
def eval(testing2training_distances, training_labels, testing_labels, multilabel_classifier):
    """Run the multilabel classifier on the test set and return its F-measure."""
    predictions = jrs_multilabel_classifier.classify_multilabel(
        testing2training_distances, training_labels, multilabel_classifier)
    # jrs_evaluate returns (accuracy, precision, recall, hammingloss,
    # subset01loss, fmeasure); only the F-measure is needed here.
    metrics = jrs_evaluation.jrs_evaluate(testing_labels, predictions)
    return metrics[5]