def eval():
     # Time the full predict-and-evaluate pass.
     start = time.clock()
     print "Calculating predictions of ", len(testing_labels), " labels' sets..."
     predicted_labels = jrs_multilabel_classifier.classify_multilabel(testing2training_distances, training_labels, multilabel_classifier)
     accuracy, precision, recall, hammingloss, subset01loss, fmeasure = jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels)
     print " accuracy:", accuracy, "\n precision:", precision, "\n recall:", recall, "\n fmeasure:", fmeasure
     # Average size of the predicted label sets.
     lcount = [len(ll) for ll in predicted_labels]
     print " avg labels in predicted:", float(sum(lcount)) / len(lcount)
     print " done in", (time.clock() - start), "sec..."
     return predicted_labels
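
jrs_evaluation.jrs_evaluate itself does not appear in any of these snippets; only its six-value return is visible. For orientation, below is a minimal sketch of the example-based multilabel metrics (Godbole-Sarawagi style) that a function with this return signature plausibly computes. The name evaluate_multilabel_sets and the edge-case conventions are assumptions for illustration, not the library's actual code.

def evaluate_multilabel_sets(true_sets, pred_sets):
    # Example-based multilabel metrics, returned in the order the
    # snippets above unpack them (assumed, not the library's code).
    all_labels = set()
    for s in true_sets:
        all_labels.update(s)
    for s in pred_sets:
        all_labels.update(s)
    n = len(true_sets)
    q = max(len(all_labels), 1)
    acc = prec = rec = f1 = hamming = subset01 = 0.0
    for t, p in zip(true_sets, pred_sets):
        t, p = set(t), set(p)
        inter = len(t & p)
        union = len(t | p)
        acc += float(inter) / union if union else 1.0   # Jaccard accuracy
        prec += float(inter) / len(p) if p else 1.0
        rec += float(inter) / len(t) if t else 1.0
        denom = len(t) + len(p)
        f1 += 2.0 * inter / denom if denom else 1.0
        hamming += float(len(t ^ p)) / q                # per-label mismatches
        subset01 += 0.0 if t == p else 1.0              # exact-match loss
    return (acc / n, prec / n, rec / n, hamming / n, subset01 / n, f1 / n)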
Example #2
 def eval():
     start = time.clock()
     print "Calculating predictions of ", len(
         testing_labels), " labels' sets..."
     predicted_labels = jrs_multilabel_classifier.classify_multilabel(
         testing2training_distances, training_labels,
         multilabel_classifier)
     accuracy, precision, recall, hammingloss, subset01loss, fmeasure = jrs_evaluation.jrs_evaluate(
         testing_labels, predicted_labels)
     print " accuracy:", accuracy, "\n precision:", precision, "\n recall:", recall, "\n fmeasure:", fmeasure
     lcount = [len(ll) for ll in predicted_labels]
     print " avg labels in predicted:", float(
         sum(lcount)) / (len(lcount))
     print " done in", (time.clock() - start), "sec..."
     return predicted_labels
Example #3
 def eval():
     start = time.clock()    
     print "Calculating predictions of ",len(testing_labels)," labels' sets..."
     predicted_labels = jrs_multilabel_classifier.classify_multilabel(testing2training_distances, training_labels, multilabel_classifier)
     try:
         print testing_labels[:10], "\n", predicted_labels[:10]
         accuracy, precision, recall, hammingloss, subset01loss, fmeasure = jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels)
         print " accuracy:", accuracy, "\n precision:", precision, "\n recall:", recall, "\n fmeasure:", fmeasure
     except Exception, e:
         # Report the failure instead of falling through to names that were
         # never assigned (the original printed the metrics after the except,
         # which raises NameError whenever jrs_evaluate fails).
         print "[knn] Error in jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels):", e
         print testing_labels, predicted_labels
     lcount = [len(ll) for ll in predicted_labels]
     #print " avg labels in predicted:", float(sum(lcount))/(len(lcount))            
     #accuracy, precision, recall, hammingloss, subset01loss, fmeasure =  jrs_evaluation.jrs_evaluate(filter_out_labels(testing_labels), filter_out_labels(predicted_labels))
     #print " postfiltering-accuracy:", accuracy,"\n postfiltering-precision:", precision,"\n postfiltering-recall:", recall,"\n postfiltering-fmeasure:", fmeasure
     print " done in", (time.clock() - start), "sec..."
     return predicted_labels
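
Example #3 differs from the first two only in its debug printing and the blanket try/except around jrs_evaluate. If the failure being chased is a count mismatch between the two label lists, an explicit guard fails faster and more readably than catching everything; a small sketch (the message text is invented):

if len(testing_labels) != len(predicted_labels):
    raise ValueError("label/prediction count mismatch: %d vs %d"
                     % (len(testing_labels), len(predicted_labels)))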
Example #4
def eval(testing2training_distances, training_labels, testing_labels, multilabel_classifier):
        predicted_labels = jrs_multilabel_classifier.classify_multilabel(testing2training_distances, training_labels, multilabel_classifier)
        accuracy, precision, recall, hammingloss, subset01loss, fmeasure =  jrs_evaluation.jrs_evaluate(testing_labels, predicted_labels)
        return fmeasure
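
Because this last variant returns only fmeasure, it reads like an objective function for parameter selection. A hypothetical sweep is sketched below; make_classifier and its parameter k are invented for illustration and are not part of the repository:

# Hypothetical: pick the classifier setting with the best F-measure.
best_k, best_f = None, -1.0
for k in (1, 3, 5, 10, 20):
    f = eval(testing2training_distances, training_labels,
             testing_labels, make_classifier(k))
    if f > best_f:
        best_k, best_f = k, f
print "best k:", best_k, " fmeasure:", best_f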
 print "------------------------------------------"
         
 print "Classifying file:",  features_matrix_path
 f = open(features_matrix_path)
 predicted_labels = []
 for i, line in enumerate(f):    # iterate the file directly; xreadlines() is deprecated
     if i % 1000 == 0: print "", i, "..."

     # One row of integer feature counts per document.
     row = [int(x) for x in line.split()]
     ll = []
     # Predict a label when enough of its features "vote" (appear in the row).
     for label, feature_ixs in label2feature_ixs.iteritems():
         says_yes = sum(row[ix] > 0 for ix in feature_ixs)
         if says_yes >= len(feature_ixs) * MIN_FRACTION_OF_VOTES:
             ll.append(label)
     ll = sorted(ll)
     
     predicted_labels.append(ll)
             
     print "",i," oracle",labels[i]," pred",ll
     print "","len=",len(labels[:(i+1)]), len(predicted_labels)
     accuracy, precision, recall, hammingloss, subset01loss, fmeasure =  jrs_evaluation.jrs_evaluate(labels[:(i+1)], predicted_labels)
     print "\t\t\t\t\t","%.2f" %precision,"%.2f" %recall,"%.2f" %fmeasure
 print "------------------------------------------"
 
 accuracy, precision, recall, hammingloss, subset01loss, fmeasure = jrs_evaluation.jrs_evaluate(labels, predicted_labels)
 print "\t\t\t\t\t", "%.2f" % precision, "%.2f" % recall, "%.2f" % fmeasure
 
 print "Wrining results to", out_path 
 jrs_io.store_labels(open(out_path,"w"), predicted_labels)
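
The voting rule in the loop above is easy to check by hand. A toy run with invented data (two labels, five features, a 50% vote threshold):

# Toy illustration of the voting rule; all values invented.
label2feature_ixs = {"A": [0, 1, 2], "B": [3, 4]}
MIN_FRACTION_OF_VOTES = 0.5
row = [2, 0, 1, 0, 0]    # feature counts for one document
for label, feature_ixs in label2feature_ixs.iteritems():
    says_yes = sum(row[ix] > 0 for ix in feature_ixs)
    print label, "votes:", says_yes, "predicted:", says_yes >= len(feature_ixs) * MIN_FRACTION_OF_VOTES
# -> A votes: 2 predicted: True   (2 of 3 features present, threshold 1.5)
# -> B votes: 0 predicted: False  (0 of 2 features present, threshold 1.0)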