Example #1
0
    i = i + 1

    precision = func_eval._precision_score(y_pred, y_real)
    precisions.append(precision)

    print(precision)

# Recall@k sweep: for each cutoff k = 1..top_k, score the first k
# predicted labels per sample against the true labels.
print("recall:")
recalls = []
i = 0
while i < top_k:
    i += 1
    # keep only the top-i predictions per sample
    y_pred = test_pred[:, 0:i]
    recall = func_eval.new_recall(y_pred, y_real)
    recalls.append(recall)
    print(recall)

#----------------below is commented out------------------

#print("Test Precision, Recall and F1-Score...")
#print(metrics.classification_report(test_labels, test_pred, digits=4))
#print("Macro average Test Precision, Recall and F1-Score...")
#print(metrics.precision_recall_fscore_support(test_labels, test_pred, average='macro'))
#print("Micro average Test Precision, Recall and F1-Score...")
#print(metrics.precision_recall_fscore_support(test_labels, test_pred, average='micro'))
#
## doc and word embeddings
#print('embeddings:')
Example #2
0
#-----then evaluate each label and append to recalls,ncdgs,precisions--------

# Per-label evaluation: for each label (in frequency-sorted order) collect
# the test samples whose true label matches, then score precision, recall
# and NDCG on just that subset, storing results by label position j.
for j, label in enumerate(new_sort_list):
    sample_ids = [idx for idx, true_lbl in enumerate(y_test) if true_lbl == label]
    each_label_real = [y_test[idx] for idx in sample_ids]  # NOTE: currently unused

    each_label_predict = np.array([test_predict_top_k[idx] for idx in sample_ids])
    each_labels_to_eval = np.array([labels_to_eval[idx] for idx in sample_ids])

    precision = func_eval._precision_score(each_label_predict, each_labels_to_eval)
    precisions[j] = precision
    recall = func_eval.new_recall(each_label_predict, each_labels_to_eval)
    recalls[j] = recall
    ncdg = func_eval._NDCG_score(each_label_predict, each_labels_to_eval)
    ncdgs[j] = ncdg
        
        
#print('\nrecalls:',recalls)
#print('precisions:',precisions)
#print('ncdgs:',ncdgs)

#----------3. split labels into group by label frequency-------------------------
#-----------get the evaluation of each group------------------------------

    
    
# Per-group recall scores, filled in by the frequency-group evaluation below.
group_recalls=[]    
Example #3
0
    print(ndcg_i)

#sys.exit(0)

# Precision@k sweep: for each cutoff k = 1..top_k, score the first k
# predicted labels per sample.
print("precision:")
precisions = []
i = 0
while i < top_k:
    i += 1
    # top-i predictions per sample
    y_pred = test_predict_top_k[:, 0:i]
    precision = func_eval._precision_score(y_pred, labels_to_eval)
    precisions.append(precision)
    print(precision)

# Recall@k sweep over the same range of cutoffs.
print("recall:")
recalls = []
i = 0
while i < top_k:
    i += 1
    # top-i predictions per sample
    y_pred = test_predict_top_k[:, 0:i]
    recall = func_eval.new_recall(y_pred, labels_to_eval)
    recalls.append(recall)
    print(recall)
Example #4
0
     i = i+1
 
     precision = func_eval._precision_score(y_pred,real_labels)
     precisions.append(precision)
 
     print(precision)
 
 # Recall@k sweep: evaluate recall on the top-k predicted labels, k = 1..top_k.
 print("recall:")
 recalls = []
 i = 0
 while i < top_k:
     i += 1
     # keep only the first i predictions per sample
     y_pred = pred_labels[:, 0:i]
     recall = func_eval.new_recall(y_pred, real_labels)
     recalls.append(recall)
     print(recall)
     
 # F1@k for each cutoff, from the precision/recall pairs computed above.
 # F1 is the harmonic mean: F1 = 2*P*R / (P + R), defined as 0.0 when
 # P + R == 0 (both scores zero) to avoid division by zero.
 print("f1:")
 f1s = []
 for p, r in zip(precisions, recalls):
     if p + r != 0:
         # BUG FIX: the original computed P*R/(P+R), which is half the
         # F1 score -- the harmonic mean requires the factor of 2.
         f1 = 2 * p * r / (p + r)
     else:
         f1 = 0.0
     print(f1)
     f1s.append(f1)