import os


def main(argv):
    """Evaluates a prediction file against a gold file for the given task type."""

    if len(argv) != 3:
        raise ValueError('Invalid number of parameters.')

    task_type = int(argv[0])
    pred = argv[1]
    gold = argv[2]

    pred_name = os.path.basename(pred)
    gold_name = os.path.basename(gold)

    if task_type == 1:
        # Emotion intensity regression: Pearson correlation over all gold
        # scores, and over gold scores in the 0.5-1 range.
        result = evaluate_ei(pred, gold)
        print(f"Pearson correlation between {pred_name} and {gold_name}:\t{result[0]}")
        print(f"Pearson correlation for gold scores in range 0.5-1 between {pred_name} and {gold_name}:\t{result[1]}")

    elif task_type == 2:
        # Ordinal classification: Pearson correlation and weighted quadratic
        # Kappa, each over all classes and over the "some emotion" classes.
        result = evaluate_oc(pred, gold)
        print(f"Pearson correlation between {pred_name} and {gold_name}:\t{result[0]}")
        print(f"Pearson correlation for some emotions between {pred_name} and {gold_name}:\t{result[1]}")
        print(f"Weighted quadratic Kappa between {pred_name} and {gold_name}:\t{result[2]}")
        print(f"Weighted quadratic Kappa for some emotions between {pred_name} and {gold_name}:\t{result[3]}")

    else:
        # Multi-label emotion classification.
        result = evaluate_multilabel(pred, gold)
        print(f"Multi-label accuracy (Jaccard index) between {pred_name} and {gold_name}:\t{result[0]}")
        print(f"Micro-averaged F1 score between {pred_name} and {gold_name}:\t{result[1]}")
        print(f"Macro-averaged F1 score between {pred_name} and {gold_name}:\t{result[2]}")
Example #2
def v_oc_scores(lang, submissions, metrics, ref_files):
    """calculates metrics for ei-reg valence tasks """

    valence_scores = evaluate_oc(submissions['v-oc-' + lang],
                                 ref_files['valence_oc_' + lang])

    metrics['r_oc_valence_' + lang] = valence_scores[0]
    metrics['r_some_oc_valence_' + lang] = valence_scores[1]
    metrics['kappa_oc_valence_' + lang] = valence_scores[2]
    metrics['kappa_some_oc_valence_' + lang] = valence_scores[3]
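A hedged usage sketch for the helper above; the language code and the file paths in submissions and ref_files are assumptions, and only the key patterns ('v-oc-' + lang and 'valence_oc_' + lang) come from the code:

metrics = {}
submissions = {'v-oc-en': 'V-oc-en-pred.txt'}      # hypothetical path
ref_files = {'valence_oc_en': 'V-oc-en-gold.txt'}  # hypothetical path

v_oc_scores('en', submissions, metrics, ref_files)
# metrics now maps r_oc_valence_en, r_some_oc_valence_en,
# kappa_oc_valence_en, and kappa_some_oc_valence_en to the four
# values returned by evaluate_oc.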
Example #3
import numpy


def ei_oc_scores(lang, submissions, metrics, ref_files):
    """Calculates metrics for the EI-oc (emotion intensity ordinal classification) tasks."""

    anger_scores = evaluate_oc(submissions['ei-oc-' + lang + '-anger'],
                               ref_files['ei_oc_' + lang + '_anger'])
    fear_scores = evaluate_oc(submissions['ei-oc-' + lang + '-fear'],
                              ref_files['ei_oc_' + lang + '_fear'])
    joy_scores = evaluate_oc(submissions['ei-oc-' + lang + '-joy'],
                             ref_files['ei_oc_' + lang + '_joy'])
    sadness_scores = evaluate_oc(submissions['ei-oc-' + lang + '-sadness'],
                                 ref_files['ei_oc_' + lang + '_sadness'])

    # Pearson correlation per emotion (result index 0), plus the macro-average.
    metrics['r_oc_anger_' + lang] = anger_scores[0]
    metrics['r_oc_fear_' + lang] = fear_scores[0]
    metrics['r_oc_joy_' + lang] = joy_scores[0]
    metrics['r_oc_sadness_' + lang] = sadness_scores[0]
    metrics['r_oc_macro_avg_' + lang] = numpy.mean(
        [anger_scores[0], fear_scores[0], joy_scores[0], sadness_scores[0]])

    # Pearson correlation restricted to the "some emotion" classes (index 1).
    metrics['r_some_oc_anger_' + lang] = anger_scores[1]
    metrics['r_some_oc_fear_' + lang] = fear_scores[1]
    metrics['r_some_oc_joy_' + lang] = joy_scores[1]
    metrics['r_some_oc_sadness_' + lang] = sadness_scores[1]
    metrics['r_some_oc_macro_avg_' + lang] = numpy.mean(
        [anger_scores[1], fear_scores[1], joy_scores[1], sadness_scores[1]])

    # Weighted quadratic Kappa (index 2).
    metrics['kappa_oc_anger_' + lang] = anger_scores[2]
    metrics['kappa_oc_fear_' + lang] = fear_scores[2]
    metrics['kappa_oc_joy_' + lang] = joy_scores[2]
    metrics['kappa_oc_sadness_' + lang] = sadness_scores[2]
    metrics['kappa_oc_macro_avg_' + lang] = numpy.mean(
        [anger_scores[2], fear_scores[2], joy_scores[2], sadness_scores[2]])

    # Weighted quadratic Kappa restricted to the "some emotion" classes (index 3).
    metrics['kappa_some_oc_anger_' + lang] = anger_scores[3]
    metrics['kappa_some_oc_fear_' + lang] = fear_scores[3]
    metrics['kappa_some_oc_joy_' + lang] = joy_scores[3]
    metrics['kappa_some_oc_sadness_' + lang] = sadness_scores[3]
    metrics['kappa_some_oc_macro_avg_' + lang] = numpy.mean(
        [anger_scores[3], fear_scores[3], joy_scores[3], sadness_scores[3]])
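Because both helpers write into the shared metrics dict in place rather than returning it, a driver can accumulate scores across tasks and languages into one mapping. A sketch under the assumption that submissions and ref_files are already populated and that the language codes are 'en', 'ar', and 'es':

metrics = {}
for lang in ('en', 'ar', 'es'):   # assumed language codes
    ei_oc_scores(lang, submissions, metrics, ref_files)
    v_oc_scores(lang, submissions, metrics, ref_files)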