Code example #1
 def __init__(self, ref_file_path):
     # Read the gold standard with both the old and the refactored reader.
     old_gs = old_read_gs(ref_file_path)
     gs = read_gs(ref_file_path)
     # The two readers must return the same file list ...
     old_filelist = old_gs.keys()
     filelist = gs.keys()
     list_check_equality("gold_standard", old_filelist, filelist)
     # ... and the same gold-standard entry for every file.
     for filename in filelist:
         dict_check_equality(filename, old_gs, gs)
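The two helpers used above, list_check_equality and dict_check_equality, are not part of this excerpt. A minimal sketch of what they could look like, assuming they only need to report mismatches between the old and the refactored gold-standard readers (the message format and the sorted() normalization are assumptions, not taken from the original code):

def list_check_equality(label, old_list, new_list):
    # Compare two key collections regardless of ordering and report the result.
    if sorted(old_list) != sorted(new_list):
        print('%s : key lists differ (old=%d, new=%d)' % (label, len(old_list), len(new_list)))

def dict_check_equality(key, old_dict, new_dict):
    # Compare the entry for a single key between the old and the refactored dict.
    if old_dict[key] != new_dict[key]:
        print('%s : values differ between old and refactored readers' % key)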
Code example #2
    def run_experiment(self, crank_flag, single_rank_flag, expand_rank_flag):
        ref = read_gs(self.ref_file_path)
        eval_score = self.run_evaluation(ref, crank_flag, single_rank_flag, expand_rank_flag)
        # print(eval_score)
        # Sweep every (winsize, cutoff, lambda) combination and write one CSV row per setting.
        for winsize in self.winsize_list:
            for cutoff in self.cutoff_list:
                for lamb in self.lamb_list:
                    avg_eval_score = self.get_avg_evaluation(eval_score[winsize][cutoff][lamb])
                    avg_eval_score['winsize'] = winsize
                    avg_eval_score['cutoffpath'] = cutoff
                    avg_eval_score['lambda'] = lamb
                    avg_eval_score['k1'] = self.k1
                    avg_eval_score['b'] = self.b
                    for col_name in self.csv_column:
                        self.save_fp.write(str(avg_eval_score[col_name]))
                        # Separate columns with commas and terminate the row with a newline.
                        if col_name != self.csv_column[-1]:
                            self.save_fp.write(',')
                        else:
                            self.save_fp.write('\n')
        print('excluded files in evaluation : ', self.excluded_files)
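The per-column write loop at the end of run_experiment can also be expressed with a single join. A minimal sketch with a hypothetical helper (write_csv_row is not part of the original code), assuming csv_column fixes the output column order and save_fp is an open text file:

def write_csv_row(save_fp, csv_column, avg_eval_score):
    # Write one comma-separated row in the order given by csv_column.
    save_fp.write(','.join(str(avg_eval_score[col_name]) for col_name in csv_column) + '\n')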
Code example #3
 def run_experiment(self):
     ref = read_gs(self.ref_file_path)
     eval_score = self.run_evaluation(ref)
     winsize = self.winsize
     # Sweep every (k1, b, cutoff, lambda) combination and write one CSV row per setting.
     for k1 in self.k1_list:
         for b in self.b_list:
             for cutoff in self.cutoff_list:
                 for lamb in self.lamb_list:
                     avg_eval_score = self.get_avg_evaluation(eval_score[cutoff][lamb][k1][b])
                     avg_eval_score['winsize'] = winsize
                     avg_eval_score['cutoffpath'] = cutoff
                     avg_eval_score['lambda'] = lamb
                     avg_eval_score['k1'] = k1
                     avg_eval_score['b'] = b
                     for col_name in self.csv_column:
                         self.save_fp.write(str(avg_eval_score[col_name]))
                         # Separate columns with commas and terminate the row with a newline.
                         if col_name != self.csv_column[-1]:
                             self.save_fp.write(',')
                         else:
                             self.save_fp.write('\n')
     print('excluded files in evaluation : ', self.excluded_files)
Code example #4
     # Default k1 and b values, overridden when the relscore settings provide explicit ones.
     k1 = 1.2
     b = 0.75
     if len(relscore) > 4:
         k1 = relscore[3]
         b = relscore[4]
     ft1 = RelscoreFunctionalTest(
         text_dict, data_set_name, relscore[0], int(relscore[1]), int(relscore[2]), float(k1), float(b)
     )
 # check equality of two methods of getting keyphrase_candidate
 for relscore_type in settings["relscore"]:
     for winsize in settings["winsize"]:
         print("relscore_type : %s, winsize : %s" % (relscore_type, winsize))
         old_eval_score = dict()
         refactor_eval_score = dict()
         old_gs = old_read_gs("/Users/KimKR/Desktop/NEXT_LAB/keyword/gold_standard/%s.ref" % (data_set_name))
         refactor_gs = read_gs("/Users/KimKR/Desktop/NEXT_LAB/keyword/gold_standard/%s.ref" % (data_set_name))
         for file_name in text_dict.keys():
             sentences = text_dict[file_name]
             old_relscore = read_relscore(
                 ("/Users/KimKR/Desktop/NEXT_LAB/keyword/rel_score/%s/%s" % (settings["dataset"], relscore_type)),
                 file_name,
             )
             refact_relscore = read_relscore("/Users/KimKR/Desktop/NEXT_LAB/keyword/relScore", file_name)
             # check equality of keyphrase_candidates
             ft = GraphFunctionalTest(file_name, sentences, old_relscore, refact_relscore, int(winsize))
             # old_gs[file_name] = old_check_gold_standard(sentences, old_gs[file_name])
             # refactor_gs[file_name] = check_gold_standard(sentences, refactor_gs[file_name])
             # Score candidates with both the old (kokako) graph and the refactored graph,
             # then evaluate each against its corresponding gold standard.
             old_eval_score[file_name] = evaluate(ft.kokako_graph.score_candidates(), old_gs[file_name])
             refactor_eval_score[file_name] = evaluate(
                 ft.refactored_graph.score_candidates(1, ft.lamb), refactor_gs[file_name]
             )
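The excerpt ends after filling old_eval_score and refactor_eval_score; the comparison of the two is not shown. A minimal sketch of that final check, assuming evaluate() returns a plain dict of metric values per file (the helper name, the tolerance, and the report format are assumptions):

def compare_eval_scores(old_eval_score, refactor_eval_score, tol=1e-9):
    # Report every metric whose old and refactored values disagree beyond tol.
    for file_name in old_eval_score:
        old_scores = old_eval_score[file_name]
        new_scores = refactor_eval_score[file_name]
        for metric in old_scores:
            if abs(old_scores[metric] - new_scores[metric]) > tol:
                print('%s / %s : old=%s, refactored=%s'
                      % (file_name, metric, old_scores[metric], new_scores[metric]))

Calling compare_eval_scores(old_eval_score, refactor_eval_score) once per (relscore_type, winsize) pair would mirror the equality checks done for the gold standard in code example #1.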