Example #1
def get_rouge(args):

    model_specific_dir = create_fname_identifier(args).replace('.', '_') + '/'
    rouge_fname = args.system_summ_path + model_specific_dir

    if args.source == 'dm':
        r = Rouge155(rouge_args='-e /home/kristjan/data1/softwares/rouge/ROUGE/RELEASE-1.5.5/data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m -b 75')
    else:
        r = Rouge155()

    r.system_dir = rouge_fname
    r.model_dir = args.model_summ_path + 'dev/'
    r.system_filename_pattern = 'sum.(\d+).txt'
    r.model_filename_pattern = 'dev_cnn_#ID#.txt'

    fname = args.rouge_dir + create_fname_identifier(args) + '_rouge.out'
    ofp = open(fname, 'w+')

    ofp.write(r.convert_and_evaluate())
    ofp.close()

    tmp_dir = r._config_dir
    print('Cleaning up..', tmp_dir)

    shutil.rmtree(tmp_dir)
Example #2
 def eval(self, hypList, refList):
     number = len(hypList)
     n_ref = len(refList) // number
     # Generate Files
     os.system("rm "+self.settings['rouge_hyp_dir']+"*")
     os.system("rm "+self.settings['rouge_ref_dir']+"*")
     
     for Index in range(0, number):
         hypName = self.settings['rouge_hyp_dir']+'hyp.'+str(Index).zfill(6)+'.txt'
         f1 = open(hypName, 'w')
         f1.write(RougeTrick(hypList[Index]))
         f1.close()

         for i in range(n_ref):
             refName = self.settings['rouge_ref_dir']+'ref.'+chr(ord('A')+i)+'.'+str(Index).zfill(6)+'.txt'
             f2 = open(refName, 'w')
             f2.write(RougeTrick(refList[Index*n_ref + i]))
             f2.close()
     if number == 500:
         R = Rouge155('./ROUGE-RELEASE-1.5.5', '-e ./ROUGE-RELEASE-1.5.5/data -n 4 -m -w 1.2 -c 95 -r 1000 -b 75 -a')
     else:
         R = Rouge155('./ROUGE-RELEASE-1.5.5')
     R.system_dir = self.settings['rouge_hyp_dir']
     R.model_dir = self.settings['rouge_ref_dir']
     R.system_filename_pattern = 'hyp.(\d+).txt'
     R.model_filename_pattern = 'ref.[A-Z].#ID#.txt'
     
     output = R.convert_and_evaluate()
     print(output)
     output_dict = R.output_to_dict(output)
     return output_dict
Example #3
def main():
    args = get_args()
    if args.split_sents:
        from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
        tmp = mkdtemp()
        PunktSentenceSplitter.split_files(args.input_dir, tmp)
        args.input_dir = tmp
    Rouge155.convert_summaries_to_rouge_format(args.input_dir, args.output_dir)
Example #4
def main():
	args = get_args()
	if args.split_sents:
		from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
		tmp = mkdtemp()
		PunktSentenceSplitter.split_files(args.input_dir, tmp)
		args.input_dir = tmp
	Rouge155.convert_summaries_to_rouge_format(args.input_dir, args.output_dir)
Example #5
def convert2rouge_format():
    model_input_dir = 'doc/model/04model'
    model_output_dir = 'doc/model/04model_rouge'
    system_input_dir = 'doc/systems/04systems'
    system_output_dir = 'doc/systems/04systems_rouge'

    Rouge155.convert_summaries_to_rouge_format(model_input_dir,
                                               model_output_dir)
    Rouge155.convert_summaries_to_rouge_format(system_input_dir,
                                               system_output_dir)
Example #6
def RougeTest_pyrouge(ref, hyp, id=0, rouge_metric='all', compute_score=True,
                      path='./result', max_num_of_bytes=-1):
    # initialization
    if not os.path.exists('./result'):
        os.mkdir('./result')
    if not os.path.exists(path):
        os.mkdir(path)

    # write new ref and hyp
    with codecs.open(os.path.join(path, 'ref.' + str(id) + '.txt'), 'w', encoding="UTF-8") as f:
        f.write(Rouge155.convert_text_to_rouge_format('\n'.join(ref).decode('UTF-8', 'ignore')))
    with codecs.open(os.path.join(path, 'hyp.' + str(id) + '.txt'), 'w', encoding="UTF-8") as f:
        f.write(Rouge155.convert_text_to_rouge_format('\n'.join(hyp).decode('UTF-8', 'ignore')))

    if compute_score:
        if max_num_of_bytes > 0:
            r = Rouge155('%s/SciSoft/ROUGE-1.5.5/' % home_path,
                         '-e %s/SciSoft/ROUGE-1.5.5/data -a -c 95 -m -n 2 -b %d' % (home_path, max_num_of_bytes))
        else:
            r = Rouge155('%s/SciSoft/ROUGE-1.5.5/' % home_path,
                         '-e %s/SciSoft/ROUGE-1.5.5/data -a -c 95 -m -n 2' % home_path)
        r.system_dir = path
        r.model_dir = path
        r.system_filename_pattern = 'hyp.(\d+).txt'
        r.model_filename_pattern = 'ref.#ID#.txt'

        output = r.evaluate()
        # print(output)
        output_dict = r.output_to_dict(output)
        # cleanup
        shutil.rmtree(path)
        shutil.rmtree(r._config_dir)

        if rouge_metric[1] == 'f':
            return output_dict["rouge_%s_f_score" % rouge_metric[0]]
        elif rouge_metric[1] == 'r':
            return output_dict["rouge_%s_recall" % rouge_metric[0]]
        elif rouge_metric == 'avg_f':
            return (output_dict["rouge_1_f_score"] + output_dict["rouge_2_f_score"] + output_dict[
                "rouge_l_f_score"]) / 3
        elif rouge_metric == 'avg_r':
            return (output_dict["rouge_1_recall"] + output_dict["rouge_2_recall"] + output_dict["rouge_l_recall"]) / 3
        else:
            return (output_dict["rouge_1_precision"], output_dict["rouge_1_recall"], output_dict["rouge_1_f_score"],
                    output_dict["rouge_2_precision"], output_dict["rouge_2_recall"], output_dict["rouge_2_f_score"],
                    output_dict["rouge_l_precision"], output_dict["rouge_l_recall"], output_dict["rouge_l_f_score"])
    else:
        return None
Example #7
def compute_rouge(sentences, targets):
    target_path = "./dir/target"
    senten_path = "./dir/senten"
    if not os.path.exists(target_path):
        os.mkdir(target_path)
    if not os.path.exists(senten_path):
        os.mkdir(senten_path)
    count = 1
    for sent, tgt in zip(sentences, targets):
        with open(target_path+"/text.A."+str(count)+".txt", mode='w', encoding='utf-8') as f:
            with open(senten_path+"/text."+str(count)+".txt", mode='w', encoding='utf-8') as g:
                count += 1
                # print(sent)
                # print(tgt)
                for s in sent:
                    g.write(str(s)+" ")
                for t in tgt:
                    f.write(str(t)+" ")

    r = Rouge155()
    r.system_dir = senten_path
    r.model_dir = target_path
    r.system_filename_pattern = 'text.(\d+).txt'
    r.model_filename_pattern = 'text.[A-Z].#ID#.txt'

    output = r.convert_and_evaluate()
    # print(output)
    output_dict = r.output_to_dict(output)
    rg1 = output_dict["rouge_1_f_score"]
    rg2 = output_dict["rouge_2_f_score"]
    rgl = output_dict["rouge_l_f_score"]

    return rg1, rg2, rgl
Example #8
def main():
    # parse arguments
    parser = ArgumentParser()
    parser.add_argument(
        "-m",
        "--model",
        dest="model",
        help="path to directory containing gold standard summaries",
        metavar="MODEL")
    parser.add_argument("-s",
                        "--system",
                        dest="system",
                        help="path to directory containing your summaries",
                        metavar="SYSTEM")
    args = parser.parse_args()
    if not args.model or not args.system:
        parser.print_help()
        return

    model_path = os.path.normpath(args.model)
    system_path = os.path.normpath(args.system)

    # set up rouge
    r = Rouge155()
    r.system_dir = system_path
    r.model_dir = model_path
    r.system_filename_pattern = '(\w+).txt'
    r.model_filename_pattern = '#ID#.txt'

    output = r.convert_and_evaluate()
    print(output)
    output_dict = r.output_to_dict(output)
    print(output_dict)
Example #9
def run_pyrouge_eval(dir_data=None,
                     model_dir=None,
                     system_dir=None,
                     model_filename_pattern="example.[A-Z].#ID#.txt",
                     system_filename_pattern="example.(\d+).txt"):
    """
    """
    if model_dir is None:
        model_dir = os.path.join(dir_data, "model")
    if system_dir is None:
        system_dir = os.path.join(dir_data, "reference")
    #
    rg = Rouge155()
    rg.model_dir = model_dir
    rg.system_dir = system_dir
    #
    rg.model_filename_pattern = model_filename_pattern
    rg.system_filename_pattern = system_filename_pattern
    #
    # rg.model_filename_pattern = "#ID#_result.txt"
    # rg.system_filename_pattern = "(\d+)_reference.txt"
    #
    # rg.model_filename_pattern = "example.[A-Z].#ID#.txt"
    # rg.system_filename_pattern = "example.(\d+).txt"
    #
    logging.getLogger('global').setLevel(
        logging.WARNING)  # silence pyrouge logging
    output = rg.convert_and_evaluate()
    output_dict = rg.output_to_dict(output)
    #
    return output_dict, output
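A minimal way to exercise run_pyrouge_eval from above, assuming a hypothetical data/rouge_example directory whose model/ and reference/ subfolders contain files matching the default example.* patterns:

output_dict, output = run_pyrouge_eval(dir_data="data/rouge_example")
print(output_dict["rouge_1_f_score"], output_dict["rouge_2_f_score"], output_dict["rouge_l_f_score"])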
Example #10
def run_rouge(summaries_references):
    """Run Perl ROUGE 1.5.5 script"""

    summaries, references = summaries_references

    temp_dir = tempfile.mkdtemp()
    system_dir = os.path.join(temp_dir, 'system')
    model_dir = os.path.join(temp_dir, 'model')
    # directory for generated summaries
    os.makedirs(system_dir)
    # directory for reference summaries
    os.makedirs(model_dir)

    for i, (summary, ref_candidates) in enumerate(zip(summaries, references)):

        for j, ref_candidate in enumerate(ref_candidates):
            with open(os.path.join(model_dir, f'{i}.{j}.txt'),
                      encoding='utf-8',
                      mode='w') as f:
                f.write('\n'.join(ref_candidate))

        with open(os.path.join(system_dir, f'{i}.txt'),
                  encoding='utf-8',
                  mode='w') as f:
            f.write('\n'.join(summary))

    rouge_args = [
        '-e',
        os.path.join(rouge_dir, "data"),
        '-a',
        '-c',
        95,
        '-m',
        '-n',
        2,
        # '-w', 1.2,
    ]

    args_str = ' '.join(map(str, rouge_args))
    rouge = Rouge155(rouge_dir=rouge_dir, rouge_args=args_str, log_level=0)
    # rouge = Rouge155(rouge_args=args_str, log_level=0)
    rouge.system_dir = system_dir
    rouge.model_dir = model_dir
    rouge.system_filename_pattern = '(\d+).txt'
    rouge.model_filename_pattern = '#ID#.\d+.txt'
    output = rouge.convert_and_evaluate()

    output_dict = rouge.output_to_dict(output)

    output_dict = {
        'ROUGE-1': output_dict['rouge_1_f_score'],
        'ROUGE-2': output_dict['rouge_2_f_score'],
        'ROUGE-L': output_dict['rouge_l_f_score'],
        'len': len(summaries)
    }

    # remove the created temporary files
    shutil.rmtree(temp_dir)

    return output_dict
Example #11
def compute_rouge(system_dir, model_dir):
    """
    system_dir: directory containing generated summaries
        file.001.txt
        file.002.txt
        file.003.txt
    model_dir:  directory containing (single) reference summaries
        file.001.txt
        file.002.txt
        file.003.txt

    to install pyrouge:
        1) pip install pyrouge
        2) pyrouge_set_rouge_path /absolute/path/to/ROUGE-1.5.5/directory
    more information: https://pypi.org/project/pyrouge/
    """

    r = Rouge155()
    r.system_dir = system_dir
    r.model_dir = model_dir

    r.system_filename_pattern = 'file.(\d+).txt'
    r.model_filename_pattern = 'file.#ID#.txt'

    output = r.convert_and_evaluate()
    print(output)
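A hypothetical invocation of the compute_rouge helper above, assuming pyrouge_set_rouge_path has been run and both folders hold file.001.txt, file.002.txt, ... as described in the docstring (the function only prints the ROUGE report):

compute_rouge('eval/system_summaries', 'eval/model_summaries')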
Example #12
def calc_rouge_files_test(args, generated_texts):
    logger = logging.getLogger(args.logger_name)
    # cnndm_test = get_cnndm_dataset(split, args.corpus_pct)

    sys_sum_path = os.path.join(args.data_dir, 'system_summaries')
    mod_sum_path = os.path.join(args.save_dir, 'model_summaries')
    if not os.path.exists(sys_sum_path):
        os.mkdir(sys_sum_path)
    if len(os.listdir(sys_sum_path)) == 0:
        data_utils.write_system_summaries(args.data_dir, args.corpus_pct)
    assert len(os.listdir(sys_sum_path)) == len(generated_texts)
    if not os.path.exists(mod_sum_path):
        os.mkdir(mod_sum_path)

    # Write summaries into files
    for idx, sample in enumerate(generated_texts):
        filepath = os.path.join(mod_sum_path,
                                f'summary.{str(idx+1).zfill(4)}.txt')
        with open(filepath, 'w', encoding='utf-8') as file:
            file.write(sample)

    r = Rouge155()
    r.system_dir = sys_sum_path
    r.model_dir = mod_sum_path
    r.system_filename_pattern = 'summary.(\d+).txt'
    r.model_filename_pattern = 'summary.#ID#.txt'

    output = r.convert_and_evaluate()
    logger.info(output)

    return r.output_to_dict(output)
Example #13
    def saliency(self, reference=None, system=None, alpha=0.5):
        self.r = Rouge155()

        self.r.model_dir = 'model_summaries'
        self.r.system_dir = 'system_summaries'
        self.r.system_filename_pattern = 'text.(\d+).txt'
        self.r.model_filename_pattern = 'text.[A-Z].#ID#.txt'

        open('model_summaries/text.A.001.txt', 'w').close()
        open('system_summaries/text.001.txt', 'w').close()
        if reference is not None:
            np.savetxt("model_summaries/text.A.001.txt",
                       reference,
                       newline="\n",
                       fmt="%s")

        if system is not None:
            np.savetxt("system_summaries/text.001.txt",
                       system,
                       newline="\n",
                       fmt="%s")

        output = self.r.convert_and_evaluate()
        output = self.r.output_to_dict(output)
        R1 = output['rouge_1_f_score']
        R2 = output['rouge_2_f_score']

        return alpha * R1 + (1 - alpha) * R2
Example #14
    def _calc_rouge(self, args):
        summ_path = args['summ_path']
        ref_path = args['ref_path']
        eos = args['eos']
        ignore_empty_reference = args['ignore_empty_reference']
        ignore_empty_summary = args['ignore_empty_summary']
        stemming = args['stemming']

        s = settings.Settings()
        s._load()
        with tempfile.TemporaryDirectory() as dirpath:
            sys_root, model_root = [
                os.path.join(dirpath, _) for _ in ["system", "model"]
            ]
            utils.mkdirs([sys_root, model_root])
            ignored = utils.split_files(
                model_path=ref_path,
                system_path=summ_path,
                model_dir=model_root,
                system_dir=sys_root,
                eos=eos,
                ignore_empty_reference=ignore_empty_reference,
                ignore_empty_summary=ignore_empty_summary)
            r = Rouge155(rouge_dir=os.path.dirname(s.data['ROUGE_path']),
                         log_level=logging.ERROR,
                         stemming=stemming)
            r.system_dir = sys_root
            r.model_dir = model_root
            r.system_filename_pattern = r's.(\d+).txt'
            r.model_filename_pattern = 'm.[A-Z].#ID#.txt'
            data_arg = "-e %s" % s.data['ROUGE_data']
            rouge_args_str = "%s %s" % (data_arg, self.rouge_args)
            output = r.convert_and_evaluate(rouge_args=rouge_args_str)
            res = self._get_info(output)
        return res
Example #15
def compute_rouge(model_name,
                  n_iter=None,
                  diversity_param_tuple=None,
                  cos_threshold=None,
                  extra=None):
    rouge_args = '-a -l 250 -n 2 -m -2 4 -u -c 95 -r 1000 -f A -p 0.5 -t 0 -d -e {} -x'.format(
        path_parser.rouge_dir)

    r = Rouge155(rouge_args=rouge_args)

    baselines_wo_config = ['lead', 'lead-2006', 'lead-2007', 'lead_2007']
    if model_name in baselines_wo_config or model_name.startswith('duc'):
        text_dp = join(path_parser.summary_text, model_name)
    else:
        text_dp = tools.get_text_dp(
            model_name,
            cos_threshold=cos_threshold,
            n_iter=n_iter,
            diversity_param_tuple=diversity_param_tuple,
            extra=extra)

    r.system_dir = text_dp
    r.model_dir = join(path_parser.data_summary_targets, config.test_year)
    gen_sys_file_pat = '(\w*)'
    gen_model_file_pat = '#ID#_[\d]'

    r.system_filename_pattern = gen_sys_file_pat
    r.model_filename_pattern = gen_model_file_pat

    output = r.convert_and_evaluate()
    output = proc_output(output)
    logger.info(output)
    return output
Example #16
def eval_rouge(results, golds):
    assert len(golds) == len(results)
    clear_dir('gold_summaries')
    clear_dir('result_summaries')
    r = Rouge155(ROUGE_PATH)
    r.system_dir = 'result_summaries'
    r.model_dir = 'gold_summaries'
    r.system_filename_pattern = 'result.(\\d+).txt'
    r.model_filename_pattern = 'gold.[A-Z].#ID#.txt'
    for i in range(len(golds)):
        output_gold = open('gold_summaries/gold.A.%d.txt' % i, 'w')
        output_result = open('result_summaries/result.%d.txt' % i, 'w')
        if isinstance(golds[i], list):
            output_gold.write('\n'.join(golds[i]))  # one sentence per line in the file
        else:
            output_gold.write(golds[i])
        if isinstance(results[i], list):
            output_result.write('\n'.join(results[i]))
        else:
            output_result.write(results[i])

        output_gold.close()
        output_result.close()

    output = r.convert_and_evaluate(
        rouge_args='-e {}/data -n 2 -w 1.2 -a'.format(ROUGE_PATH))
    print(output)
    return r.output_to_dict(output)
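A toy call of eval_rouge above, assuming ROUGE_PATH points to a ROUGE-1.5.5 installation and the clear_dir helper prepares the gold_summaries and result_summaries working folders; entries may be plain strings or lists of sentences:

results = ["the cat sat on the mat .", "a quick brown fox ."]
golds = [["a cat was sitting on the mat ."], ["the quick brown fox jumps ."]]
scores = eval_rouge(results, golds)
print(scores["rouge_1_f_score"], scores["rouge_2_f_score"])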
Example #17
def eval_model(valid_x, valid_y, vocab, model):
    # if we put the following part outside the code,
    # error occurs
    r = Rouge155()
    r.system_dir = 'tmp/systems'
    r.model_dir = 'tmp/models'
    r.system_filename_pattern = "(\d+).txt"
    r.model_filename_pattern = "[A-Z].#ID#.txt"

    logging.info('Evaluating on a minibatch...')
    model.eval()
    _, x = valid_x.next_batch()
    with torch.no_grad():
        pred = greedy(model, x, vocab)
    _, y = valid_y.next_batch()
    y = y[:, 1:].tolist()
    print_summaries(pred, vocab, 'tmp/systems', '%d.txt')
    print_summaries(y, vocab, 'tmp/models', 'A.%d.txt')

    try:
        output = r.convert_and_evaluate()
        output_dict = r.output_to_dict(output)
        logging.info(
            'Rouge1-F: %f, Rouge2-F: %f, RougeL-F: %f' %
            (output_dict['rouge_1_f_score'], output_dict['rouge_2_f_score'],
             output_dict['rouge_l_f_score']))
    except Exception as e:
        logging.info('Failed to evaluate: %s', e)

    model.train()
Example #18
def main(_):
    rouge = Rouge155()
    rouge.log.setLevel(logging.ERROR)
    rouge.system_filename_pattern = "rouge.(\\d+).txt"
    rouge.model_filename_pattern = "rouge.[A-Z].#ID#.txt"

    tf.logging.set_verbosity(tf.logging.INFO)

    tmpdir = mkdtemp()
    tf.logging.info("tmpdir: %s" % tmpdir)
    # system = decodes/predictions
    system_dir = os.path.join(tmpdir, "system")
    # model = targets/gold
    model_dir = os.path.join(tmpdir, "model")
    os.mkdir(system_dir)
    os.mkdir(model_dir)

    rouge.system_dir = system_dir
    rouge.model_dir = model_dir

    prep_data(rouge.system_dir, rouge.model_dir)

    rouge_scores = rouge.convert_and_evaluate()
    rouge_scores = rouge.output_to_dict(rouge_scores)
    for prefix in ["rouge_1", "rouge_2", "rouge_l"]:
        for suffix in ["f_score", "precision", "recall"]:
            key = "_".join([prefix, suffix])
            tf.logging.info("%s: %.4f" % (key, rouge_scores[key]))

    # clean up after pyrouge
    shutil.rmtree(tmpdir)
    shutil.rmtree(rouge._config_dir)  # pylint: disable=protected-access
    shutil.rmtree(os.path.split(rouge._system_dir)[0])  # pylint: disable=protected-access
Example #19
def _rouge(system_dir, gold_dir):
    # Run rouge
    r = Rouge155()
    r.system_dir = system_dir
    r.model_dir = gold_dir
    r.system_filename_pattern = '([a-zA-Z0-9]*).model'
    r.model_filename_pattern = '#ID#.gold'
    output = r.convert_and_evaluate(
        rouge_args=
        "-e /address/to/rouge/data/directory/rouge/data -a -c 95 -m -n 4 -w 1.2"
    )
    # print output
    output_dict = r.output_to_dict(output)
    # print output_dict

    # avg_rscore = 0
    # if FLAGS.rouge_reward_fscore:
    #     avg_rscore = (output_dict["rouge_1_f_score"]+output_dict["rouge_2_f_score"]+
    #                   output_dict["rouge_3_f_score"]+output_dict["rouge_4_f_score"]+
    #                   output_dict["rouge_l_f_score"])/5.0
    # else:
    #     avg_rscore = (output_dict["rouge_1_recall"]+output_dict["rouge_2_recall"]+
    #                   output_dict["rouge_3_recall"]+output_dict["rouge_4_recall"]+
    #                   output_dict["rouge_l_recall"])/5.0

    avg_rscore = (output_dict["rouge_1_f_score"] +
                  output_dict["rouge_2_f_score"] +
                  output_dict["rouge_l_f_score"]) / 3.0

    return avg_rscore
Example #20
    def test_paths(self):
        rouge = Rouge155()

        def get_home_from_settings():
            with open(rouge.settings_file) as f:
                for line in f.readlines():
                    if line.startswith("home_dir"):
                        rouge_home = line.split("=")[1].strip()
            return rouge_home

        self.assertEqual(rouge.home_dir, get_home_from_settings())
        self.assertTrue(os.path.exists(rouge.bin_path))
        self.assertTrue(os.path.exists(rouge.data_dir))

        wrong_path = "/nonexisting/path/rewafafkljaerearjafankwe3"
        with self.assertRaises(Exception) as context:
            rouge.system_dir = wrong_path
        self.assertEqual(
            str(context.exception),
            "Cannot set {} directory because the path {} does not "
            "exist.".format("system", wrong_path))
        right_path = add_data_path("systems")
        rouge.system_dir = right_path
        self.assertEqual(rouge.system_dir, right_path)

        with self.assertRaises(Exception) as context:
            rouge.model_dir = wrong_path
        self.assertEqual(
            str(context.exception),
            "Cannot set {} directory because the path {} does not "
            "exist.".format("model", wrong_path))
        right_path = add_data_path("models")
        rouge.model_dir = right_path
        self.assertEqual(rouge.model_dir, right_path)
Example #21
def calculate_rouge(system_dir,
                    model_dir,
                    system_pattern='item#ID#.txt',
                    model_pattern='item#ID#.txt'):
    #system_pattern='item.(\d+).txt', model_pattern='item.A.#ID#.txt'):
    """
    Calculates ROUGE
    
    Arguments:
        system_dir {string} -- folder path for generated outputs
        model_dir {[type]} -- folder path for reference or gold outputs
    
    Keyword Arguments:
        system_pattern {str} -- filename pattern for generated outputs (default: {'item.(\d+).txt'})
        model_pattern {str} -- filename pattern for reference or gold outputs (default: {'item.A.#ID#.txt'})
    
    Returns:
        dict -- dictionary with ROUGE scores per file
    """

    r = Rouge155()
    print("miri1")
    r.system_dir = system_dir
    print("miri2")
    r.model_dir = model_dir
    r.system_filename_pattern = system_pattern
    r.model_filename_pattern = model_pattern
    print("miri3")
    IGNORE_FILES = ['.ipynb_checkpoints']
    output = r.convert_and_evaluate()
    print("miri51")
    output_dict = r.output_to_dict(output)
    print("miri6")
    return output_dict
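A sketch of calling calculate_rouge with the naming scheme from the commented-out defaults, assuming hypothetical outputs/system and outputs/model folders containing item.1.txt / item.A.1.txt pairs:

scores = calculate_rouge('outputs/system', 'outputs/model',
                         system_pattern=r'item.(\d+).txt',
                         model_pattern='item.A.#ID#.txt')
print(scores['rouge_l_f_score'])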
Example #22
def compute_rouge(sentences, targets, params):
    target_path = params.output + "/target"
    senten_path = params.output + "/senten"
    if not os.path.exists(target_path):
        os.mkdir(target_path)
    if not os.path.exists(senten_path):
        os.mkdir(senten_path)
    count = 1
    for sent, tgt in zip(sentences, targets):
        with open(target_path+"/text.A."+str(count)+".txt", mode='w', encoding='utf-8') as f:
            with open(senten_path+"/text."+str(count)+".txt", mode='w', encoding='utf-8') as g:
                count += 1
                for s in sent:
                    g.write(str(s)+" ")
                for t in tgt:
                    f.write(str(t)+" ")

    r = Rouge155()
    r.system_dir = senten_path
    r.model_dir = target_path
    r.system_filename_pattern = 'text.(\d+).txt'
    r.model_filename_pattern = 'text.[A-Z].#ID#.txt'

    output = r.convert_and_evaluate()
    print(output)
    return
Example #23
def evaluate_summary(model_directory, system_directory):
    tempdir = create_temporary_directories()

    rouge_instance = Rouge155(ROUGE_PATH,
                              verbose=False,
                              rouge_args=' '.join(ROUGE_OPTIONS))

    # Converts the gold references files to rouge format.
    model_input_dir = model_directory
    model_output_dir = os.path.join(tempdir, MODEL_DIR)
    rouge_instance.convert_summaries_to_rouge_format(model_input_dir,
                                                     model_output_dir)

    # Converts the summary file to rouge format.
    system_output_dir = os.path.join(tempdir, SYSTEM_DIR)
    rouge_instance.convert_summaries_to_rouge_format(system_directory,
                                                     system_output_dir)

    # Writes the configuration file.
    config_filename = os.path.join(tempdir, CONFIG_FILENAME)
    rouge_instance.write_config_static(system_output_dir,
                                       SYSTEM_SUMMARIES_PATTERN,
                                       model_output_dir,
                                       MODEL_SUMMARIES_PATTERN,
                                       config_filename, 1)

    # Runs ROUGE, comparing the gold reference summaries with the newly generated ones.
    output = rouge_instance.evaluate_static(ROUGE_PATH, config_filename,
                                            ROUGE_OPTIONS)

    # Removes the temporary directories.
    rmtree(tempdir)

    return rouge_instance.output_to_dict(output)
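A hedged usage sketch for evaluate_summary above, assuming ROUGE_PATH, ROUGE_OPTIONS, the directory/pattern constants, and create_temporary_directories are defined elsewhere in the module, and that both hypothetical folders hold one plain-text summary per document:

scores = evaluate_summary('data/gold_summaries', 'data/generated_summaries')
print(scores['rouge_2_f_score'])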
Example #24
def evaluate_rouge(hypothesis_dir, gold_dir):
    r = Rouge155()
    r.system_dir = hypothesis_dir
    r.model_dir = gold_dir
    r.system_filename_pattern = '(\d+)_hypothesis.txt'
    r.model_filename_pattern = '#ID#_reference.txt'
    output = r.convert_and_evaluate()
    return output, r.output_to_dict(output)
Example #25
 def evaluate(self):
     self.rouge = Rouge155()
     self.rouge.system_dir = self._system_dir
     self.rouge.model_dir = self._model_dir
     self.rouge.system_filename_pattern = 'system.(\d+).txt'
     self.rouge.model_filename_pattern = 'model.#ID#.txt'
     output = self.rouge.convert_and_evaluate()
     return output
Example #26
def rouge_eval(decode_path, ref_path):
    r = Rouge155()
    r.model_dir = decode_path
    r.system_dir = ref_path
    r.system_filename_pattern = '(\d+)_reference.txt'
    r.model_filename_pattern = '#ID#_decoded.txt'
    rouge_results = r.convert_and_evaluate()
    return r.output_to_dict(rouge_results)
Example #27
def evaluate_rouge(summaries, references, remove_temp=True, rouge_args=[]):
    '''
    Args:
        summaries: [[sentence]]. Each summary is a list of strings (sentences)
        references: [[[sentence]]]. Each reference is a list of candidate summaries.
        remove_temp: bool. Whether to remove the temporary files created during evaluation.
        rouge_args: [string]. A list of arguments to pass to the ROUGE CLI.
    '''
    # summaries: data_num, 3, str
    temp_chars = string.ascii_uppercase + string.digits
    temp_chars_idx = np.random.choice(a=len(temp_chars),
                                      size=10,
                                      replace=True,
                                      p=None)
    temp_dir = ''.join([temp_chars[idx] for idx in temp_chars_idx])
    temp_dir = os.path.join("temp", temp_dir)
    print(temp_dir)
    system_dir = os.path.join(temp_dir, 'system')
    model_dir = os.path.join(temp_dir, 'model')
    # directory for generated summaries
    os.makedirs(system_dir)
    # directory for reference summaries
    os.makedirs(model_dir)
    print(temp_dir, system_dir, model_dir)

    assert len(summaries) == len(references)  # data_num
    for i, (summary, candidates) in enumerate(zip(summaries, references)):
        summary_fn = '%i.txt' % i
        for j, candidate in enumerate(candidates):
            # candidate: list(str), multiple summary sentences
            candidate_fn = '%i.%i.txt' % (i, j)
            with open(os.path.join(model_dir, candidate_fn),
                      'w',
                      encoding="utf-8") as f:
                # candidate is the reference abstract
                f.write('\n'.join(candidate))

        with open(os.path.join(system_dir, summary_fn), 'w',
                  encoding="utf-8") as f:
            # the model-generated summary
            f.write('\n'.join(summary))  # the 3 generated summary sentences

    args_str = ' '.join(map(str, rouge_args))
    rouge = Rouge155(rouge_args=args_str)  # how to use this?
    rouge.system_dir = system_dir
    rouge.model_dir = model_dir
    rouge.system_filename_pattern = '(\d+).txt'  # matches files in system_dir
    rouge.model_filename_pattern = '#ID#.\d+.txt'  # '#ID#' is used to align the documents

    #rouge_args = '-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a'
    #output = rouge.convert_and_evaluate(rouge_args=rouge_args)
    output = rouge.convert_and_evaluate()
    r = rouge.output_to_dict(output)  # dict

    # remove the created temporary files
    if remove_temp:
        shutil.rmtree(temp_dir)
    return r
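A minimal toy call of the evaluate_rouge variant above, assuming pyrouge is configured; each summary is a list of sentences and each entry of references is a list of candidate reference summaries:

summaries = [["the cat sat on the mat .", "it was a sunny day ."]]
references = [[["a cat was sitting on the mat .", "the day was sunny ."]]]
scores = evaluate_rouge(summaries, references, remove_temp=True, rouge_args=[])
print(scores["rouge_1_f_score"])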
Example #28
def RougeTest_pyrouge(ref, hyp, rouge_metric='all'):
    id = 0
    # initialization
    ref_path = './result/ref'
    hyp_path = './result/hyp'
    if not os.path.exists('./result'):
        os.mkdir('./result')
    if not os.path.exists(ref_path):
        os.mkdir(ref_path)
    if not os.path.exists(hyp_path):
        os.mkdir(hyp_path)

    # remove files from previous evaluation
    for f in os.listdir(ref_path):
        os.remove(os.path.join(ref_path, f))
    for f in os.listdir(hyp_path):
        os.remove(os.path.join(hyp_path, f))
    # print(id)
    # write new ref and hyp
    with open('./result/ref/ref.' + str(id) + '.txt', 'w') as f:
        f.write('\n'.join(ref))
    with open('./result/hyp/hyp.' + str(id) + '.txt', 'w') as f:
        f.write('\n'.join(hyp))
    # print("rouge")

    # r= Rouge155()
    r = Rouge155(
        '%s/SciSoft/ROUGE-1.5.5/' % home_path,
        '-e %s/SciSoft/ROUGE-1.5.5/data -c 95 -r 2 -n 2 -m -a' % home_path)
    r.system_dir = './result/hyp'
    r.model_dir = './result/ref'
    r.system_filename_pattern = 'hyp.(\d+).txt'
    r.model_filename_pattern = 'ref.#ID#.txt'

    output = r.convert_and_evaluate()
    # print(output)
    output_dict = r.output_to_dict(output)
    # cleanup
    tmpdir, _ = os.path.split(r.system_dir)
    shutil.rmtree(tmpdir)
    shutil.rmtree(r._config_dir)
    if rouge_metric[1] == 'f':
        return output_dict["rouge_%s_f_score" % rouge_metric[0]]
    elif rouge_metric[1] == 'r':
        return output_dict["rouge_%s_recall" % rouge_metric[0]]
    elif rouge_metric == 'avg_f':
        return (output_dict["rouge_1_f_score"] + output_dict["rouge_2_f_score"]
                + output_dict["rouge_l_f_score"]) / 3
    elif rouge_metric == 'avg_r':
        return (output_dict["rouge_1_recall"] + output_dict["rouge_2_recall"] +
                output_dict["rouge_l_recall"]) / 3
    else:
        return (output_dict["rouge_1_precision"],
                output_dict["rouge_1_recall"], output_dict["rouge_1_f_score"],
                output_dict["rouge_2_precision"],
                output_dict["rouge_2_recall"], output_dict["rouge_2_f_score"],
                output_dict["rouge_l_precision"],
                output_dict["rouge_l_recall"], output_dict["rouge_l_f_score"])
Example #29
def main():
	args = get_args()
	rouge = Rouge155(args.rouge_home, args.rouge_args)
	rouge.system_filename_pattern = args.system_filename_pattern
	rouge.model_filename_pattern = args.model_filename_pattern
	rouge.system_dir = args.system_dir
	rouge.model_dir = args.model_dir
	output = rouge.convert_and_evaluate(args.system_id, args.split_sents)
	print(output)
Example #30
def calculate_rouge(ref_texts, summary_text):
    vocab_ls = load_vocab('./bert-base-chinese-vocab.txt')
    # requires: pip install git+https://github.com/tagucci/pythonrouge.git
    ref_texts = {"A": convert_cn(ref_texts, vocab_ls)}
    summary_text = convert_cn(summary_text, vocab_ls)

    rouge = Rouge155()
    score = rouge.score_summary(summary_text, ref_texts)
    return score
Example #31
File: rouge.py  Project: mirandrom/HipoRank
def evaluate_rouge(summaries: List[List[str]],
                   references: List[List[List[str]]],
                   remove_temp=True,
                   rouge_args=None):
    '''
    Taken from original pacsum repository

    Args:
        summaries: [[sentence]]. Each summary is a list of strings (sentences)
        references: [[[sentence]]]. Each reference is a list of candidate summaries.
        remove_temp: bool. Whether to remove the temporary files created during evaluation.
        rouge_args: [string]. A list of arguments to pass to the ROUGE CLI.
    '''
    temp_dir = ''.join(
        random.choices(string.ascii_uppercase + string.digits, k=10))
    temp_dir = os.path.join("temp", temp_dir)
    print(temp_dir)
    system_dir = os.path.join(temp_dir, 'system')
    model_dir = os.path.join(temp_dir, 'model')
    # directory for generated summaries
    os.makedirs(system_dir)
    # directory for reference summaries
    os.makedirs(model_dir)
    print(temp_dir, system_dir, model_dir)

    assert len(summaries) == len(references)
    for i, (summary, candidates) in enumerate(zip(summaries, references)):
        summary_fn = '%i.txt' % i
        for j, candidate in enumerate(candidates):
            candidate_fn = '%i.%i.txt' % (i, j)
            with open(os.path.join(model_dir, candidate_fn), 'w') as f:
                #print(candidate) f["dataset"][0]
                f.write('\n'.join(candidate))

        with open(os.path.join(system_dir, summary_fn), 'w') as f:
            f.write('\n'.join(summary))

    rouge = Rouge155(rouge_args=rouge_args)
    rouge.system_dir = system_dir
    rouge.model_dir = model_dir
    rouge.system_filename_pattern = '(\d+).txt'
    rouge.model_filename_pattern = '#ID#.\d+.txt'

    #rouge_args = '-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a'
    #output = rouge.convert_and_evaluate(rouge_args=rouge_args)
    output = rouge.convert_and_evaluate()
    output_dir = Path(rouge._model_dir).parent

    r = rouge.output_to_dict(output)
    print(output)
    #print(r)

    # remove the created temporary files
    if remove_temp:
        shutil.rmtree(output_dir)
        shutil.rmtree(temp_dir)
    return r
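In this HipoRank variant, rouge_args is forwarded directly to Rouge155, so in practice it should be a single option string (or None for pyrouge's defaults); a custom string usually needs an explicit '-e <ROUGE data dir>', as the other examples here include. A hedged toy call using the defaults:

summaries = [["first generated sentence .", "second generated sentence ."]]
references = [[["first reference sentence .", "second reference sentence ."]]]
scores = evaluate_rouge(summaries, references)  # rouge_args=None uses pyrouge's default options
print(scores['rouge_1_f_score'], scores['rouge_2_f_score'])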
Example #32
def eval_rouge(dec_pattern, dec_dir, ref_pattern, ref_dir,
               cmd='-c 95 -r 1000 -n 2 -m', system_id=1):
    """ evaluate by original Perl implementation"""
    assert _ROUGE_PATH is not None
    # silence pyrouge logging
    log.get_global_console_logger().setLevel(logging.WARNING)
    with tempfile.TemporaryDirectory() as tmp_dir:
        Rouge155.convert_summaries_to_rouge_format(
            dec_dir, join(tmp_dir, 'dec'))
        Rouge155.convert_summaries_to_rouge_format(
            ref_dir, join(tmp_dir, 'ref'))
        Rouge155.write_config_static(
            join(tmp_dir, 'dec'), dec_pattern,
            join(tmp_dir, 'ref'), ref_pattern,
            join(tmp_dir, 'settings.xml'), system_id
        )
        cmd = (join(_ROUGE_PATH, 'ROUGE-1.5.5.pl')
               + ' -e {} '.format(join(_ROUGE_PATH, 'data'))
               + cmd
               + ' -a {}'.format(join(tmp_dir, 'settings.xml')))
        output = sp.check_output(cmd.split(' '), universal_newlines=True)
    return output
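A hypothetical call of the subprocess-based eval_rouge above, assuming _ROUGE_PATH points at a ROUGE-1.5.5 checkout and the two (made-up) directories contain files matching the given patterns:

output = eval_rouge(r'(\d+)_decoded.txt', 'decode_dir',
                    '#ID#_reference.txt', 'reference_dir')
print(output)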
Example #33
#-*- encoding: utf-8 -*-
from pyrouge import Rouge155

r = Rouge155()
#r.system_dir = '/home/chenbjin/SearchJob/DUC2002_Summarization_Documents/system'
#r.model_dir = '/home/chenbjin/SearchJob/DUC2002_Summarization_Documents/model'

r.system_dir = '/home/chenbjin/SearchJob/DUC2002_Summarization_Documents/wordnet.system.summary/'
r.model_dir = '/home/chenbjin/SearchJob/DUC2002_Summarization_Documents/model.summary/'

r.system_filename_pattern = "DUC2002.(\d+).txt"
r.model_filename_pattern = "DUC2002.[A-Z].#ID#.txt"

output = r.convert_and_evaluate()
print(output)
output_dict = r.output_to_dict(output)
'''

system_input_dir = '/home/chenbjin/SearchJob/DUC2002_Summarization_Documents/system.summary'
system_output_dir = '/home/chenbjin/SearchJob/DUC2002_Summarization_Documents/system.output'
Rouge155.convert_summaries_to_rouge_format(system_input_dir, system_output_dir)
'''
def main():
	args = get_args()
	Rouge155.write_config_static(
		args.system_dir, args.system_filename_pattern,
		args.model_dir, args.model_filename_pattern,
		args.config_file_path, args.system_id)