Example #1
# reconstructed imports (module paths follow the SUPERT repo layout; adjust as needed)
import os
import numpy as np
from utils.data_reader import CorpusReader
from utils.evaluator import add_result, evaluate_summary_rouge

# (snippet truncated at the source: the lines below are likely the tail of
# RLSummarizer.summarize, which trains an RL agent on the computed rewards
# and returns the best summary found)
                           summaries,
                           strict_para=self.rl_strict,
                           train_round=self.train_episode)
        summary = rl_agent(rewards)
        return summary


if __name__ == '__main__':
    # read source documents
    reader = CorpusReader('data/topic_1')
    source_docs = reader()

    # generate summaries, with summary max length 100 tokens
    rl_summarizer = RLSummarizer()
    summary = rl_summarizer.summarize(source_docs, summ_max_len=100)
    print('\n=====Generated Summary=====')
    print(summary)
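    # NOTE (an assumption based on the SUPERT README; this code version may
    # differ): the reward function can also be wired in explicitly, e.g.
    #   supert = Supert(source_docs)
    #   rl_summarizer = RLSummarizer(reward_func=supert)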

    # (Optional) Evaluate the quality of the summary using ROUGE metrics
    if os.path.isdir('./rouge/ROUGE-RELEASE-1.5.5'):
        refs = reader.readReferences()  # make sure you have put the references in data/topic_1/references
        avg_rouge_score = {}
        for ref in refs:
            rouge_scores = evaluate_summary_rouge(summary, ref)
            add_result(avg_rouge_score, rouge_scores)
        print('\n=====ROUGE scores against {} references====='.format(len(refs)))
        for metric in avg_rouge_score:
            print('{}:\t{}'.format(metric, np.mean(avg_rouge_score[metric])))
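The add_result helper accumulates per-metric scores across references so that
np.mean can average them afterwards. A minimal sketch of such a helper (the
repo ships its own version; this one only illustrates the accumulation
pattern the snippet relies on):

def add_result(all_scores, new_scores):
    # append each metric's score to a running list, keyed by metric name
    for metric in new_scores:
        if metric not in all_scores:
            all_scores[metric] = []
        all_scores[metric].append(new_scores[metric])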
Example #2
import os
import numpy as np
from ref_free_metrics.supert import Supert
from utils.data_reader import CorpusReader
from utils.evaluator import add_result, evaluate_summary_rouge

if __name__ == '__main__':
    pseudo_ref = 'top15'  # pseudo-reference strategy (e.g. build pseudo-refs from the top 15 sentences of each source doc)

    # read source documents
    reader = CorpusReader('data/topic_1')
    source_docs = reader()
    summaries = reader.readSummaries()

    # get unsupervised metrics for the summaries
    supert = Supert(source_docs, ref_metric=pseudo_ref)  # NOTE: keyword name assumed from the repo's Supert signature; drop it to use the default
    scores = supert(summaries)
    print('unsupervised metrics\n', scores)
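    # (illustration, assuming scores holds one float per candidate summary:
    # the top-scoring candidate can be picked directly)
    best_idx = int(np.argmax(scores))
    print('best summary by SUPERT: #{}, score {:.4f}'.format(best_idx, scores[best_idx]))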

    # (Optional) compare the summaries against golden refs using ROUGE
    if os.path.isdir('./rouge/ROUGE-RELEASE-1.5.5'):
        refs = reader.readReferences()  # make sure you have put the references in data/topic_1/references
        summ_rouge_scores = []
        for summ in summaries:
            rouge_scores = {}
            for ref in refs:
                rs = evaluate_summary_rouge(summ, ref)
                add_result(rouge_scores, rs)
            summ_rouge_scores.append(rouge_scores)

        # average each summary's ROUGE-1 over all references
        metric = 'ROUGE-1'
        rouge1_scores = []
        for rs in summ_rouge_scores:
            rouge1_scores.append(np.mean(rs[metric]))
        print('reference-based', metric, '\n', rouge1_scores)
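Since Example #2 produces both an unsupervised score and a reference-based
score per summary, a natural sanity check is to correlate the two, which is
how reference-free metrics are commonly validated. A sketch, assuming scipy
is installed and the variables above are in scope:

        # rank correlation between SUPERT scores and averaged ROUGE-1 scores
        from scipy.stats import spearmanr
        rho, p_value = spearmanr(scores, rouge1_scores)
        print('Spearman rho between SUPERT and ROUGE-1: {:.3f} (p={:.3f})'.format(rho, p_value))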