def __init__(self, ref_dir, pred_dir, maxlen=100, log=sys.stderr):
    self.name = 'rouge'  # was "==", a no-op comparison rather than an assignment
    self.config_file = evaluate_duc.create_config(ref_dir, pred_dir)
    self.pred_dir = pred_dir
    self.ref_dir = ref_dir
    self.maxlen = maxlen
    self.output = log
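## A hedged sketch (assumed, not in the original class) of the companion run()
## method this __init__ implies: score pred_dir against ref_dir using the
## prepared config, report to self.output, and clean up. ROUGE_SCORER is
## assumed to be a module-level path to ROUGE-1.5.5_faster.pl, as in the
## driver scripts below.
def run(self):
    self.output.write('\nROUGE results\n')
    evaluate_duc.run_rouge(ROUGE_SCORER, self.config_file, self.maxlen, verbose=False)
    os.remove(self.config_file)  # the config file is single-use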
options, task = parse_options()

## create SummaryProblem instances
setup_start_time = time.time()
if options.task == 'u08':
    framework.setup_TAC08(task)
else:
    framework.setup_DUC_basic(task, skip_updates=False)

## only run the parser if compression is required (this is not known by the pickle stuff)
parser = None
if options.compress:
    parser = berkeleyparser.CommandLineParser(BERKELEY_PARSER_CMD)
framework.setup_DUC_sentences(task, parser, reload=options.reload)
setup_time = time.time() - setup_start_time

## go!
run_standard(options)
sys.stderr.write('Setup time [%1.2fs]\n' % setup_time)

## evaluate
if not options.manpath:
    sys.stderr.write('no manual path specified, skipping evaluation\n')
else:
    import evaluate_duc
    sys.stderr.write('\nROUGE results\n')
    config_file = evaluate_duc.create_config(task.manual_path, options.output)
    evaluate_duc.run_rouge(ROUGE_SCORER, config_file, options.length, verbose=False)
    os.remove(config_file)
                  type='float', help='fudge factor', default=1.0)
parser.add_option('--shortened', dest='shortened', type='str',
                  help='location of .sent files for shortening experiment')
(options, args) = get_options(parser)

if options.experiment != 'eval':
    os.popen('mkdir -p %s' % options.outpath + '/summary/')
    ids = get_topic_ids(options.inputpath)
    for id in ids:
        if '-C' in id:
            continue
        make_summary_ie(options.inputpath, id, options.outpath,
                        options.outpath + '/summary/', options.length, options)
else:
    import evaluate_duc
    sys.stderr.write('\nROUGE results\n')
    config_file = evaluate_duc.create_config(options.manpath,
                                             options.outpath + '/summary/',
                                             chop_annotator_id=True)
    ROUGE_SCORER = 'scoring/ROUGE-1.5.5/ROUGE-1.5.5_faster.pl'
    evaluate_duc.run_rouge(ROUGE_SCORER, config_file, options.length, verbose=True)
    os.remove(config_file)
os.popen('mkdir -p %s' % options.outpath + '/summary/')

## parameters
length = 100

## run through all topics
ids = get_topic_ids(options.inputpath)
for id in ids:
    if '-C' in id:
        continue
    make_summary(options.inputpath, id, options.outpath,
                 options.outpath + '/summary/', length, options)

## ROUGE evaluation
ROUGE_SCORER = 'scoring/ROUGE-1.5.5/ROUGE-1.5.5_faster.pl'
man_source = {}
man_source['u09'] = '../09/UpdateSumm09_eval/ROUGE/models/'
man_source['u08'] = '../08/tac08_results/ROUGE/models/'
man_source['u07'] = '../07/duc07.results.data/results/updateEval/ROUGE/models/'
man_source['m07'] = '../07/duc07.results.data/results/mainEval/ROUGE/models/'
man_source['m06'] = '../06/results/NIST/NISTeval/ROUGE/models/'
man_source['m05'] = '../05/results/NIST/results/ROUGE/models/'

## --manpath overrides the per-task default model location
manual_path = man_source[options.task]
if options.manpath:
    manual_path = options.manpath

import evaluate_duc
sys.stderr.write('\nROUGE results\n')
config_file = evaluate_duc.create_config(manual_path, options.outpath + '/summary/',
                                         chop_annotator_id=True)
evaluate_duc.run_rouge(ROUGE_SCORER, config_file, length, verbose=False)
os.remove(config_file)
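## A minimal sketch of what run_rouge presumably does (an assumption, not the
## project's actual implementation): shell out to the ROUGE-1.5.5 perl script
## and echo its summary lines. The flags are standard ROUGE-1.5.5 options:
## -e data dir, -n max n-gram size, -m Porter stemming, -l word-length limit,
## -a evaluate all systems in the config.
def run_rouge_sketch(scorer, config_file, length):
    rouge_data = os.path.join(os.path.dirname(scorer), 'data')  # assumed layout
    cmd = '%s -e %s -n 2 -m -l %d -a %s' % (scorer, rouge_data, length, config_file)
    for line in os.popen(cmd):
        if 'Average_R' in line or 'Average_F' in line:
            sys.stderr.write(line)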
data_path = "/u/dgillick/workspace/summ/bourbon/tac09_v4/"
# data_path = '/u/dgillick/workspace/summ/bourbon/duc07_v3/'

## parameters
length = 100

## run through all topics
ids = get_topic_ids(data_path)
for id in ids:
    if "-C" in id:
        continue
    make_summary(data_path, id, out_path, summ_path, length)

## ROUGE evaluation
ROUGE_SCORER = "/u/favre/work/summarization/tools/ROUGE-1.5.5/ROUGE-1.5.5_faster.pl"
man_source = {}
man_source["u08"] = "/u/favre/work/summarization/tac08_results/ROUGE/models/"
man_source["u07"] = "/u/drspeech/data/DUC/07/duc07.results.data/results/updateEval/ROUGE/models/"
man_source["m07"] = "/u/drspeech/data/DUC/07/duc07.results.data/results/mainEval/ROUGE/models/"
man_source["m06"] = "/u/drspeech/data/DUC/06/results/NIST/NISTeval/ROUGE/models/"
man_source["m05"] = "/u/drspeech/data/DUC/05/results/NIST/results/ROUGE/models/"
manual_path = man_source["u08"]

import evaluate_duc
sys.stderr.write("\nROUGE results\n")
config_file = evaluate_duc.create_config(manual_path, summ_path)
evaluate_duc.run_rouge(ROUGE_SCORER, config_file, length, verbose=False)
os.remove(config_file)
## parameters
length = 100

## run through all topics
ids = get_topic_ids(data_path)
for id in ids:
    if '-C' in id:
        continue
    make_summary(data_path, id, out_path, summ_path, length)

## ROUGE evaluation
ROUGE_SCORER = '/u/favre/work/summarization/tools/ROUGE-1.5.5/ROUGE-1.5.5_faster.pl'
man_source = {}
man_source['u08'] = '/u/favre/work/summarization/tac08_results/ROUGE/models/'
man_source['u07'] = '/u/drspeech/data/DUC/07/duc07.results.data/results/updateEval/ROUGE/models/'
man_source['m07'] = '/u/drspeech/data/DUC/07/duc07.results.data/results/mainEval/ROUGE/models/'
man_source['m06'] = '/u/drspeech/data/DUC/06/results/NIST/NISTeval/ROUGE/models/'
man_source['m05'] = '/u/drspeech/data/DUC/05/results/NIST/results/ROUGE/models/'
manual_path = man_source['u08']

import evaluate_duc
sys.stderr.write('\nROUGE results\n')
config_file = evaluate_duc.create_config(manual_path, summ_path)
evaluate_duc.run_rouge(ROUGE_SCORER, config_file, length, verbose=False)
os.remove(config_file)
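## The create_config / run_rouge / os.remove sequence is repeated nearly
## verbatim in each of these scripts; a minimal consolidation sketch (a
## hypothetical helper, not in the original source) that covers both call
## signatures seen above:
def score_with_rouge(manual_path, summ_path, length, scorer,
                     chop_annotator_id=False, verbose=False):
    """Build a temporary ROUGE config, score summ_path against manual_path,
    and always remove the config file, even if scoring fails."""
    import evaluate_duc
    config_file = evaluate_duc.create_config(manual_path, summ_path,
                                             chop_annotator_id=chop_annotator_id)
    try:
        sys.stderr.write('\nROUGE results\n')
        evaluate_duc.run_rouge(scorer, config_file, length, verbose=verbose)
    finally:
        os.remove(config_file)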