Example #1
0
def dump_example(subtask, split):
    """Write three example submission files for *subtask*/*split*.

    submission1.json -- the extracted ground-truth states (a perfect run).
    submission2.json -- ground truth with random noise injected: filled
        slots are blanked with probability 1/3, empty slots are set to a
        junk value with probability 1/5.
    submission3.json -- every slot value blanked (a worst-case run).

    Files are written under ``example/<subdir>/`` where <subdir> comes
    from ``get_subdir(subtask)``.
    """
    import random  # hoisted from mid-function; stdlib only

    test_data = prepare_data(subtask, split)
    pred = extract_gt(test_data)
    subdir = get_subdir(subtask)

    def _dump(name):
        # Context manager fixes the original leaked file handles and
        # guarantees a flush; explicit utf-8 avoids the platform-default
        # encoding, which matters because ensure_ascii=False writes
        # non-ASCII characters verbatim.
        with open(os.path.join('example', subdir, name), 'w',
                  encoding='utf-8') as f:
            json.dump(pred, f, ensure_ascii=False, indent=4)

    _dump('submission1.json')

    # Inject random noise into the ground truth in place.
    for dialog_id, states in pred.items():
        for state in states:
            for domain in state.values():
                for slot, value in domain.items():
                    if value:
                        if random.randint(0, 2) == 0:
                            domain[slot] = ""
                    else:
                        if random.randint(0, 4) == 0:
                            domain[slot] = "2333"
    _dump('submission2.json')

    # Blank every slot value in place.
    for dialog_id, states in pred.items():
        for state in states:
            for domain in state.values():
                for slot in domain:
                    domain[slot] = ""
    _dump('submission3.json')
Example #2
0
def dump_example(subtask, split):
    """Write two example submission files for *subtask*/*split*.

    submission1.json -- the extracted ground-truth states (a perfect run).
    submission2.json -- every slot value blanked (a worst-case run).

    Files are written under ``example/<subdir>/`` where <subdir> comes
    from ``get_subdir(subtask)``.
    """
    test_data = prepare_data(subtask, split)
    gt = extract_gt(test_data)
    subdir = get_subdir(subtask)

    def _dump(name):
        # Context manager fixes the original leaked file handles and
        # guarantees a flush; explicit utf-8 avoids the platform-default
        # encoding, which matters because ensure_ascii=False writes
        # non-ASCII characters verbatim.
        with open(os.path.join('example', subdir, name), 'w',
                  encoding='utf-8') as f:
            json.dump(gt, f, ensure_ascii=False, indent=4)

    _dump('submission1.json')

    # Blank every slot value in place.
    for dialog_id, states in gt.items():
        for state in states:
            for domain in state.values():
                for slot in domain:
                    domain[slot] = ""
    _dump('submission2.json')
Example #3
0
def eval_team(team, correct_name_label):
    """Run the evaluator over every existing submission of *team*.

    For each subtask, checks submission directories 1..5 under
    ``<team>/<subtask>-dst/`` and evaluates those that exist against the
    'dstc9' ground truth.
    """
    for subtask in ('multiwoz', 'crosswoz'):
        data = prepare_data(subtask,
                            'dstc9',
                            correct_name_label=correct_name_label)
        truth = extract_gt(data)
        for submission_no in range(1, 6):
            submission_dir = os.path.join(team, f'{subtask}-dst',
                                          f'submission{submission_no}')
            if os.path.exists(submission_dir):
                print(submission_dir)
                evaluate(submission_dir, subtask, data, truth)
Example #4
0
"""
    evaluate output file
"""

from argparse import ArgumentParser
import os

from convlab2.dst.dstc9.utils import prepare_data, extract_gt
from convlab2.dst.dstc9.eval_file import evaluate

if __name__ == '__main__':
    # Walk every team directory in the CWD and evaluate each existing
    # submission (1..5) per subtask against the 'dstc9-250' ground truth.
    parser = ArgumentParser()
    parser.add_argument('--correct_name_label', action='store_true')
    args = parser.parse_args()
    for subtask in ('multiwoz', 'crosswoz'):
        test_data = prepare_data(subtask,
                                 'dstc9-250',
                                 correct_name_label=args.correct_name_label)
        gt = extract_gt(test_data)
        for team in os.listdir('.'):
            for submission_no in range(1, 6):
                candidate = os.path.join(team, f'{subtask}-dst',
                                         f'submission{submission_no}')
                if os.path.exists(candidate):
                    print(candidate)
                    evaluate(candidate, subtask, gt)