Code example #1
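These snippets are excerpted from a test module, so the names they rely on are never imported on the page. A minimal set of imports that would make them runnable, assuming the shared helpers and `rsmtool_test_dir` come from `rsmtool.test_utils` as in the rsmtool test suite:

from glob import glob
from os.path import basename, exists, join

# Assumed location of the shared helpers; adjust if your rsmtool
# version keeps them elsewhere.
from rsmtool.test_utils import (check_file_output,
                                check_report,
                                do_run_evaluation,
                                rsmtool_test_dir)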
def test_run_experiment_lr_eval_with_cfg():

    # basic evaluation experiment using rsmeval

    source = 'lr-eval-cfg'
    experiment_id = 'lr_eval_cfg'
    config_file = join(rsmtool_test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.cfg'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)

    output_dir = join('test_outputs', source, 'output')
    expected_output_dir = join(rsmtool_test_dir, 'data', 'experiments', source, 'output')
    html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id))

    csv_files = glob(join(output_dir, '*.csv'))
    for csv_file in csv_files:
        csv_filename = basename(csv_file)
        expected_csv_file = join(expected_output_dir, csv_filename)

        if exists(expected_csv_file):
            yield check_file_output, csv_file, expected_csv_file

    yield check_report, html_report
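This is a nose-style generator test: each `yield` hands nose a check function together with its arguments, and nose runs every yielded tuple as a separate test case (pytest dropped support for yield tests in version 4.0, so these examples assume a nose runner). The `check_file_output` helper is not shown on this page; a minimal sketch of what such a CSV comparison could look like, assuming pandas is available (the real rsmtool helper may differ in detail, e.g. in how it handles floating-point tolerance):

import pandas as pd
from pandas.testing import assert_frame_equal

def check_file_output_sketch(file1, file2):
    # Hypothetical stand-in for check_file_output: load both CSV
    # files and assert that they hold the same data, allowing
    # small floating-point differences rather than exact equality.
    df1 = pd.read_csv(file1)
    df2 = pd.read_csv(file2)
    assert_frame_equal(df1, df2, check_exact=False)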
Code example #2
def test_run_experiment_lr_eval_with_repeated_ids():

    # rsmeval experiment with non-unique ids
    source = 'lr-eval-with-repeated-ids'
    experiment_id = 'lr_eval_with_repeated_ids'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
Code example #3
def test_run_experiment_lr_eval_wrong_path():

    # basic rsmeval experiment with wrong path to the
    # predictions file

    source = 'lr-eval-with-wrong-path'
    experiment_id = 'lr_eval_with_h2'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
Code example #4
def test_run_experiment_eval_lr_with_missing_candidate_column():

    # rsmeval experiment with `candidate_column`
    # set to a column that does not exist in the given
    # predictions file
    source = 'lr-eval-with-missing-candidate-column'
    experiment_id = 'lr_eval_with_missing_candidate_column'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
Code example #5
def test_run_experiment_lr_eval_same_system_human_score():

    # rsmeval experiment with the same value supplied
    # for both the human score and the system score

    source = 'lr-eval-same-system-human-score'
    experiment_id = 'lr_eval_same_system_human_score'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
Code example #6
def test_run_experiment_lr_eval_all_non_numeric_machine_scores():

    # rsmeval experiment in which all machine scores
    # are non-numeric and therefore get filtered out,
    # which should raise an exception

    source = 'lr-eval-with-all-non-numeric-machine-scores'
    experiment_id = 'lr_eval_all_non_numeric_machine_scores'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
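The comment says this run should raise an exception, but nothing in the function asserts that, so a passing run would not actually verify the failure. In nose-based suites the expectation is usually expressed with the `@raises` decorator; a minimal sketch, assuming the failure surfaces as a `ValueError` (the concrete exception type depends on rsmtool's validation code):

from nose.tools import raises

@raises(ValueError)  # assumed exception type; adjust to what rsmtool raises
def test_run_experiment_lr_eval_all_non_numeric_machine_scores():
    source = 'lr-eval-with-all-non-numeric-machine-scores'
    experiment_id = 'lr_eval_all_non_numeric_machine_scores'
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)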
Code example #7
def test_run_experiment_lr_eval_all_non_numeric_scores():

    # rsmeval experiment in which all human scores
    # are non-numeric and therefore get filtered out,
    # which should raise an exception

    source = 'lr-eval-with-all-non-numeric-scores'
    experiment_id = 'lr_eval_all_non_numeric_scores'
    config_file = join(rsmtool_test_dir,
                       'data',
                       'experiments',
                       source,
                       '{}.json'.format(experiment_id))
    do_run_evaluation(source, experiment_id, config_file)
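Code examples #3, #4, and #5 all follow the same pattern: build a path to a deliberately broken configuration and hand it to `do_run_evaluation`. If the suite were ported to pytest, they could be collapsed into a single parametrized test; a minimal sketch, assuming each invalid configuration raises some exception during validation:

import pytest

@pytest.mark.parametrize('source, experiment_id',
                         [('lr-eval-with-wrong-path',
                           'lr_eval_with_h2'),
                          ('lr-eval-with-missing-candidate-column',
                           'lr_eval_with_missing_candidate_column'),
                          ('lr-eval-same-system-human-score',
                           'lr_eval_same_system_human_score')])
def test_run_experiment_bad_eval_config(source, experiment_id):
    config_file = join(rsmtool_test_dir, 'data', 'experiments', source,
                       '{}.json'.format(experiment_id))
    # Assumed: each of these invalid configurations makes
    # do_run_evaluation fail; the concrete exception type is not
    # pinned down here, so the broad base Exception is caught.
    with pytest.raises(Exception):
        do_run_evaluation(source, experiment_id, config_file)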