Example #1
from utils_data import create_calibrated_df, prepare_dataset_for_high_vs_middle_prediction
from utils import plot_calibration, check_calibration, evaluate_level_prediction_high_vs_middle
import numpy as np
from utils_constants import CORRECTNESS

random_state = 42
split = 'test'

# BERT
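# Ensemble of three training seeds; create_calibrated_df presumably merges
# the calibrated scores across the listed prediction files.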
df = create_calibrated_df([
    'output_bert_seed0_%s.csv' % split,
    'output_bert_seed3_%s.csv' % split,
    'output_bert_seed42_%s.csv' % split,
])
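# CORRECTNESS is presumably a 0/1 indicator column, so its mean is the QA accuracy.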
print('BERT: QA TEST ACCURACY = %.4f' % float(np.mean(df[CORRECTNESS])))
output_filename = 'output/bert_ensemble_%s.txt' % split
output_file = open(output_filename, "w")
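# 0.1 is the confidence-bin width used for the reliability diagram and the
# calibration check (the same bin_size appears in Example #5).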
plot_calibration(df,
                 0.1,
                 image_name='output_figures/bert_ensemble_%s.pdf' % split)
check_calibration(df, 0.1, output_file=output_file)
df = prepare_dataset_for_high_vs_middle_prediction(df,
                                                   output_file=output_file,
                                                   random_state=random_state)
evaluate_level_prediction_high_vs_middle(df, output_file=output_file)
output_file.close()

# XLNet ensemble (seed 4 is an assumption, matching the XLNet ensemble files
# used in the later examples)
df = create_calibrated_df([
    'output_xlnet_seed_2_%s.csv' % split,
    'output_xlnet_seed_3_%s.csv' % split,
    'output_xlnet_seed_4_%s.csv' % split,
])
# the evaluation of this ensemble presumably mirrors the BERT block above
Example #2
import pandas as pd

from utils_data import create_calibrated_df
from utils_mturk import get_list_id_within_doc, prepare_df_for_evaluation, perform_evaluation

# data preparation
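# presumably the pairwise human judgments collected on Amazon Mechanical Turk
# (hence the utils_mturk helpers below)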
df_results_mturk = pd.read_csv('data/pairwise_race_cs.csv')

# Single models
for random_seed in [0, 3, 42]:
    df_predictions = create_calibrated_df(
        ['output_bert_seed%d_test.csv' % random_seed])
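    # attach a within-document question id so predictions can be joined with
    # the MTurk pairwise results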
    list_id_within_doc = get_list_id_within_doc(df_predictions)
    df_predictions['id'] = list_id_within_doc
    df_for_evaluation = prepare_df_for_evaluation(df_results_mturk,
                                                  df_predictions)
    output_filename = 'output/pairwise_race_cs_bert_%d_test.txt' % random_seed
    output_file = open(output_filename, "w")
    perform_evaluation(df_for_evaluation, output_file=output_file)
    output_file.close()

# ensemble
df_predictions = create_calibrated_df([
    'output_bert_seed0_test.csv', 'output_bert_seed3_test.csv',
    'output_bert_seed42_test.csv'
])
list_id_within_doc = get_list_id_within_doc(df_predictions)
df_predictions['id'] = list_id_within_doc
df_for_evaluation = prepare_df_for_evaluation(df_results_mturk, df_predictions)
output_filename = 'output/pairwise_race_cs_bert_ensemble_test.txt'
output_file = open(output_filename, "w")
perform_evaluation(df_for_evaluation, output_file=output_file)
output_file.close()

Example #3

from utils_data import create_calibrated_df, prepare_dataset_for_high_vs_middle_prediction
from utils import plot_calibration, check_calibration, evaluate_level_prediction_high_vs_middle
from utils_constants import CORRECTNESS
import numpy as np

random_state = 42
split = 'test'

for random_seed in [1, 2, 3, 4, 5]:
    df = create_calibrated_df(
        ['output_xlnet_seed_%d_%s.csv' % (random_seed, split)])
    print('XLNET %d: QA TEST ACCURACY = %.4f' %
          (random_seed, float(np.mean(df[CORRECTNESS]))))
    output_filename = 'output/xlnet_%d_test.txt' % random_seed
    output_file = open(output_filename, "w")
    plot_calibration(df,
                     0.1,
                     image_name='output_figures/xlnet_%d_test.pdf' %
                     random_seed)
    check_calibration(df, 0.1, output_file=output_file)
    df = prepare_dataset_for_high_vs_middle_prediction(
        df, output_file=output_file, random_state=random_state)
    evaluate_level_prediction_high_vs_middle(df, output_file=output_file)
    output_file.close()

for random_seed in [0, 1, 2, 3, 42]:
    df = create_calibrated_df(
        ['output_distilbert_seed%d_%s.csv' % (random_seed, split)])
    print('DistilBERT %d: QA TEST ACCURACY = %.4f' %
          (random_seed, float(np.mean(df[CORRECTNESS]))))
    output_filename = 'output/distilbert%d_%s.txt' % (random_seed, split)
    output_file = open(output_filename, "w")
    # the rest of the loop presumably mirrors the XLNet block above
    # (the figure filename is an assumption)
    plot_calibration(df,
                     0.1,
                     image_name='output_figures/distilbert%d_%s.pdf' %
                     (random_seed, split))
    check_calibration(df, 0.1, output_file=output_file)
    df = prepare_dataset_for_high_vs_middle_prediction(
        df, output_file=output_file, random_state=random_state)
    evaluate_level_prediction_high_vs_middle(df, output_file=output_file)
    output_file.close()

Example #4

import pandas as pd

from utils_data import create_calibrated_df
from utils_mturk import get_list_id_within_doc, prepare_df_for_evaluation, perform_evaluation

# data preparation
df_results_mturk = pd.read_csv('data/pairwise_race_cs.csv')

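# cross-architecture ensemble: BERT and DistilBERT predictions combined into
# a single calibrated df before evaluation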
df_predictions = create_calibrated_df([
    'output_bert_seed0_test.csv',
    'output_bert_seed3_test.csv',
    'output_bert_seed42_test.csv',
    'output_distilbert_seed1_test.csv',
    'output_distilbert_seed3_test.csv',
    'output_distilbert_seed42_test.csv',
])
list_id_within_doc = get_list_id_within_doc(df_predictions)
df_predictions['id'] = list_id_within_doc
df_for_evaluation = prepare_df_for_evaluation(df_results_mturk, df_predictions)
output_filename = 'output/pairwise_race_cs_bert_distilbert_ensemble_test.txt'
output_file = open(output_filename, "w")
perform_evaluation(df_for_evaluation, output_file=output_file)
output_file.close()

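# cross-architecture ensemble: BERT and XLNet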
df_predictions = create_calibrated_df([
    'output_bert_seed0_test.csv', 'output_bert_seed3_test.csv',
    'output_bert_seed42_test.csv', 'output_xlnet_seed_2_test.csv',
    'output_xlnet_seed_3_test.csv', 'output_xlnet_seed_4_test.csv'
])
list_id_within_doc = get_list_id_within_doc(df_predictions)
df_predictions['id'] = list_id_within_doc
df_for_evaluation = prepare_df_for_evaluation(df_results_mturk, df_predictions)
# output filename assumed, following the naming pattern of the other ensembles
output_filename = 'output/pairwise_race_cs_bert_xlnet_ensemble_test.txt'
output_file = open(output_filename, "w")
perform_evaluation(df_for_evaluation, output_file=output_file)
output_file.close()
Example #5
import matplotlib.pyplot as plt
import numpy as np

from utils_constants import MAX_SCORE, CORRECTNESS
from utils_data import create_calibrated_df

random_seed = 42
split = 'test'
# image_name = 'output_figures/rel_diagram.pdf'
image_name = None
bin_size = 0.1

df = create_calibrated_df(['output_bert_seed%d_%s.csv' % (random_seed, split)])

# single model
list_correctness = []
list_counts = []
list_score_ranges = []
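# bucket predictions by confidence (MAX_SCORE) and record per-bin accuracy
# and per-bin counts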
n_bins = int(round(1.0 / bin_size))
for idx in range(n_bins):
    score_range = (bin_size * idx, bin_size * (idx + 1))
    list_score_ranges.append(score_range)
    if idx == n_bins - 1:
        # include the right edge in the last bin so a score of exactly 1.0 is counted
        tmp_df = df[(df[MAX_SCORE] >= score_range[0])
                    & (df[MAX_SCORE] <= score_range[1])]
    else:
        tmp_df = df[(df[MAX_SCORE] >= score_range[0])
                    & (df[MAX_SCORE] < score_range[1])]
    len_tmp_df = len(tmp_df)
    list_counts.append(len_tmp_df)
    if len_tmp_df > 0:
        list_correctness.append(np.mean(tmp_df[CORRECTNESS]))
    else:
        list_correctness.append(0)
num_bins = len(list_correctness)
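
# A minimal sketch of the reliability diagram this script presumably draws
# from the bins above (bars = per-bin accuracy, dashed line = perfect
# calibration); image_name and bin_size are defined at the top of the script.
plt.bar(np.arange(num_bins) * bin_size + bin_size / 2,
        list_correctness,
        width=bin_size,
        edgecolor='black')
plt.plot([0, 1], [0, 1], linestyle='--', color='gray')
plt.xlabel('confidence')
plt.ylabel('accuracy')
if image_name is not None:
    plt.savefig(image_name)
plt.show()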

Example #6

import pandas as pd

from utils_data import create_calibrated_df
from utils_mturk import get_list_id_within_doc, prepare_df_for_evaluation, perform_evaluation

# data preparation
df_results_mturk = pd.read_csv('data/pairwise_race_cs.csv')

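# XLNet-only ensemble (seeds 2, 3, 4)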
df_predictions = create_calibrated_df([
    'output_xlnet_seed_2_test.csv', 'output_xlnet_seed_3_test.csv',
    'output_xlnet_seed_4_test.csv'
])
list_id_within_doc = get_list_id_within_doc(df_predictions)
df_predictions['id'] = list_id_within_doc
df_for_evaluation = prepare_df_for_evaluation(df_results_mturk, df_predictions)
output_filename = 'output/pairwise_race_cs_xlnet_ensemble_test.txt'
output_file = open(output_filename, "w")
perform_evaluation(df_for_evaluation, output_file=output_file)
output_file.close()

# Single models
for random_seed in [2, 3, 4]:
    df_predictions = create_calibrated_df(
        ['output_xlnet_seed_%d_test.csv' % random_seed])
    list_id_within_doc = get_list_id_within_doc(df_predictions)
    df_predictions['id'] = list_id_within_doc
    df_for_evaluation = prepare_df_for_evaluation(df_results_mturk,
                                                  df_predictions)
    output_filename = 'output/pairwise_race_cs_xlnet_%d_test.txt' % random_seed
    output_file = open(output_filename, "w")
    perform_evaluation(df_for_evaluation, output_file=output_file)
    output_file.close()