def wikicont_cnn():
    e = Experiment(hyperparams.HPCNN())
    e_config = ExperimentConfig()
    e_config.num_epoch = 0
    e_config.input_name = "WikiContrvCNN"
    e_config.name = "WikiContrvCNN_sigmoid"
    e.train_cnn_wiki_contrv(e_config)


def predict_rf():
    hp = hyperparams.HPBert()
    hp.batch_size = 256
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    target_label = 'mismatch'
    #data_id = 'test_conflict'
    data_id = "{}_1000".format(target_label)
    e_config = ExperimentConfig()
    #del_g = 0.7
    #e_config.name = "X_match_del_{}".format(del_g)
    e_config.name = "NLIEx_AnyA_{}".format(target_label)
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #load_id = ("NLI_bare_A", 'model-195608')
    #load_id = ("NLIEx_O", 'model-10278')
    load_id = ("NLIEx_W_mismatch", "model-12030")
    load_id = ("NLIEx_Y_conflict", "model-12039")
    load_id = ("NLIEx_X_match", "model-12238")
    #load_id = ("NLIEx_match_del_{}".format(del_g), "model-4390")
    load_id = ("NLIEx_CE_{}".format(target_label), "model-12199")
    load_id = ("NLIEx_AnyA", "model-7255")  # only this last assignment takes effect
    e.predict_rf(nli_setting, e_config, data_loader, load_id, data_id, 5)


def train_score_merger():
    hp = hyperparams.HPMerger_BM25()
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "Merger_{}".format("A")
    e_config.num_epoch = 4
    data_loader = score_loader.DataLoader(hp.seq_max, hp.hidden_units)
    e.train_score_merger(e_config, data_loader)


def train_score_merger_on_vector():
    hp = hyperparams.HPMerger()
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "MergerE_{}".format("E")
    data_loader = score_loader.NetOutputLoader(hp.seq_max, hp.hidden_units, hp.batch_size)
    e.train_score_merger(e_config, data_loader)


def lm_tweets_train():
    hp = HPTweets()
    data = tweets.TweetLoader("atheism", hp.seq_max, shared_setting.Tweets2Stance)
    e_config = ExperimentConfig()
    e_config.name = "LM_tweets"
    e_config.num_epoch = 30
    e_config.save_interval = 30 * 60  # 30 minutes
    e = Experiment(hp)
    e.train_lm_batch(e_config, data)


def lm_guardian_train():
    hp = Hyperparams()
    guardian_data = guardian.GuardianLoader("atheism", hp.seq_max, shared_setting.Guardian2Stance)
    e_config = ExperimentConfig()
    e_config.name = "LM_guardian"
    e_config.num_epoch = 30
    e = Experiment(hp)
    e.train_lm_batch(e_config, guardian_data)


def train_nli_smart_rf():
    hp = hyperparams.HPSENLI()
    hp.compare_deletion_num = 20
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    e_config = ExperimentConfig()
    #explain_tag = 'mismatch'
    explain_tag = 'match'
    loss_type = 2
    e_config.name = "NLIEx_Hinge_{}".format(explain_tag)
    e_config.num_epoch = 1
    e_config.ex_val = True
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']  #, 'aux_conflict']
    e_config.save_eval = True
    e_config.save_name = "LossFn_{}_{}".format(loss_type, explain_tag)
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLI_run_A", 'model-0')
    print("Loss : ", loss_type)
    e.train_nli_smart(nli_setting, e_config, data_loader, load_id, explain_tag, loss_type)


def gradient_rte_visulize():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    vocab_filename = "bert_voca.txt"
    load_id = loader.find_model_name("RTE_A")
    e_config = ExperimentConfig()
    e_config.name = "RTE_{}".format("visual")
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']
    data_loader = rte.DataLoader(hp.seq_max, vocab_filename, True)
    e.rte_visualize(e_config, data_loader, load_id)


def pair_lm():
    hp = HPPairTweet()
    topic = "atheism"
    setting = shared_setting.TopicTweets2Stance(topic)
    tweet_group = tweet_reader.load_per_user(topic)
    data = loader.PairDataLoader(hp.sent_max, setting, tweet_group)
    e_config = ExperimentConfig()
    e_config.name = "LM_pair_tweets_{}".format(topic)
    e_config.num_epoch = 1
    e_config.save_interval = 30 * 60  # 30 minutes
    e = Experiment(hp)
    e.train_pair_lm(e_config, data)


def train_nil():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLI_only_{}".format("B")
    e_config.num_epoch = 2
    e_config.save_interval = 30 * 60  # 30 minutes
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    e.train_nli(nli_setting, e_config, data_loader)


def stance_with_consistency():
    hp = HPStanceConsistency()
    topic = "atheism"
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "stance_consistency_{}".format(topic)
    setting = shared_setting.TopicTweets2Stance(topic)
    stance_data = stance_detection.DataLoader(topic, hp.seq_max, setting.vocab_filename)
    tweet_group = tweet_reader.load_per_user(topic)
    aux_data = AuxPairLoader(hp.seq_max, setting, tweet_group)
    voca_size = setting.vocab_size
    e.train_stance_consistency(voca_size, stance_data, aux_data)


def pred_mnli_anyway():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLIEx_AnyA"
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    target_label = 'mismatch'
    data_id = "{}_1000".format(target_label)
    load_id = ("NLIEx_AnyA", 'model-2785')
    e.predict_rf(nli_setting, e_config, data_loader, load_id, data_id)


def pred_snli_ex():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "SNLIEx_B"
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']
    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("SNLIEx_B", 'model-10275')
    e.predict_rf(nli_setting, e_config, data_loader, load_id, "test")


def train_nil_cold():
    print('train_nil_cold')
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLI_Cold"
    e_config.num_epoch = 2
    e_config.save_interval = 30 * 60  # 30 minutes
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    saved = e.train_nli_ex_0(nli_setting, e_config, data_loader, None, False)
    e.test_acc2(nli_setting, e_config, data_loader, saved)


def train_nil():  # NOTE: redefines train_nil() above; this later definition wins at import time
    hp = HP()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLI_only_{}".format("512")
    e_config.num_epoch = 2
    e_config.save_interval = 30 * 60  # 30 minutes
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_nli_ex_0(nli_setting, e_config, data_loader, load_id, False)


def test_ql():
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}".format("C")
    e_config.num_epoch = 4
    e_config.load_names = ['bert', 'cls']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"
    data_loader = ws.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.test_ql(e_config, data_loader, load_id)


def train_nil_on_bert():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLI_Only_{}".format("C")
    e_config.num_epoch = 2
    e_config.save_interval = 30 * 60  # 30 minutes
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #load_id = None
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    #load_id = ("NLI_bert_w_explain", 'model-91531')
    #load_id = ("NLI_Only_A", "model-0")
    e.train_nli_ex_0(nli_setting, e_config, data_loader, load_id, False)


def predict_lime_snli_continue():
    hp = hyperparams.HPBert()
    hp.batch_size = 512 + 256
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "SNLI_LIME_{}".format("eval")
    e_config.load_names = ['bert', 'cls_dense']
    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("SNLI_Only_A", 'model-0')
    e.predict_lime_snli_continue(nli_setting, e_config, data_loader, load_id, "test")


def predict_adhoc_robust_K():
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}_eval".format("K")
    e_config.load_names = ['bert', 'dense1', 'dense_reg']
    vocab_size = 30522
    #payload_path = os.path.join(cpath.data_path, "robust_payload", "payload_B_200.pickle")
    payload_path = os.path.join(cpath.data_path, "robust_payload", "payload_desc.pickle")
    task_idx = int(sys.argv[2])
    print(task_idx)
    load_id = ("Adhoc_K", 'model-6397')
    e.predict_robust(e_config, vocab_size, load_id, payload_path, task_idx)


def run_adhoc_rank_on_robust():
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}_eval".format("F")
    e_config.num_epoch = 4
    e_config.load_names = ['bert', 'reg_dense']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"
    data_loader = data_sampler.DataLoaderFromFile(hp.batch_size, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    load_id = ("Adhoc_E", 'model-58338')  # only this last assignment takes effect
    e.rank_adhoc(e_config, data_loader, load_id)


def train_test_repeat(load_id, exp_name, n_repeat):
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.name = "RTE_{}".format("A")
    e_config.num_epoch = 10
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert']
    vocab_filename = "bert_voca.txt"
    data_loader = rte.DataLoader(hp.seq_max, vocab_filename, True)
    print(load_id)
    scores = []
    for i in range(n_repeat):
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "rte_{}".format(exp_name)
        save_path = e.train_rte(e_config, data_loader, load_id)
        acc = e.eval_rte(e_config, data_loader, save_path)
        scores.append(acc)
    print(exp_name)
    for score in scores:  # renamed from 'e' to avoid shadowing the Experiment object
        print(score, end="\t")
    print()
    r = average(scores)
    print("Avg\n{0:.03f}".format(r))
    return r


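# A hypothetical driver for train_test_repeat(), given only as a usage sketch.
# The ("uncased_L-12_H-768_A-12", 'bert_model.ckpt') tuple is the pretrained
# BERT checkpoint convention already used elsewhere in this file; the run name
# and repeat count here are made-up placeholders.
def rte_repeat_from_bert():
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    return train_test_repeat(load_id, "bert_init", 10)

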
def ukp_train_test_repeat(load_id, exp_name, topic, n_repeat):
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.num_epoch = 2
    e_config.save_interval = 100 * 60  # 100 minutes
    e_config.voca_size = 30522
    e_config.load_names = ['bert']
    encode_opt = "is_good"
    print(load_id)
    scores = []
    for i in range(n_repeat):
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "arg_{}_{}_{}".format(exp_name, topic, encode_opt)
        data_loader = BertDataLoader(topic, True, hp.seq_max, "bert_voca.txt", option=encode_opt)
        save_path = e.train_ukp(e_config, data_loader, load_id)
        f1_last = e.eval_ukp(e_config, data_loader, save_path)
        scores.append(f1_last)
    print(exp_name)
    print(encode_opt)
    for score in scores:  # renamed from 'e' to avoid shadowing the Experiment object
        print(score, end="\t")
    print()
    print("Avg\n{0:.03f}".format(average(scores)))


def eval_ukp_with_nli(exp_name):
    step_per_epoch = 24544 + 970
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.num_steps = step_per_epoch
    e_config.voca_size = 30522
    e_config.num_dev_batches = 30
    e_config.load_names = ['bert']
    encode_opt = "is_good"
    num_class_list = [3, 3]
    f1_list = []
    save_path = "/mnt/scratch/youngwookim/Chair/output/model/runs/argmix_AN_B_40000_abortion_is_good/model-21306"
    for topic in data_generator.argmining.ukp_header.all_topics[:1]:
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "argmix_{}_{}_{}".format(exp_name, topic, encode_opt)
        arg_data_loader = BertDataLoader(topic, True, hp.seq_max, "bert_voca.txt", option=encode_opt)
        f1_last = e.eval_ukp_on_shared(e_config, arg_data_loader, num_class_list, save_path)
        f1_list.append((topic, f1_last))
    print(exp_name)
    print(encode_opt)
    print(f1_list)
    for key, score in f1_list:
        print("{0}\t{1:.03f}".format(key, score))


def ukp_train_test(load_id, exp_name):
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.num_epoch = 2
    e_config.save_interval = 100 * 60  # 100 minutes
    e_config.voca_size = 30522
    e_config.load_names = ['bert']
    encode_opt = "is_good"
    print(load_id)
    f1_list = []
    for topic in data_generator.argmining.ukp_header.all_topics:
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "arg_{}_{}_{}".format(exp_name, topic, encode_opt)
        data_loader = BertDataLoader(topic, True, hp.seq_max, "bert_voca.txt", option=encode_opt)
        save_path = e.train_ukp(e_config, data_loader, load_id)
        print(topic)
        f1_last = e.eval_ukp(e_config, data_loader, save_path)
        f1_list.append((topic, f1_last))
    print(exp_name)
    print(encode_opt)
    print(f1_list)
    for key, score in f1_list:
        print("{0}\t{1:.03f}".format(key, score))


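# A hypothetical driver for ukp_train_test(), given only as a usage sketch;
# ukp_train_test_repeat() above is invoked the same way with a topic and a
# repeat count added. The checkpoint tuple follows this file's convention;
# the run name is a made-up placeholder.
def ukp_from_bert():
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    ukp_train_test(load_id, "bert_init")

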
def train_snli_ex():
    hp = hyperparams.HPBert()
    hp.compare_deletion_num = 20
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "SNLIEx_B"
    e_config.ex_val = False
    e_config.num_epoch = 1
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']  #, 'aux_conflict']
    #explain_tag = 'match'  # 'dontcare' 'match' 'mismatch'
    #explain_tag = 'mismatch'
    #explain_tag = 'conflict'
    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #load_id = ("NLI_run_nli_warm", "model-97332")
    #load_id = ("NLIEx_A", "model-16910")
    #load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    #load_id = ("NLIEx_D", "model-1964")
    #load_id = ("NLIEx_D", "model-1317")
    load_id = ("SNLI_Only_A", 'model-0')
    e.train_nli_any_way(nli_setting, e_config, data_loader, load_id)


def test_fidelity():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    is_senn = False
    e_config = ExperimentConfig()
    e_config.name = "NLIEx_{}".format("Fidelity")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    if is_senn:
        e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']
    else:
        e_config.load_names = ['bert', 'cls_dense']
    explain_tag = 'conflict'
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLIEx_Y_conflict", 'model-12039')
    #load_id = ("NLI_Only_C", 'model-0')
    #e.eval_fidelity(nli_setting, e_config, data_loader, load_id, explain_tag)
    e.eval_fidelity_gradient(nli_setting, e_config, data_loader, load_id, explain_tag)


def do_test_dev_acc():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLIEx_Test"
    e_config.load_names = ['bert', 'cls_dense']  #, 'aux_conflict']
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #load_id = ("NLI_bare_A", 'model-195608')
    load_id = ("NLIEx_S", 'model-4417')
    load_id = ("NLIEx_Y_conflict", "model-9636")
    load_id = ("NLI_Only_C", 'model-0')  # only this last assignment takes effect
    e.test_acc(nli_setting, e_config, data_loader, load_id)


def pool_adhoc():
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}_pool".format("L")
    #e_config.load_names = ['bert', 'reg_dense']
    e_config.load_names = ['bert', 'reg_dense', 'aux_q_info']
    vocab_size = 30522
    task_idx = int(sys.argv[2])
    print(task_idx)
    payload_path = os.path.join(cpath.data_path, "robust", "robust_train_merge",
                                "merger_train_{}.pickle".format(task_idx))
    load_id = ("Adhoc_L", 'model-644')
    e.predict_for_pooling(e_config, vocab_size, load_id, payload_path)


def test_snli():
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "SNLIEx_Test"
    e_config.load_names = ['bert', 'cls_dense']  #, 'aux_conflict']
    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    todo = []
    load_id = ("SNLI_Only_A", 'model-0')
    todo.append(load_id)
    todo.append(("SNLI_Only_1", 'model-0'))
    for load_id in todo:
        tf.reset_default_graph()  # clear the default graph before building the next model
        e.test_acc(nli_setting, e_config, data_loader, load_id)


def attribution_predict():
    hp = hyperparams.HPBert()
    target_label = 'mismatch'
    e = Experiment(hp)
    hp.batch_size = 512
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLI_run_{}".format("nli_eval")
    e_config.load_names = ['bert', 'cls_dense']
    #data_id = "test_{}".format(target_label)
    data_id = "{}_1000".format(target_label)
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLI_Only_C", 'model-0')
    e.nli_attribution_predict(nli_setting, e_config, data_loader, load_id, target_label, data_id)