Code Example #1
File: nli_main.py Project: clover3/Chair
def test_fidelity():
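    # Load a fine-tuned NLI explanation checkpoint and evaluate explanation
    # fidelity with the gradient-based variant for the 'conflict' tag.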
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    is_senn = False

    e_config = ExperimentConfig()
    e_config.name = "NLIEx_{}".format("Fidelity")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    if is_senn:
        e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']
    else:
        e_config.load_names = ['bert', 'cls_dense']
    explain_tag = 'conflict'

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLIEx_Y_conflict", 'model-12039')
    #load_id = ("NLI_Only_C", 'model-0')
    #e.eval_fidelity(nli_setting, e_config, data_loader, load_id, explain_tag)
    e.eval_fidelity_gradient(nli_setting, e_config, data_loader, load_id,
                             explain_tag)
Code Example #2
File: nli_main.py Project: clover3/Chair
def train_snli_ex():
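    # Train an SNLI explanation model ("SNLIEx_B") for one epoch,
    # warm-starting from the "SNLI_Only_A" checkpoint.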
    hp = hyperparams.HPBert()
    hp.compare_deletion_num = 20
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "SNLIEx_B"
    e_config.ex_val = False
    e_config.num_epoch = 1
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']  #, 'aux_conflict']

    #explain_tag = 'match'  # 'dontcare'  'match' 'mismatch'

    #explain_tag = 'mismatch'
    #explain_tag = 'conflict'
    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename,
                                     True)
    #load_id = ("NLI_run_nli_warm", "model-97332")
    #load_id = ("NLIEx_A", "model-16910")
    #load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    #load_id = ("NLIEx_D", "model-1964")
    #load_id = ("NLIEx_D", "model-1317")
    load_id = ("SNLI_Only_A", 'model-0')
    e.train_nli_any_way(nli_setting, e_config, data_loader, load_id)
Code Example #3
def ukp_train_test_repeat(load_id, exp_name, topic, n_repeat):
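    # Train and evaluate a UKP argument-mining classifier n_repeat times on a
    # single topic, then print the per-run F1 scores and their average.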
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.num_epoch = 2
    e_config.save_interval = 100 * 60  # 100 minutes
    e_config.voca_size = 30522
    e_config.load_names = ['bert']
    encode_opt = "is_good"

    print(load_id)
    scores = []
    for i in range(n_repeat):
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "arg_{}_{}_{}".format(exp_name, topic, encode_opt)
        data_loader = BertDataLoader(topic,
                                     True,
                                     hp.seq_max,
                                     "bert_voca.txt",
                                     option=encode_opt)
        save_path = e.train_ukp(e_config, data_loader, load_id)
        f1_last = e.eval_ukp(e_config, data_loader, save_path)
        scores.append(f1_last)
    print(exp_name)
    print(encode_opt)
    for e in scores:
        print(e, end="\t")
    print()
    print("Avg\n{0:.03f}".format(average(scores)))
Code Example #4
File: nli_main.py Project: clover3/Chair
def predict_rf():
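    # Run predict_rf on the "{target_label}_1000" split; the repeated load_id
    # assignments below are leftover toggles, so the last one ("NLIEx_AnyA") wins.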
    hp = hyperparams.HPBert()
    hp.batch_size = 256
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    target_label = 'mismatch'
    #data_id = 'test_conflict'
    data_id = "{}_1000".format(target_label)
    e_config = ExperimentConfig()

    #del_g = 0.7
    #e_config.name = "X_match_del_{}".format(del_g)
    e_config.name = "NLIEx_AnyA_{}".format(target_label)
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #load_id = ("NLI_bare_A", 'model-195608')
    #load_id = ("NLIEx_O", 'model-10278')
    load_id = ("NLIEx_W_mismatch", "model-12030")
    load_id = ("NLIEx_Y_conflict", "model-12039")
    load_id = ("NLIEx_X_match", "model-12238")
    #load_id = ("NLIEx_match_del_{}".format(del_g), "model-4390")
    load_id = ("NLIEx_CE_{}".format(target_label), "model-12199")
    load_id = ("NLIEx_AnyA", "model-7255")
    e.predict_rf(nli_setting, e_config, data_loader, load_id, data_id, 5)
Code Example #5
File: nli_main.py Project: clover3/Chair
def analyze_nli_ex():
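    # Visualize NLI explanations for the chosen explain_tag; the checkpoint is
    # picked per tag, and the last assignment in each branch is the one used.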
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    explain_tag = 'match'

    e_config = ExperimentConfig()
    #e_config.name = "NLIEx_{}_premade_analyze".format(explain_tag)
    e_config.name = "NLIEx_{}_analyze".format(explain_tag)
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #load_id = ("NLIEx_E_align", "model-23621")
    #load_id = ("NLIEx_I_match", "model-1238")

    if explain_tag == 'conflict':
        load_id = ("NLIEx_Y_conflict", "model-12039")
        #load_id = ("NLIEx_HB", "model-2684")
    elif explain_tag == 'match':
        load_id = ("NLIEx_P_match", "model-1636")
        load_id = ("NLIEx_X_match", "model-12238")
    elif explain_tag == 'mismatch':
        load_id = ("NLIEx_U_mismatch", "model-10265")
    e.nli_visualization(nli_setting, e_config, data_loader, load_id,
                        explain_tag)
Code Example #6
def train_nli_smart_rf():
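    # Train an NLI explanation model via train_nli_smart with loss_type 2 for
    # the 'match' tag, warm-starting from the "NLI_run_A" checkpoint.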
    hp = hyperparams.HPSENLI()
    hp.compare_deletion_num = 20
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    e_config = ExperimentConfig()

    #explain_tag = 'mismatch'
    explain_tag = 'match'
    #explain_tag = 'mismatch'

    loss_type = 2
    e_config.name = "NLIEx_Hinge_{}".format(explain_tag)
    e_config.num_epoch = 1
    e_config.ex_val = True
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']  #, 'aux_conflict']
    e_config.save_eval = True
    e_config.save_name = "LossFn_{}_{}".format(loss_type, explain_tag)

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLI_run_A", 'model-0')

    print("Loss : ", loss_type)

    e.train_nli_smart(nli_setting, e_config, data_loader, load_id, explain_tag,
                      loss_type)
Code Example #7
def train_test_repeat(load_id, exp_name, n_repeat):
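    # Train and evaluate an RTE classifier n_repeat times, print each run's
    # accuracy, and return the average.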
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.name = "RTE_{}".format("A")
    e_config.num_epoch = 10
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert']
    vocab_filename = "bert_voca.txt"
    data_loader = rte.DataLoader(hp.seq_max, vocab_filename, True)

    print(load_id)
    scores = []
    for i in range(n_repeat):
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "rte_{}".format(exp_name)
        save_path = e.train_rte(e_config, data_loader, load_id)
        acc = e.eval_rte(e_config, data_loader, save_path)
        scores.append(acc)
    print(exp_name)
    for e in scores:
        print(e, end="\t")
    print()
    r = average(scores)
    print("Avg\n{0:.03f}".format(r))
    return r
Code Example #8
def ukp_train_test(load_id, exp_name):
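    # Train and evaluate a UKP argument-mining classifier on every topic and
    # print the per-topic F1 scores.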
    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.num_epoch = 2
    e_config.save_interval = 100 * 60  # 100 minutes
    e_config.voca_size = 30522
    e_config.load_names = ['bert']
    encode_opt = "is_good"

    print(load_id)
    f1_list = []
    for topic in data_generator.argmining.ukp_header.all_topics:
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "arg_{}_{}_{}".format(exp_name, topic, encode_opt)
        data_loader = BertDataLoader(topic,
                                     True,
                                     hp.seq_max,
                                     "bert_voca.txt",
                                     option=encode_opt)
        save_path = e.train_ukp(e_config, data_loader, load_id)
        print(topic)
        f1_last = e.eval_ukp(e_config, data_loader, save_path)
        f1_list.append((topic, f1_last))
    print(exp_name)
    print(encode_opt)
    print(f1_list)
    for key, score in f1_list:
        print("{0}\t{1:.03f}".format(key, score))
Code Example #9
def eval_ukp_with_nli(exp_name):
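    # Evaluate a shared UKP/NLI model from a fixed checkpoint path; the [:1]
    # slice restricts the loop to the first topic only.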
    step_per_epoch = 24544 + 970

    hp = hyperparams.HPBert()
    e_config = ExperimentConfig()
    e_config.num_steps = step_per_epoch
    e_config.voca_size = 30522
    e_config.num_dev_batches = 30
    e_config.load_names = ['bert']
    encode_opt = "is_good"
    num_class_list = [3, 3]
    f1_list = []
    save_path = "/mnt/scratch/youngwookim/Chair/output/model/runs/argmix_AN_B_40000_abortion_is_good/model-21306"
    for topic in data_generator.argmining.ukp_header.all_topics[:1]:
        e = Experiment(hp)
        print(exp_name)
        e_config.name = "argmix_{}_{}_{}".format(exp_name, topic, encode_opt)
        arg_data_loader = BertDataLoader(topic,
                                         True,
                                         hp.seq_max,
                                         "bert_voca.txt",
                                         option=encode_opt)
        f1_last = e.eval_ukp_on_shared(e_config, arg_data_loader,
                                       num_class_list, save_path)
        f1_list.append((topic, f1_last))
    print(exp_name)
    print(encode_opt)
    print(f1_list)
    for key, score in f1_list:
        print("{0}\t{1:.03f}".format(key, score))
Code Example #10
def gradient_rte_visulize():
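    # Visualize RTE predictions (gradient-based, per the function name) using
    # the checkpoint found for the "RTE_A" run.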
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    vocab_filename = "bert_voca.txt"
    load_id = loader.find_model_name("RTE_A")
    e_config = ExperimentConfig()
    e_config.name = "RTE_{}".format("visual")
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']

    data_loader = rte.DataLoader(hp.seq_max, vocab_filename, True)
    e.rte_visualize(e_config, data_loader, load_id)
Code Example #11
File: nli_main.py Project: clover3/Chair
def train_nli_with_reinforce_old():
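    # Older NLI training entry point: resumes from an "interval" checkpoint
    # and calls train_nli_ex_0; note that the vocab fields of nli_setting are
    # left at their defaults here, unlike the newer variants.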
    hp = hyperparams.HPNLI2()
    e = Experiment(hp)
    nli_setting = NLI()

    e_config = ExperimentConfig()
    e_config.name = "NLI_run_{}".format("retest")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'dense_cls']  #, 'aux_conflict']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename)
    load_id = ("interval", "model-48040")
    e.train_nli_ex_0(nli_setting, e_config, data_loader, load_id, True)
Code Example #12
def protest_bert():
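    # Fine-tune BERT for protest classification, starting from the public
    # uncased BERT-Base checkpoint.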
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "protest"
    e_config.num_epoch = 1
    e_config.save_interval = 1 * 60  # 1 minute
    e_config.load_names = ['bert']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = protest.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_protest(e_config, data_loader, load_id)
Code Example #13
File: nli_main.py Project: clover3/Chair
def pred_mnli_anyway():
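    # Run predict_rf on the "mismatch_1000" MNLI split with the "NLIEx_AnyA"
    # checkpoint.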
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLIEx_AnyA"
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    target_label = 'mismatch'
    data_id = "{}_1000".format(target_label)
    load_id = ("NLIEx_AnyA", 'model-2785')
    e.predict_rf(nli_setting, e_config, data_loader, load_id, data_id)
Code Example #14
File: nli_main.py Project: clover3/Chair
def pred_snli_ex():
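    # Run predict_rf on the SNLI test set with the "SNLIEx_B" checkpoint.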
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "SNLIEx_B"
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']

    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename,
                                     True)

    load_id = ("SNLIEx_B", 'model-10275')
    e.predict_rf(nli_setting, e_config, data_loader, load_id, "test")
Code Example #15
File: classification_main.py Project: clover3/Chair
def crs_stance_baseline():
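    # Train a CRS stance-classification baseline from the public uncased
    # BERT-Base checkpoint.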
    hp = hyperparams.HPCRS()
    hp.batch_size = 16 
    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "CRS_{}".format("baseline")
    e_config.num_epoch = 4
    e_config.save_interval = 10 * 60  # 10 minutes
    e_config.load_names = ['bert'] #, 'reg_dense']
    e_config.voca_size = 30522

    data_loader = DataGenerator()
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_crs_classify(e_config, data_loader, load_id)
Code Example #16
def train_adhoc_with_reinforce():
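    # Train an ad-hoc retrieval model ("Adhoc_E") from the public BERT-Base
    # checkpoint.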
    hp = hyperparams.HPAdhoc()
    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}".format("E")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = ws.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_adhoc(e_config, data_loader, load_id)
Code Example #17
def train_rte():
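    # Fine-tune an RTE classifier starting from the "tlm_simple" checkpoint at
    # step 15000 instead of the stock BERT checkpoint.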
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    vocab_filename = "bert_voca.txt"
    #load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    load_id = ("tlm_simple", "model.ckpt-15000")

    e_config = ExperimentConfig()
    e_config.name = "RTE_{}".format("tlm_simple_15000")
    e_config.num_epoch = 10
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert']

    data_loader = rte.DataLoader(hp.seq_max, vocab_filename, True)
    e.train_rte(e_config, data_loader, load_id)
Code Example #18
def train_nli_with_premade(explain_tag):
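    # Train an NLI explanation model on premade data for the given
    # explain_tag, starting from the public BERT-Base checkpoint.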
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    e_config = ExperimentConfig()
    e_config.name = "NLIEx_{}".format("Premade_"+explain_tag)
    e_config.num_epoch = 1
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert'] #, 'cls_dense'] #, 'aux_conflict']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_nli_ex_with_premade_data(nli_setting, e_config, data_loader, load_id, explain_tag)
Code Example #19
def wikicont_bert():
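    # Train a Wikipedia controversy classifier; the data loader is restricted
    # to the 'wiki' source collection.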
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    e_config = ExperimentConfig()
    e_config.name = "WikiContrv2009_only_wiki"
    e_config.num_epoch = 1
    e_config.save_interval = 60 * 60  # 1 hour
    e_config.load_names = ['bert']
    e_config.valid_freq = 100
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = Ams18.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    data_loader.source_collection.collection_type = 'wiki'
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_wiki_contrv(e_config, data_loader, load_id)
Code Example #20
def bert_lm_test():
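    # Run the bert_lm_pos_neg scoring routine on the ws data loader, starting
    # from the public BERT-Base checkpoint.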
    hp = hyperparams.HPQL()

    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Contrv_{}".format("B")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = ws.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.bert_lm_pos_neg(e_config, data_loader, load_id)
Code Example #21
def train_mscore_regression():
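    # Train a controversy classifier on mscore data; despite the function
    # name, this calls train_controversy_classification rather than a
    # regression routine.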
    hp = hyperparams.HPMscore()

    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Contrv_{}".format("C")
    e_config.num_epoch = 4
    e_config.save_interval = 10 * 60  # 10 minutes
    e_config.load_names = ['bert']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = mscore.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.train_controversy_classification(e_config, data_loader, load_id)
Code Example #22
def train_adhoc_fad():
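    # Train an ad-hoc ranking model ("Adhoc_FAD") with train_adhoc2, reading
    # data from file via DataLoaderFromFile.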
    hp = hyperparams.HPFAD()
    hp.batch_size = 16
    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}".format("FAD")
    e_config.num_epoch = 4
    e_config.save_interval = 10 * 60  # 10 minutes
    e_config.load_names = ['bert']  #, 'reg_dense']
    vocab_size = 30522

    data_loader = data_sampler.DataLoaderFromFile(hp.batch_size, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    #load_id = ("Adhoc_I2", 'model-290')
    e.train_adhoc2(e_config, data_loader, load_id)
Code Example #23
def test_ql():
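    # Run the test_ql evaluation routine with batch size 512, starting from
    # the public BERT-Base checkpoint.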
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512

    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}".format("C")
    e_config.num_epoch = 4
    e_config.load_names = ['bert', 'cls']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = ws.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.test_ql(e_config, data_loader, load_id)
Code Example #24
def run_ql_rank():
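    # Run the rank_ql ranking routine with the HPQL hyperparameters, starting
    # from the public BERT-Base checkpoint.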
    hp = hyperparams.HPQL()

    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}".format("D")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = ws.DataLoader(hp.seq_max, vocab_filename, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    e.rank_ql(e_config, data_loader, load_id)
Code Example #25
def test_nli():
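    # Evaluate NLI accuracy of a saved checkpoint via test_acc2; the second
    # assignment to `saved` (the "simple_hint" model) is the one actually used.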
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"
    e_config = ExperimentConfig()
    e_config.name = "NLI_400k_tlm_simple_wo_hint"
    e_config.num_epoch = 2
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']  # , 'aux_conflict']
    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    #saved = "/mnt/scratch/youngwookim/Chair/output/model/runs/NLI_Cold/model-0"
    saved = "/mnt/scratch/youngwookim/Chair/output/model/runs/NLI_400k_tlm_wo_hint/model-0"
    saved = '/mnt/scratch/youngwookim/Chair/output/model/runs/NLI_400k_tlm_simple_hint/model-0'
    print(saved)
    e.test_acc2(nli_setting, e_config, data_loader, saved)
Code Example #26
File: nli_main.py Project: clover3/Chair
def analyze_nli_pair():
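    # Visualize paired NLI explanations with the "NLIEx_T" checkpoint.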
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    e_config = ExperimentConfig()
    e_config.name = "NLIEx_pair_analyze"
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLIEx_T", "model-12097")
    # NOTE: `data` is not defined in this snippet; in the original source it
    # presumably comes from the enclosing scope.
    e.nli_visualization_pairing(nli_setting, e_config, data_loader, load_id,
                                data)
Code Example #27
def run_adhoc_rank_on_robust():
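    # Rank ad-hoc queries on Robust data; the second load_id assignment
    # ("Adhoc_E") overrides the stock BERT checkpoint.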
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512

    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}_eval".format("F")
    e_config.num_epoch = 4
    e_config.load_names = ['bert', 'reg_dense']
    vocab_size = 30522
    vocab_filename = "bert_voca.txt"

    data_loader = data_sampler.DataLoaderFromFile(hp.batch_size, vocab_size)
    load_id = ("uncased_L-12_H-768_A-12", 'bert_model.ckpt')
    load_id = ("Adhoc_E", 'model-58338')
    e.rank_adhoc(e_config, data_loader, load_id)
Code Example #28
def predict_adhoc_robust_K():
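    # Predict Robust retrieval scores with the "Adhoc_K" checkpoint; the task
    # index passed to predict_robust is read from sys.argv[2].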
    hp = hyperparams.HPAdhoc()
    hp.batch_size = 512

    e = Experiment(hp)

    e_config = ExperimentConfig()
    e_config.name = "Adhoc_{}_eval".format("K")
    e_config.load_names = ['bert', 'dense1', 'dense_reg']
    vocab_size = 30522
    #payload_path = os.path.join(cpath.data_path, "robust_payload", "payload_B_200.pickle")
    payload_path = os.path.join(cpath.data_path, "robust_payload",
                                "payload_desc.pickle")
    task_idx = int(sys.argv[2])
    print(task_idx)
    load_id = ("Adhoc_K", 'model-6397')
    e.predict_robust(e_config, vocab_size, load_id, payload_path, task_idx)
Code Example #29
File: nli_main.py Project: clover3/Chair
def predict_lime_snli_continue():
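    # Resume LIME explanation prediction on the SNLI test set from the
    # "SNLI_Only_A" checkpoint.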
    hp = hyperparams.HPBert()
    hp.batch_size = 512 + 256
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    e_config = ExperimentConfig()
    e_config.name = "SNLI_LIME_{}".format("eval")
    e_config.load_names = ['bert', 'cls_dense']

    data_loader = nli.SNLIDataLoader(hp.seq_max, nli_setting.vocab_filename,
                                     True)
    load_id = ("SNLI_Only_A", 'model-0')
    e.predict_lime_snli_continue(nli_setting, e_config, data_loader, load_id,
                                 "test")
Code Example #30
File: nli_main.py Project: clover3/Chair
def attribution_explain():
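    # Run attribution-baseline explanations for NLI; the second load_id
    # ("NLI_Only_A") overrides the first.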
    hp = hyperparams.HPBert()
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522
    nli_setting.vocab_filename = "bert_voca.txt"

    e_config = ExperimentConfig()
    e_config.name = "NLI_run_{}".format("nli_eval")
    e_config.num_epoch = 4
    e_config.save_interval = 30 * 60  # 30 minutes
    e_config.load_names = ['bert', 'cls_dense']

    data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename, True)
    load_id = ("NLI_run_nli_warm", "model-97332")
    load_id = ("NLI_Only_A", 'model-0')
    e.nli_attribution_baselines(nli_setting, e_config, data_loader, load_id)