Example #1
def stance_fine_tune():
    # matplotlib is only needed for plotting the validation curves
    import matplotlib.pyplot as plt

    # sweep learning rates; configure hp fully before constructing the Experiment
    for lr in [1e-3, 5e-4, 2e-4, 1e-4]:
        hp = HPFineTunePair()
        hp.lr = lr
        hp.num_epochs = 100
        topic = "hillary"
        e = Experiment(hp)
        # name and global step of a previously trained model to preload before fine-tuning
        preload_id = ("LM_pair_tweets_hillary_run2", 1247707)
        setting = shared_setting.TopicTweets2Stance(topic)
        stance_data = stance_detection.FineLoader(topic, hp.seq_max,
                                                  setting.vocab_filename,
                                                  hp.sent_max)
        # validation history: one (accuracy, F1) pair per evaluation step
        valid_history = e.train_stance(setting.vocab_size, stance_data,
                                       preload_id)
        e.clear_run()

        l_acc, l_f1 = zip(*valid_history)
        plt.plot(l_acc, label="{} / ACC".format(lr))
        plt.plot(l_f1, label="{} / F1".format(lr))

    plt.legend(loc='lower right')

    # title and display the dev-set accuracy/F1 curves for each learning rate
    plt.title('learning rate - dev')
    plt.show()
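
The zip(*valid_history) unpacking above assumes that train_stance returns the validation history as a sequence of (accuracy, F1) pairs. A minimal sketch of that assumed shape; the metric values below are hypothetical, not results from the source:

# Hypothetical shape of valid_history as consumed by the plotting code above.
valid_history = [(0.61, 0.55), (0.68, 0.60), (0.72, 0.64)]
l_acc, l_f1 = zip(*valid_history)   # transpose pairs into two series
assert l_acc == (0.61, 0.68, 0.72)  # accuracy per evaluation step
assert l_f1 == (0.55, 0.60, 0.64)   # F1 per evaluation step
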
Example #2
def predict_rf_tune():
    hp = hyperparams.HPBert()
    hp.batch_size = 256
    e = Experiment(hp)
    nli_setting = NLI()
    nli_setting.vocab_size = 30522  # vocabulary size of uncased BERT-base
    nli_setting.vocab_filename = "bert_voca.txt"
    target_label = 'match'
    #data_id = 'test_conflict'
    data_id = "{}_1000".format(target_label)
    e_config = ExperimentConfig()
    # first sweep batch, commented out because the assignment below overwrites it
    # l = [(0.1, 12039), (0.2, 12245), (0.3, 12063), (0.4, 12250), (0.6, 12262),
    #      (0.7, 12253)]
    l = [(0.5, 12175), (0.8, 12269), (0.9, 12259)]
    for del_g, step in l:
        e_config.name = "X_match_del_{}".format(del_g)
        e_config.load_names = ['bert', 'cls_dense', 'aux_conflict']

        data_loader = nli.DataLoader(hp.seq_max, nli_setting.vocab_filename,
                                     True)
        # checkpoint trained for this del_g value, saved at the given global step
        load_id = ("NLIEx_match_del_{}".format(del_g), "model-{}".format(step))
        e.clear_run()
        e.predict_rf(nli_setting, e_config, data_loader, load_id, data_id, 5)
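
The first (del_g, step) batch in predict_rf_tune is commented out above because the later assignment to l would otherwise overwrite it. A minimal sketch, under the assumption that a single sweep over every pair is wanted, of how the two batches could be combined; the naming scheme simply mirrors the one already used in the loop:

# Hypothetical single sweep over both (del_g, step) batches.
first_batch = [(0.1, 12039), (0.2, 12245), (0.3, 12063), (0.4, 12250),
               (0.6, 12262), (0.7, 12253)]
second_batch = [(0.5, 12175), (0.8, 12269), (0.9, 12259)]

for del_g, step in sorted(first_batch + second_batch):
    # same run / checkpoint naming convention as predict_rf_tune above
    run_name = "X_match_del_{}".format(del_g)
    load_id = ("NLIEx_match_del_{}".format(del_g), "model-{}".format(step))
    print(run_name, load_id)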