def train_fever_vc_ss_ema_v1(resume_model=None, do_analysis=False):
    """
    Created on 26 Nov 2018 08:50 to train verification (vc) and sentence
    selection (ss) jointly, with EMA weight averaging.
    :param resume_model: optional path to a saved model state dict to resume from.
    :param do_analysis: if True, dump intermediate sampling and eval data to
        an `analysis_aux` directory.
    :return:
    """

    num_epoch = 200
    seed = 12
    batch_size = 32
    lazy = True
    train_prob_threshold = 0.02
    train_sample_top_k = 8
    dev_prob_threshold = 0.1
    dev_sample_top_k = 5
    top_k_doc = 5

    # Per-epoch upper probability for negative sampling (epoch -> prob).
    schedule_sample_dict = defaultdict(lambda: 0.1)

    ratio_ss_for_vc = 0.2  # prob of routing ss examples into the vc loss; raised later

    schedule_sample_dict.update({
        0: 0.1,
        1: 0.1,  # 200k + 400K
        2: 0.1,
        3: 0.1,  # 200k + 200k ~ 200k + 100k
        4: 0.1,
        5: 0.1,  # 200k + 100k
        6: 0.1  # 20k + 20k
    })
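    # Note: every explicit entry above equals the defaultdict fallback of 0.1,
    # so any epoch index currently resolves to the same negative-sampling
    # upper probability; the per-epoch entries just make the schedule tunable.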

    # Evaluate the full training set at epoch 1, then NEI-only at epochs 2-7.
    eval_full_epoch = 1
    eval_nei_epochs = [2, 3, 4, 5, 6, 7]

    neg_only = False
    debug = False

    experiment_name = f"vc_ss_v17_ratio_ss_for_vc:{ratio_ss_for_vc}|t_prob:{train_prob_threshold}|top_k:{train_sample_top_k}_scheduled_neg_sampler"
    # resume_model = None

    print("Do EMA:")

    print("Dev prob threshold:", dev_prob_threshold)
    print("Train prob threshold:", train_prob_threshold)
    print("Train sample top k:", train_sample_top_k)

    # Get upstream sentence document retrieval data
    dev_doc_upstream_file = config.RESULT_PATH / "doc_retri/std_upstream_data_using_pageview/dev_doc.jsonl"
    train_doc_upstream_file = config.RESULT_PATH / "doc_retri/std_upstream_data_using_pageview/train_doc.jsonl"

    complete_upstream_dev_data = get_full_list(config.T_FEVER_DEV_JSONL,
                                               dev_doc_upstream_file,
                                               pred=True,
                                               top_k=top_k_doc)

    complete_upstream_train_data = get_full_list(config.T_FEVER_TRAIN_JSONL,
                                                 train_doc_upstream_file,
                                                 pred=False,
                                                 top_k=top_k_doc)
    if debug:
        complete_upstream_dev_data = complete_upstream_dev_data[:1000]
        complete_upstream_train_data = complete_upstream_train_data[:1000]

    print("Dev size:", len(complete_upstream_dev_data))
    print("Train size:", len(complete_upstream_train_data))

    # Prepare Data
    token_indexers = {
        'tokens':
        SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(
            namespace='elmo_characters')  # This is the elmo_characters
    }

    # Data Reader
    dev_fever_data_reader = VCSS_Reader(token_indexers=token_indexers,
                                        lazy=lazy,
                                        max_l=260)
    train_fever_data_reader = VCSS_Reader(token_indexers=token_indexers,
                                          lazy=lazy,
                                          max_l=260)

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)

    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT /
                                               "vocab_cache" / "nli_basic")

    vocab.add_token_to_namespace('true', namespace='labels')
    vocab.add_token_to_namespace('false', namespace='labels')
    vocab.add_token_to_namespace("hidden", namespace="labels")
    vocab.change_token_with_index_to_namespace("hidden",
                                               -2,
                                               namespace='labels')

    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))

    biterator.index_with(vocab)
    # Reader and prepare end

    vc_ss_training_sampler = VCSSTrainingSampler(complete_upstream_train_data)
    vc_ss_training_sampler.show_info()

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu",
                          index=0)
    device_num = -1 if device.type == 'cpu' else 0

    model = Model(rnn_size_in=(1024 + 300 + 1, 1024 + 450 + 1),
                  rnn_size_out=(450, 450),
                  weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  mlp_d=900,
                  embedding_dim=300,
                  max_l=300,
                  num_of_class=4)
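    # Size bookkeeping (inferred from the construction above): each input
    # concatenates 1024-d ELMo, 300-d GloVe, and a 1-d sentence-probability
    # feature; the second layer additionally consumes the 450-d output of the
    # first encoder, hence rnn_size_in = (1024 + 300 + 1, 1024 + 450 + 1).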

    print("Model Max length:", model.max_l)
    if resume_model is not None:
        model.load_state_dict(torch.load(resume_model))
    model.display()
    model.to(device)

    cloned_empty_model = copy.deepcopy(model)
    ema: EMA = EMA(parameters=model.named_parameters())

    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name),
              'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()

    analysis_dir = None
    if do_analysis:
        analysis_dir = Path(file_path_prefix) / "analysis_aux"
        analysis_dir.mkdir()
    # Save source code end.

    # Starting parameter setup
    best_dev = -1
    iteration = 0

    start_lr = 0.0001
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    # parameter setup end

    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Sample candidate training data for vc from the ss results; this has
        # to be redone after each epoch.
        if i_epoch == eval_full_epoch:  # full-train-set eval only at epoch 1
            print("We now need to eval the whole training set.")
            print("Be patient and hope good luck!")
            load_ema_to_model(cloned_empty_model, ema)
            eval_sent_for_sampler(cloned_empty_model, token_indexers, vocab,
                                  vc_ss_training_sampler)

        elif i_epoch in eval_nei_epochs:  # NEI-only eval at epochs 2-7
            print("We now need to eval the NEI training set.")
            print("Be patient; this may take a while.")
            load_ema_to_model(cloned_empty_model, ema)
            eval_sent_for_sampler(cloned_empty_model,
                                  token_indexers,
                                  vocab,
                                  vc_ss_training_sampler,
                                  nei_only=True)

        train_data_with_candidate_sample_list = vc_ss.data_wrangler.sample_sentences_for_vc_with_nei(
            config.T_FEVER_TRAIN_JSONL, vc_ss_training_sampler.sent_list,
            train_prob_threshold, train_sample_top_k)
        # Initialize a prob for each sentence so the sampler can work; dev
        # probabilities still have to come from a model forward pass.

        train_selection_dict = paired_selection_score_dict(
            vc_ss_training_sampler.sent_list)

        cur_train_vc_data = adv_simi_sample_with_prob_v1_1(
            config.T_FEVER_TRAIN_JSONL,
            train_data_with_candidate_sample_list,
            train_selection_dict,
            tokenized=True)

        if do_analysis:
            # Customized analysis output
            common.save_jsonl(
                vc_ss_training_sampler.sent_list, analysis_dir /
                f"E_{i_epoch}_whole_train_sent_{save_tool.get_cur_time_str()}.jsonl"
            )
            common.save_jsonl(
                train_data_with_candidate_sample_list, analysis_dir /
                f"E_{i_epoch}_sampled_train_sent_{save_tool.get_cur_time_str()}.jsonl"
            )
            common.save_jsonl(
                cur_train_vc_data, analysis_dir /
                f"E_{i_epoch}_train_vc_data_{save_tool.get_cur_time_str()}.jsonl"
            )

        print(f"E{i_epoch} VC_data:", len(cur_train_vc_data))

        # Sample negative candidate data for ss; the upper probability follows
        # the per-epoch schedule set above.
        neg_sample_upper_prob = schedule_sample_dict[i_epoch]
        print("Neg Sampler upper rate:", neg_sample_upper_prob)
        neg_sample_upper_prob = max(0.000, neg_sample_upper_prob)

        cur_train_ss_data = vc_ss_training_sampler.sample_for_ss(
            neg_only=neg_only, upper_prob=neg_sample_upper_prob)

        if i_epoch >= 1:  # from epoch 1 on, rebalance pos/neg ss examples
            pos_ss_data = []
            neg_ss_data = []
            for item in cur_train_ss_data:
                if item['selection_label'] == 'true':
                    pos_ss_data.append(item)
                elif item['selection_label'] == 'false':
                    neg_ss_data.append(item)

            ss_sample_size = min(len(pos_ss_data), len(neg_ss_data))
            random.shuffle(pos_ss_data)
            random.shuffle(neg_ss_data)
            # Keep positives and negatives at roughly a 1:2 ratio.
            cur_train_ss_data = pos_ss_data[:int(
                ss_sample_size * 0.5)] + neg_ss_data[:ss_sample_size]
            random.shuffle(cur_train_ss_data)

        vc_ss_training_sampler.show_info(cur_train_ss_data)
        print(f"E{i_epoch} SS_data:", len(cur_train_ss_data))

        vc_ss.data_wrangler.assign_task_label(cur_train_ss_data, 'ss')
        vc_ss.data_wrangler.assign_task_label(cur_train_vc_data, 'vc')

        vs_ss_train_list = cur_train_ss_data + cur_train_vc_data
        random.shuffle(vs_ss_train_list)
        print(f"E{i_epoch} Total ss+vc:", len(vs_ss_train_list))
        vc_ss_instance = train_fever_data_reader.read(vs_ss_train_list)

        train_iter = biterator(vc_ss_instance, shuffle=True, num_epochs=1,
                               cuda_device=device_num)

        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)

            if i_epoch >= 1:
                ratio_ss_for_vc = 0.8  # raise the ss-for-vc mixing ratio after epoch 0

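            # compute_mixing_loss (repo helper) presumably combines the ss and
            # vc losses, routing ss examples into the vc objective with
            # probability ss_for_vc_prob; this reading is an assumption based
            # on the argument names.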
            loss = compute_mixing_loss(
                model,
                out,
                batch,
                criterion,
                vc_ss_training_sampler,
                ss_for_vc_prob=ratio_ss_for_vc)  # Important change

            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1

            # EMA update
            ema(model.named_parameters())

            if i_epoch < 9:
                mod = 10000
                # mod = 100
            else:
                mod = 2000

            if iteration % mod == 0:

                # Evaluation: copy the current EMA weights into the cloned model.
                load_ema_to_model(cloned_empty_model, ema)

                vc_ss.data_wrangler.assign_task_label(
                    complete_upstream_dev_data, 'ss')
                dev_ss_instance = dev_fever_data_reader.read(
                    complete_upstream_dev_data)
                eval_ss_iter = biterator(dev_ss_instance,
                                         num_epochs=1,
                                         shuffle=False,
                                         cuda_device=device_num)
                scored_dev_sent_data = hidden_eval_ss(
                    cloned_empty_model, eval_ss_iter,
                    complete_upstream_dev_data)

                # for vc
                filtered_dev_list = vc_ss.data_wrangler.sample_sentences_for_vc_with_nei(
                    config.T_FEVER_DEV_JSONL, scored_dev_sent_data,
                    dev_prob_threshold, dev_sample_top_k)

                dev_selection_dict = paired_selection_score_dict(
                    scored_dev_sent_data)
                ready_dev_list = select_sent_with_prob_for_eval(
                    config.T_FEVER_DEV_JSONL,
                    filtered_dev_list,
                    dev_selection_dict,
                    tokenized=True)

                vc_ss.data_wrangler.assign_task_label(ready_dev_list, 'vc')
                dev_vc_instance = dev_fever_data_reader.read(ready_dev_list)
                eval_vc_iter = biterator(dev_vc_instance,
                                         num_epochs=1,
                                         shuffle=False,
                                         cuda_device=device_num)
                eval_dev_result_list = hidden_eval_vc(cloned_empty_model,
                                                      eval_vc_iter,
                                                      ready_dev_list)

                # Scoring
                eval_mode = {'check_sent_id_correct': True, 'standard': True}
                strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(
                    eval_dev_result_list,
                    common.load_jsonl(config.T_FEVER_DEV_JSONL),
                    mode=eval_mode,
                    verbose=False)
                print("Fever Score(Strict/Acc./Precision/Recall/F1):",
                      strict_score, acc_score, pr, rec, f1)

                print(f"Dev:{strict_score}/{acc_score}")

                if do_analysis:
                    # Customized analysis output
                    common.save_jsonl(
                        scored_dev_sent_data, analysis_dir /
                        f"E_{i_epoch}_scored_dev_sent_{save_tool.get_cur_time_str()}.jsonl"
                    )
                    common.save_jsonl(
                        eval_dev_result_list, analysis_dir /
                        f"E_{i_epoch}_eval_vc_output_data_{save_tool.get_cur_time_str()}.jsonl"
                    )

                need_save = False
                if strict_score > best_dev:
                    best_dev = strict_score
                    need_save = True

                if need_save or i_epoch < 7:
                    # Only the EMA weights are checkpointed; the raw model
                    # state is not saved.
                    ema_save_path = os.path.join(
                        file_path_prefix,
                        f'ema_i({iteration})_epoch({i_epoch})_dev({strict_score})_lacc({acc_score})_p({pr})_r({rec})_f1({f1})_seed({seed})'
                    )

                    save_ema_to_file(ema, ema_save_path)
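

# Illustrative sketch (an assumption, not the repo's actual `EMA` class): the
# loops above keep an exponential moving average of the parameters via
# `ema(model.named_parameters())`, then evaluate and checkpoint the averaged
# weights through `load_ema_to_model` / `save_ema_to_file`. A minimal version
# of that pattern, assuming a fixed decay rate, looks like this:
class _SketchEMA:
    def __init__(self, parameters, decay=0.9999):
        self.decay = decay
        # Detached shadow copy of every trainable parameter.
        self.shadow = {n: p.detach().clone()
                       for n, p in parameters if p.requires_grad}

    def __call__(self, parameters):
        # shadow <- decay * shadow + (1 - decay) * current parameter
        for n, p in parameters:
            if p.requires_grad:
                self.shadow[n].mul_(self.decay).add_(p.detach(),
                                                     alpha=1 - self.decay)

    def copy_to(self, model):
        # Overwrite a (cloned) model's parameters with the averaged weights.
        for n, p in model.named_parameters():
            if n in self.shadow:
                p.data.copy_(self.shadow[n])
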
def train_fever_std_ema_v1(resume_model=None, wn_feature=False):
    """
    The training script for FEVER verification with span and probability
    scores, with EMA weight averaging.
    :param resume_model: optional path to a saved model state dict to resume from.
    :param wn_feature:
    :return:
    """
    num_epoch = 200
    seed = 12
    batch_size = 32
    lazy = True
    dev_prob_threshold = 0.1
    train_prob_threshold = 0.1
    train_sample_top_k = 8
    experiment_name = f"nsmn_sent_wise_std_ema_lr1|t_prob:{train_prob_threshold}|top_k:{train_sample_top_k}"
    # resume_model = None

    print("Do EMA:")

    print("Dev prob threshold:", dev_prob_threshold)
    print("Train prob threshold:", train_prob_threshold)
    print("Train sample top k:", train_sample_top_k)

    dev_upstream_sent_list = common.load_jsonl(
        config.RESULT_PATH /
        "sent_retri_nn/balanced_sentence_selection_results/dev_sent_pred_scores.jsonl"
    )

    train_upstream_sent_list = common.load_jsonl(
        config.RESULT_PATH /
        "sent_retri_nn/balanced_sentence_selection_results/train_sent_scores.jsonl"
    )

    # Prepare Data
    token_indexers = {
        'tokens':
        SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(
            namespace='elmo_characters')  # This is the elmo_characters
    }

    print("Building Prob Dicts...")
    train_sent_list = common.load_jsonl(
        config.RESULT_PATH /
        "sent_retri_nn/balanced_sentence_selection_results/train_sent_scores.jsonl"
    )

    dev_sent_list = common.load_jsonl(
        config.RESULT_PATH /
        "sent_retri_nn/balanced_sentence_selection_results/dev_sent_pred_scores.jsonl"
    )

    selection_dict = paired_selection_score_dict(train_sent_list)
    selection_dict = paired_selection_score_dict(dev_sent_list, selection_dict)

    upstream_dev_list = threshold_sampler_insure_unique(
        config.T_FEVER_DEV_JSONL,
        dev_upstream_sent_list,
        prob_threshold=dev_prob_threshold,
        top_n=5)

    # Set `ablation` to remove wordnet and number embeddings; None keeps all features.
    dev_fever_data_reader = WNSIMIReader(token_indexers=token_indexers,
                                         lazy=lazy,
                                         wn_p_dict=p_dict,
                                         max_l=320,
                                         ablation=None)
    train_fever_data_reader = WNSIMIReader(token_indexers=token_indexers,
                                           lazy=lazy,
                                           wn_p_dict=p_dict,
                                           max_l=320,
                                           shuffle_sentences=False,
                                           ablation=None)

    complete_upstream_dev_data = select_sent_with_prob_for_eval(
        config.T_FEVER_DEV_JSONL,
        upstream_dev_list,
        selection_dict,
        tokenized=True)

    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)

    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT /
                                               "vocab_cache" / "nli_basic")
    vocab.change_token_with_index_to_namespace('hidden',
                                               -2,
                                               namespace='labels')

    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))

    biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu",
                          index=0)
    device_num = -1 if device.type == 'cpu' else 0

    model = Model(
        rnn_size_in=(1024 + 300 + dev_fever_data_reader.wn_feature_size,
                     1024 + 450 + dev_fever_data_reader.wn_feature_size),
        rnn_size_out=(450, 450),
        weight=weight_dict['glove.840B.300d'],
        vocab_size=vocab.get_vocab_size('tokens'),
        mlp_d=900,
        embedding_dim=300,
        max_l=300,
        use_extra_lex_feature=False,
        max_span_l=100)

    print("Model Max length:", model.max_l)
    if resume_model is not None:
        model.load_state_dict(torch.load(resume_model))
    model.display()
    model.to(device)

    cloned_empty_model = copy.deepcopy(model)
    ema: EMA = EMA(parameters=model.named_parameters())

    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name),
              'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.

    best_dev = -1
    iteration = 0

    start_lr = 0.0001
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=start_lr)
    criterion = nn.CrossEntropyLoss()

    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling
        train_data_with_candidate_sample_list = \
            threshold_sampler_insure_unique(config.T_FEVER_TRAIN_JSONL, train_upstream_sent_list,
                                            train_prob_threshold,
                                            top_n=train_sample_top_k)

        complete_upstream_train_data = adv_simi_sample_with_prob_v1_1(
            config.T_FEVER_TRAIN_JSONL,
            train_data_with_candidate_sample_list,
            selection_dict,
            tokenized=True)

        print("Sample data length:", len(complete_upstream_train_data))
        sampled_train_instances = train_fever_data_reader.read(
            complete_upstream_train_data)

        train_iter = biterator(sampled_train_instances,
                               shuffle=True,
                               num_epochs=1,
                               cuda_device=device_num)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['label']

            loss = criterion(out, y)

            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1

            # EMA update
            ema(model.named_parameters())

            if i_epoch < 15:
                mod = 10000
                # mod = 10
            else:
                mod = 2000

            if iteration % mod == 0:
                # Evaluate only the EMA-averaged weights.
                eval_iter = biterator(dev_instances,
                                      shuffle=False,
                                      num_epochs=1,
                                      cuda_device=device_num)
                load_ema_to_model(cloned_empty_model, ema)
                complete_upstream_dev_data = hidden_eval(
                    cloned_empty_model, eval_iter, complete_upstream_dev_data)

                eval_mode = {'check_sent_id_correct': True, 'standard': True}
                strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(
                    complete_upstream_dev_data,
                    common.load_jsonl(config.T_FEVER_DEV_JSONL),
                    mode=eval_mode,
                    verbose=False)
                print("Fever Score EMA(Strict/Acc./Precision/Recall/F1):",
                      strict_score, acc_score, pr, rec, f1)

                print(f"Dev EMA:{strict_score}/{acc_score}")

                need_save = False
                if strict_score > best_dev:
                    best_dev = strict_score
                    need_save = True

                if need_save:
                    # Only the EMA weights are checkpointed; the raw model
                    # state is not saved.
                    ema_save_path = os.path.join(
                        file_path_prefix,
                        f'ema_i({iteration})_epoch({i_epoch})_dev({strict_score})_lacc({acc_score})_seed({seed})'
                    )

                    save_ema_to_file(ema, ema_save_path)
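

# Illustrative sketch (an assumption, not the repo's
# `threshold_sampler_insure_unique`): the samplers above keep, per claim, the
# candidate sentences whose selection probability clears a threshold, capped
# at `top_n` unique sentence ids. Assuming each scored item carries 'sid' and
# 'prob' fields, a per-claim stand-in could look like this:
def _threshold_sample_sketch(scored_sents, prob_threshold, top_n):
    kept, seen = [], set()
    for s in sorted(scored_sents, key=lambda x: x['prob'], reverse=True):
        if len(kept) >= top_n:
            break
        if s['prob'] >= prob_threshold and s['sid'] not in seen:
            kept.append(s)
            seen.add(s['sid'])
    return kept
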
def train_fever_v2():
    # train_fever_v1 is the old training script.
    # train_fever_v2 is the new training script created on 02 Oct 2018 11:40:24.
    # Here we keep the negative and positive portions consistent.
    num_epoch = 10
    seed = 12
    batch_size = 128
    lazy = True
    torch.manual_seed(seed)
    keep_neg_sample_prob = 1
    top_k_doc = 5

    experiment_name = f"simple_nn_remain_{keep_neg_sample_prob}"
    # sample_prob_decay = 0.05

    dev_upstream_file = config.RESULT_PATH / "doc_retri/std_upstream_data_using_pageview/dev_doc.jsonl"
    train_upstream_file = config.RESULT_PATH / "doc_retri/std_upstream_data_using_pageview/train_doc.jsonl"

    # Prepare Data
    token_indexers = {
        'tokens':
        SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(
            namespace='elmo_characters')  # This is the elmo_characters
    }

    train_fever_data_reader = SSelectorReader(token_indexers=token_indexers,
                                              lazy=lazy,
                                              max_l=180)
    # dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=False)
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers,
                                            lazy=lazy,
                                            max_l=180)

    complete_upstream_dev_data = get_full_list(config.T_FEVER_DEV_JSONL,
                                               dev_upstream_file,
                                               pred=True,
                                               top_k=top_k_doc)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=batch_size)

    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT /
                                               "vocab_cache" / "nli_basic")
    # This is important: register the sentence-selection label vocabulary.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden",
                                               -2,
                                               namespace='selection_labels')

    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))

    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu",
                          index=0)
    device_num = -1 if device.type == 'cpu' else 0

    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300,
                  max_l=160,
                  num_of_class=2)
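
    # Two-way sentence-selection classifier over the 'selection_labels'
    # namespace registered above ('true'/'false', with 'hidden' at index -2).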

    model.display()
    model.to(device)

    cloned_empty_model = copy.deepcopy(model)
    ema: EMA = EMA(parameters=model.named_parameters())

    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name),
              'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.

    best_dev = -1
    iteration = 0

    start_lr = 0.0002
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=start_lr)
    criterion = nn.CrossEntropyLoss()

    dev_actual_list = common.load_jsonl(config.T_FEVER_DEV_JSONL)

    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling
        complete_upstream_train_data = get_full_list(
            config.T_FEVER_TRAIN_JSONL,
            train_upstream_file,
            pred=False,
            top_k=top_k_doc)

        print("Sample Prob.:", keep_neg_sample_prob)
        filtered_train_data = post_filter(complete_upstream_train_data,
                                          keep_prob=keep_neg_sample_prob,
                                          seed=12 + i_epoch)

        # The per-epoch seed (12 + i_epoch) avoids drawing identical samples
        # across epochs.
        print("Sampled_length:", len(filtered_train_data))
        print("Sampled_length:", len(filtered_train_data))

        sampled_train_instances = train_fever_data_reader.read(
            filtered_train_data)

        train_iter = biterator(sampled_train_instances,
                               shuffle=True,
                               num_epochs=1,
                               cuda_device=device_num)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['selection_label']

            loss = criterion(out, y)

            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Update EMA
            ema(model.named_parameters())
            iteration += 1

            mod = 8000  # evaluation interval (identical for all epochs)

            if iteration % mod == 0:
                eval_iter = dev_biterator(dev_instances,
                                          shuffle=False,
                                          num_epochs=1,
                                          cuda_device=device_num)

                load_ema_to_model(cloned_empty_model, ema)

                # complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)

                # Only eval EMA
                complete_upstream_dev_data = hidden_eval(
                    cloned_empty_model, eval_iter, complete_upstream_dev_data)

                dev_results_list = score_converter_v1(
                    config.T_FEVER_DEV_JSONL,
                    complete_upstream_dev_data,
                    sent_retri_top_k=5,
                    sent_retri_scal_prob=0.5)
                # This is only a wrapper for the simi_sampler

                eval_mode = {'check_sent_id_correct': True, 'standard': True}
                # Copy gold labels into the predictions so the strict score
                # isolates sentence-retrieval quality from label accuracy.
                for a, b in zip(dev_actual_list, dev_results_list):
                    b['predicted_label'] = a['label']
                strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(
                    dev_results_list,
                    dev_actual_list,
                    mode=eval_mode,
                    verbose=False)
                tracking_score = strict_score
                print(f"Dev(raw_acc/pr/rec/f1):{acc_score}/{pr}/{rec}/{f1}")
                print("Strict score:", strict_score)
                print(f"Eval Tracking score:", f"{tracking_score}")

                # Save at every evaluation point, regardless of best score.
                need_save = True

                if need_save:
                    save_path = os.path.join(
                        file_path_prefix, f'i({iteration})_epoch({i_epoch})_'
                        f'(tra_score:{tracking_score}|raw_acc:{acc_score}|pr:{pr}|rec:{rec}|f1:{f1})_ema'
                    )

                    save_ema_to_file(ema, save_path)
                    # torch.save(model.state_dict(), save_path)

        print("Epoch Evaluation...")
        eval_iter = dev_biterator(dev_instances,
                                  shuffle=False,
                                  num_epochs=1,
                                  cuda_device=device_num)

        load_ema_to_model(cloned_empty_model, ema)
        # complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
        complete_upstream_dev_data = hidden_eval(cloned_empty_model, eval_iter,
                                                 complete_upstream_dev_data)

        dev_results_list = score_converter_v1(config.T_FEVER_DEV_JSONL,
                                              complete_upstream_dev_data,
                                              sent_retri_top_k=5,
                                              sent_retri_scal_prob=0.5)

        eval_mode = {'check_sent_id_correct': True, 'standard': True}
        # Copy gold labels into the predictions so the strict score isolates
        # sentence-retrieval quality from label accuracy.
        for a, b in zip(dev_actual_list, dev_results_list):
            b['predicted_label'] = a['label']
        strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(
            dev_results_list, dev_actual_list, mode=eval_mode, verbose=False)
        tracking_score = strict_score
        print(f"Dev(raw_acc/pr/rec/f1):{acc_score}/{pr}/{rec}/{f1}")
        print("Strict score:", strict_score)
        print(f"Eval Tracking score:", f"{tracking_score}")

        if tracking_score > best_dev:
            best_dev = tracking_score

        save_path = os.path.join(
            file_path_prefix, f'i({iteration})_epoch({i_epoch})_'
            f'(tra_score:{tracking_score}|raw_acc:{acc_score}|pr:{pr}|rec:{rec}|f1:{f1})_epoch_ema'
        )

        save_ema_to_file(ema, save_path)
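

# Usage sketch: possible entry points for this script. The original __main__
# block is not shown here, so these calls and argument values are illustrative
# assumptions.
if __name__ == '__main__':
    train_fever_vc_ss_ema_v1(resume_model=None, do_analysis=False)
    # train_fever_std_ema_v1(resume_model=None, wn_feature=False)
    # train_fever_v2()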