Example 1
def main():
    num_of_vocab = NUM_OF_VOCAB

    # load data
    train_file = input_path
    data_list, target_list = load_data_context(data_path=train_file)

    # dev set
    dev_file = input_path
    dev_data_list, dev_target_list = load_data_context(data_path=dev_file)

    # load final test data
    final_test_file = opt.test_path
    final_test_data_list, final_test_target_list = load_data_context(
        data_path=final_test_file)

    # build vocab (load vocab from the larger vocab pool)
    #     word2id, id2word, num_of_vocab = build_vocab([data_list, dev_data_list, final_test_data_list], num_of_vocab,
    #                                                  FILL_VOCAB)

    with open(word2id_path, 'rb') as w:
        word2id = pkl.load(w)
    with open(id2word_path, 'rb') as i:
        id2word = pkl.load(i)
    num_of_vocab = len(word2id)

    emb = build_embedding(id2word, GLOVE_EMB_PATH, num_of_vocab)

    ## we don't really have multiple test sets
    #     test_data_set = TrainDataSet(dev_data_list, dev_target_list, EMAI_PAD_LEN, SENT_PAD_LEN, word2id, use_unk=True)
    #     test_data_loader = DataLoader(test_data_set, batch_size=BATCH_SIZE, shuffle=False)
    #     print("Size of test data", len(test_data_set))
    # ex_id2word, unk_words_idx = test_data_set.get_ex_id2word_unk_words()

    # convert to TestData class
    # then use Dataloader from torch.utils.data to create batches
    final_test_data_set = TestDataSet(final_test_data_list,
                                      EMAI_PAD_LEN,
                                      SENT_PAD_LEN,
                                      word2id,
                                      id2word,
                                      use_unk=False)
    final_test_data_loader = DataLoader(final_test_data_set,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)
    print("Size of final test data", len(final_test_data_set))

    X = data_list
    y = target_list
    y = np.array(y)

    combined = list(zip(X, y))
    random.shuffle(combined)
    X[:], y[:] = zip(*combined)
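    # in-place slice assignment keeps X (list) and y (array) as the same objects,
    # now holding the jointly shuffled examples and labels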

    # train dev split
    from sklearn.model_selection import StratifiedKFold
    # note: newer scikit-learn (>= 0.24) requires shuffle=True when random_state is set
    skf = StratifiedKFold(n_splits=NUM_OF_FOLD, shuffle=True, random_state=0)

    real_test_results = []

    # Train one fold at a time (for cross validation)

    def one_fold(num_fold, train_index, dev_index):
        print("Training on fold:", num_fold)
        X_train, X_dev = [X[i] for i in train_index], [X[i] for i in dev_index]
        y_train, y_dev = y[train_index], y[dev_index]

        # construct data loader
        # for one fold, test data comes from k fold split.
        train_data_set = TrainDataSet(X_train,
                                      y_train,
                                      EMAI_PAD_LEN,
                                      SENT_PAD_LEN,
                                      word2id,
                                      use_unk=True)

        dev_data_set = TrainDataSet(X_dev,
                                    y_dev,
                                    EMAI_PAD_LEN,
                                    SENT_PAD_LEN,
                                    word2id,
                                    use_unk=True)
        dev_data_loader = DataLoader(dev_data_set,
                                     batch_size=BATCH_SIZE,
                                     shuffle=False)
        # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        final_pred_best = None

        # Guard against model divergence: if it happens, reinitialise and retrain
        while True:
            is_diverged = False
            # Model is defined in HierarchicalPredictor
            model = HierarchicalAttPredictor(SENT_EMB_DIM,
                                             SENT_HIDDEN_SIZE,
                                             CTX_LSTM_DIM,
                                             num_of_vocab,
                                             SENT_PAD_LEN,
                                             id2word,
                                             USE_ELMO=True,
                                             ADD_LINEAR=False)
            model.load_embedding(emb)
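            # initialise the DeepMoji sub-model from the pretrained weights,
            # excluding its original output layer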
            model.deepmoji_model.load_specific_weights(
                PRETRAINED_PATH, exclude_names=['output_layer'])
            model.cuda()

            # model = nn.DataParallel(model)
            # model.to(device)

            optimizer = optim.Adam(model.parameters(),
                                   lr=learning_rate,
                                   amsgrad=True)  #
            # optimizer = optim.SGD(model.parameters(), lr=learning_rate)
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                               gamma=GAMMA)

            # loss_criterion_binary = nn.CrossEntropyLoss(weight=weight_list_binary)  #
            if opt.loss == 'focal':
                loss_criterion = FocalLoss(gamma=opt.focal)

            elif opt.loss == 'ce':
                loss_criterion = nn.BCELoss()

            else:
                raise ValueError('Unsupported loss: {}'.format(opt.loss))

            es = EarlyStopping(patience=EARLY_STOP_PATIENCE)
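            # es.step(loss) presumably returns True once the dev loss has not improved
            # for EARLY_STOP_PATIENCE consecutive epochs (see its use below)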
            final_pred_list_test = None

            result_print = {}

            for num_epoch in range(MAX_EPOCH):

                # re-create the loader so the data is reshuffled every epoch
                train_data_loader = DataLoader(train_data_set,
                                               batch_size=BATCH_SIZE,
                                               shuffle=True)

                print('Begin training epoch:', num_epoch, end='...\t')
                sys.stdout.flush()

                # stepping scheduler
                scheduler.step(num_epoch)
                print('Current learning rate', scheduler.get_lr())
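                # note: recent PyTorch deprecates passing the epoch to scheduler.step()
                # and replaces get_lr() with get_last_lr()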

                ## Training step
                train_loss = 0
                model.train()

                for i, (a, a_len, emoji_a, e_c) \
                        in tqdm(enumerate(train_data_loader), total=len(train_data_loader)):

                    optimizer.zero_grad()
                    e_c = e_c.type(torch.float)
                    pred = model(a.cuda(), a_len, emoji_a.cuda())
                    loss_label = loss_criterion(pred.squeeze(1),
                                                e_c.view(-1).cuda()).cuda()

                    # training trilogy
                    loss_label.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP)
                    optimizer.step()

                    train_loss += loss_label.data.cpu().numpy() * a.shape[0]
                    del pred, loss_label

                ## Evaluation step
                model.eval()
                dev_loss = 0
                # pred_list = []
                for i, (a, a_len, emoji_a, e_c) in enumerate(dev_data_loader):

                    with torch.no_grad():
                        e_c = e_c.type(torch.float)
                        pred = model(a.cuda(), a_len, emoji_a.cuda())

                        loss_label = loss_criterion(
                            pred.squeeze(1),
                            e_c.view(-1).cuda()).cuda()

                        dev_loss += loss_label.data.cpu().numpy() * a.shape[0]

                        # pred_list.append(pred.data.cpu().numpy())
                        # gold_list.append(e_c.numpy())
                        del pred, loss_label

                print('Training loss:',
                      train_loss / len(train_data_set),
                      end='\t')
                print('Dev loss:', dev_loss / len(dev_data_set))

                # print(classification_report(gold_list, pred_list, target_names=EMOS))
                # get_metrics(pred_list, gold_list)

                # Gold Test testing
                print('Final test testing...')
                final_pred_list_test = []
                model.eval()

                for i, (a, a_len,
                        emoji_a) in enumerate(final_test_data_loader):

                    with torch.no_grad():

                        pred = model(a.cuda(), a_len, emoji_a.cuda())

                        final_pred_list_test.append(pred.data.cpu().numpy())
                    del a, pred
                print("final_pred_list_test", len(final_pred_list_test))
                final_pred_list_test = np.concatenate(final_pred_list_test,
                                                      axis=0)
                final_pred_list_test = np.squeeze(final_pred_list_test, axis=1)
                print("final_pred_list_test_concat", len(final_pred_list_test))

                accuracy, precision, recall, f1 = get_metrics(
                    np.asarray(final_test_target_list),
                    np.asarray(final_pred_list_test))

                result_print.update(
                    {num_epoch: [accuracy, precision, recall, f1]})

                if dev_loss / len(dev_data_set) > 1.3 and num_epoch > 4:
                    print("Model diverged, retry")
                    is_diverged = True
                    break

                if es.step(dev_loss):  # overfitting
                    print('overfitting, loading best model ...')
                    break
                else:
                    if es.is_best():
                        print('saving best model ...')
                        if final_pred_best is not None:
                            del final_pred_best
                        final_pred_best = deepcopy(final_pred_list_test)

                    else:
                        print('not best model, ignoring ...')
                        if final_pred_best is None:
                            final_pred_best = deepcopy(final_pred_list_test)

            with open(result_path, 'wb') as w:
                pkl.dump(result_print, w)

            if is_diverged:
                print("Reinitialize model ...")
                del model

                continue

            real_test_results.append(np.asarray(final_pred_best))
            # saving model for inference
            torch.save(model.state_dict(), opt.out_path)
            del model
            break

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")

    # Training the folds
    for idx, (_train_index, _dev_index) in enumerate(skf.split(X, y)):
        print('Train size:', len(_train_index), 'Dev size:', len(_dev_index))
        one_fold(idx, _train_index, _dev_index)


#     # Function of majority voting
#     # Need this to vote across different folds
#     def find_majority(k):
#         myMap = {}
#         maximum = ('', 0)  # (occurring element, occurrences)
#         for n in k:
#             if n in myMap:
#                 myMap[n] += 1
#             else:
#                 myMap[n] = 1

#             # Keep track of maximum on the go
#             if myMap[n] > maximum[1]: maximum = (n, myMap[n])

#         return maximum

    real_test_results = np.asarray(real_test_results)

    # since we only have 1 value per row per fold, just average across folds for the final value
    mj = np.mean(real_test_results, axis=0)
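    # real_test_results has shape (num_folds, num_test_examples), so the mean over
    # axis 0 yields one ensembled score per test example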
    #     for col_num in range(real_test_results.shape[1]):
    #         a_mj = find_majority(real_test_results[:, col_num])
    #         mj.append(a_mj[0])

    ### Evaluate the averaged predictions against the gold test labels
    print('Gold TESTING RESULTS')
    get_metrics(np.asarray(final_test_target_list), np.asarray(mj))
Example 2
def main():

    ##########  Set Assumptions ############
    ##########  Set Assumptions ############
    NUM_OF_FOLD = opt.folds
    MAX_EPOCH = opt.epoch
    input_path = opt.input_path
    CONTINUE = opt.cont

    EMAI_PAD_LEN = config['train']['EMAI_PAD_LEN']
    EMOJ_SENT_PAD_LEN = config['train']['EMOJ_SENT_PAD_LEN']
    SENT_PAD_LEN = config['train']['SENT_PAD_LEN']
    SENT_EMB_DIM = config['model']['SENT_EMB_DIM']
    learning_rate = config['train']['learning_rate']
    FILL_VOCAB = config['train']['FILL_VOCAB']
    BATCH_SIZE = config['train']['BATCH_SIZE']

    SENT_HIDDEN_SIZE = config['model']['SENT_HIDDEN_SIZE']
    CTX_LSTM_DIM = config['model']['CTX_LSTM_DIM']

    CLIP = config['train']['CLIP']
    EARLY_STOP_PATIENCE = config['train']['EARLY_STOP_PATIENCE']
    LAMBDA1 = config['train']['LAMBDA1']
    LAMBDA2 = config['train']['LAMBDA2']
    FLAT = config['train']['FLAT']
    GAMMA = config['train']['GAMMA']
    loss = config['train']['loss']
    # fix random seeds to ensure replicability
    RANDOM_SEED = config['train']['RANDOM_SEED']

    # set to one fold

    GLOVE_EMB_PATH = config['emb']['glove_path']
    bert_vocab_path = config['emb']['bert_vocab_path']

    torch.manual_seed(RANDOM_SEED)
    torch.cuda.manual_seed(RANDOM_SEED)
    torch.cuda.manual_seed_all(RANDOM_SEED)
    np.random.seed(RANDOM_SEED)
    random.seed(RANDOM_SEED)
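    # note: full GPU determinism would additionally require
    # torch.backends.cudnn.deterministic = True, which is not set here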

    preprocessor = EnglishPreProcessor()
    tokenizer = BertTokenizer(vocab_file=bert_vocab_path, do_lower_case=True)

    print('Tokenizing using dictionary from {}'.format(VOCAB_PATH))
    with open(VOCAB_PATH, 'r') as f:
        vocabulary = json.load(f)
    emoji_st = SentenceTokenizer(vocabulary, EMOJ_SENT_PAD_LEN)
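    # emoji_st presumably maps sentences to DeepMoji token ids padded to
    # EMOJ_SENT_PAD_LEN; it is passed into the create_data datasets below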

    # path for the metrics computed on the final test data
    result_path = config['output']['result']
    # path for the per-fold (individual) predicted scores
    result_path_ind = config['output']['result_ind']

    # use pre-built vocab that contains both email and wiki data
    word2id_path = config['infer']['word2id']
    id2word_path = config['infer']['id2word']

    with open(word2id_path, 'rb') as w:
        word2id = pkl.load(w)
    with open(id2word_path, 'rb') as i:
        id2word = pkl.load(i)
    num_of_vocab = len(word2id)

    emb = create_data.build_embedding(id2word, GLOVE_EMB_PATH, num_of_vocab)

    # load data
    train_file = input_path
    data_list, target_list = create_data.load_data_context(
        data_path=train_file)

    # load final test data
    final_test_file = opt.test_path
    final_test_data_list, final_test_target_list = create_data.load_data_context(
        data_path=final_test_file)

    # convert to TestData class
    # then use Dataloader from torch.utils.data to create batches
    final_test_data_set = create_data.TestDataSet(final_test_data_list,
                                                  EMAI_PAD_LEN,
                                                  SENT_PAD_LEN,
                                                  word2id,
                                                  id2word,
                                                  emoji_st,
                                                  use_unk=True)
    # keep shuffle off so predictions stay aligned with final_test_target_list
    final_test_data_loader = create_data.DataLoader(final_test_data_set,
                                                    batch_size=BATCH_SIZE,
                                                    shuffle=False)
    print("Size of final test data", len(final_test_data_set))

    X = data_list
    y = target_list
    y = np.array(y)

    combined = list(zip(X, y))
    random.shuffle(combined)
    X[:], y[:] = zip(*combined)

    # train dev split
    from sklearn.model_selection import StratifiedKFold
    # shuffle=True is required alongside random_state in newer scikit-learn
    skf = StratifiedKFold(n_splits=NUM_OF_FOLD, shuffle=True, random_state=0)

    # for this version, remove multiple folds

    real_test_results = []

    # Train one fold at a time (for cross validation)

    def one_fold(num_fold, train_index, dev_index):
        print("Training on fold:", num_fold)
        X_train, X_dev = [X[i] for i in train_index], [X[i] for i in dev_index]
        y_train, y_dev = y[train_index], y[dev_index]

        # construct data loader
        # for one fold, test data comes from k fold split.
        train_data_set = create_data.TrainDataSet(X_train,
                                                  y_train,
                                                  EMAI_PAD_LEN,
                                                  SENT_PAD_LEN,
                                                  word2id,
                                                  emoji_st,
                                                  use_unk=True)

        dev_data_set = create_data.TrainDataSet(X_dev,
                                                y_dev,
                                                EMAI_PAD_LEN,
                                                SENT_PAD_LEN,
                                                word2id,
                                                emoji_st,
                                                use_unk=True)
        dev_data_loader = DataLoader(dev_data_set,
                                     batch_size=BATCH_SIZE,
                                     shuffle=False)
        # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        final_pred_best = None

        # Guard against model divergence: if it happens, reinitialise and retrain
        while True:

            is_diverged = False
            # Model is defined in HierarchicalPredictor

            if CONTINUE:
                model = torch.load(opt.out_path)
            else:
                model = HierarchicalAttPredictor(SENT_EMB_DIM,
                                                 SENT_HIDDEN_SIZE,
                                                 CTX_LSTM_DIM,
                                                 num_of_vocab,
                                                 SENT_PAD_LEN,
                                                 id2word,
                                                 USE_ELMO=True,
                                                 ADD_LINEAR=False)
                model.load_embedding(emb)
                model.deepmoji_model.load_specific_weights(
                    PRETRAINED_PATH, exclude_names=['output_layer'])

            model.cuda()
            optimizer = optim.Adam(model.parameters(),
                                   lr=learning_rate,
                                   amsgrad=True)
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                               gamma=GAMMA)

            # loss_criterion_binary = nn.CrossEntropyLoss(weight=weight_list_binary)  #
            if loss == 'focal':
                loss_criterion = FocalLoss(gamma=opt.focal)

            elif loss == 'ce':
                loss_criterion = nn.BCELoss()

            else:
                raise ValueError('Unsupported loss: {}'.format(loss))

            es = EarlyStopping(patience=EARLY_STOP_PATIENCE)
            final_pred_list_test = None

            result_print = {}

            for num_epoch in range(MAX_EPOCH):

                # re-create the loader so the data is reshuffled every epoch
                train_data_loader = DataLoader(train_data_set,
                                               batch_size=BATCH_SIZE,
                                               shuffle=True)

                print('Begin training epoch:', num_epoch, end='...\t')
                sys.stdout.flush()

                # stepping scheduler
                scheduler.step(num_epoch)
                print('Current learning rate', scheduler.get_lr())

                ## Training step
                train_loss = 0
                model.train()

                for i, (a, a_len, emoji_a, e_c) \
                        in tqdm(enumerate(train_data_loader), total=len(train_data_loader)):

                    optimizer.zero_grad()
                    e_c = e_c.type(torch.float)
                    pred = model(a.cuda(), a_len, emoji_a.cuda())
                    loss_label = loss_criterion(pred.squeeze(1),
                                                e_c.view(-1).cuda()).cuda()

                    # training trilogy
                    loss_label.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP)
                    optimizer.step()

                    train_loss += loss_label.data.cpu().numpy() * a.shape[0]
                    del pred, loss_label

                ## Evaluation step
                model.eval()
                dev_loss = 0
                # pred_list = []
                for i, (a, a_len, emoji_a, e_c) in enumerate(dev_data_loader):

                    with torch.no_grad():
                        e_c = e_c.type(torch.float)
                        pred = model(a.cuda(), a_len, emoji_a.cuda())

                        loss_label = loss_criterion(
                            pred.squeeze(1),
                            e_c.view(-1).cuda()).cuda()

                        dev_loss += loss_label.data.cpu().numpy() * a.shape[0]

                        # pred_list.append(pred.data.cpu().numpy())
                        # gold_list.append(e_c.numpy())
                        del pred, loss_label

                print('Training loss:',
                      train_loss / len(train_data_set),
                      end='\t')
                print('Dev loss:', dev_loss / len(dev_data_set))

                # print(classification_report(gold_list, pred_list, target_names=EMOS))
                # get_metrics(pred_list, gold_list)

                # Gold Test testing
                print('Final test testing...')
                final_pred_list_test = []
                model.eval()

                for i, (a, a_len,
                        emoji_a) in enumerate(final_test_data_loader):

                    with torch.no_grad():

                        pred = model(a.cuda(), a_len, emoji_a.cuda())

                        final_pred_list_test.append(pred.data.cpu().numpy())
                    del a, pred
                print("final_pred_list_test", len(final_pred_list_test))
                final_pred_list_test = np.concatenate(final_pred_list_test,
                                                      axis=0)
                final_pred_list_test = np.squeeze(final_pred_list_test, axis=1)
                print("final_pred_list_test_concat", len(final_pred_list_test))

                accuracy, precision, recall, f1 = get_metrics(
                    np.asarray(final_test_target_list),
                    np.asarray(final_pred_list_test))

                result_print.update(
                    {num_epoch: [accuracy, precision, recall, f1]})

                if dev_loss / len(dev_data_set) > 1.3 and num_epoch > 4:
                    print("Model diverged, retry")
                    is_diverged = True
                    break

                if es.step(dev_loss):  # overfitting
                    print('overfitting, loading best model ...')
                    break
                else:
                    if es.is_best():
                        print('saving best model ...')
                        if final_pred_best is not None:
                            del final_pred_best
                        final_pred_best = deepcopy(final_pred_list_test)
                        # saving model for inference
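                        # assumes opt.out_path ends with a 4-character extension
                        # (e.g. '.pth'), which the slice below strips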
                        output_path_ep = opt.out_path[:-4] + '_' + str(
                            num_epoch) + '.pth'
                        torch.save(model, output_path_ep)

                    else:
                        print('not best model, ignoring ...')
                        if final_pred_best is None:
                            final_pred_best = deepcopy(final_pred_list_test)

            with open(result_path, 'wb') as w:
                pkl.dump(result_print, w)

            if is_diverged:
                print("Reinitialize model ...")
                del model

                continue

            real_test_results.append(np.asarray(final_pred_best))

            del model
            break

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")

    # Training the folds
    for idx, (_train_index, _dev_index) in enumerate(skf.split(X, y)):
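        # only the first fold is trained in this version (see the note above)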
        if idx != 0:
            break
        print('Train size:', len(_train_index), 'Dev size:', len(_dev_index))
        one_fold(idx, _train_index, _dev_index)

    real_test_results = np.asarray(real_test_results)

    # since we only have 1 value per row per fold, just average across folds for the final value
    mj = np.mean(real_test_results, axis=0)

    ### Evaluate the averaged predictions against the gold test labels
    print('Gold TESTING RESULTS')
    print(np.asarray(final_test_target_list).shape)
    print(np.asarray(mj).shape)
    get_metrics(np.asarray(final_test_target_list), np.asarray(mj))
    with open(result_path_ind, 'wb') as w:
        pkl.dump(real_test_results, w)