Code example #1
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq,
              current_iter_count):

    iter_counter = 0
    loss_value = 0
    loss_values = []

    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value += loss.forward(output, target)
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient
        model.backward(grad)
        # update layers' weights
        model.update(config)

        if iter_counter % disp_freq == 0:
            loss_values.append({
                'iter': current_iter_count + iter_counter,
                'loss': loss_value / disp_freq
            })
            loss_value = 0
    current_iter_count += iter_counter
    return current_iter_count, loss_values
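
The train_net / test_net examples in this listing rely on a handful of shared helpers (data_iterator, onehot_encoding, calculate_acc, LOG_INFO) that each project defines elsewhere, typically alongside solve_net.py. The following is a minimal sketch of what they could look like, inferred purely from how the examples call them; the exact bodies are assumptions, not the original implementations.

import numpy as np

def data_iterator(x, y, batch_size, shuffle=True):
    # assumed helper: yield (inputs, labels) mini-batches, optionally shuffled
    indices = np.arange(len(x))
    if shuffle:
        np.random.shuffle(indices)
    for start in range(0, len(x), batch_size):
        batch = indices[start:start + batch_size]
        yield x[batch], y[batch]

def onehot_encoding(label, max_num_class):
    # assumed helper: turn integer labels of shape (N,) into one-hot targets (N, max_num_class)
    encoding = np.zeros((len(label), max_num_class))
    encoding[np.arange(len(label)), label] = 1
    return encoding

def calculate_acc(output, label):
    # assumed helper: fraction of samples whose arg-max prediction matches the label
    return np.mean(np.argmax(output, axis=1) == label)

def LOG_INFO(msg):
    # assumed helper: print the message (real projects may add a timestamp)
    print(msg)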
Code example #2
File: solve_net.py Project: ivanium/ann-hw1
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq):

    iter_counter = 0
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value = loss.forward(output, target)
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient
        model.backward(grad)
        # update layers' weights
        model.update(config)

        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

        if iter_counter % disp_freq == 0:
            msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (iter_counter, np.mean(loss_list), np.mean(acc_list))
            loss_list = []
            acc_list = []
            LOG_INFO(msg)
Code example #3
File: solve_net.py Project: Funaizhang/THUAC
def test_net(model, loss, inputs, labels, batch_size, epoch, layer_name):
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs,
                                      labels,
                                      batch_size,
                                      shuffle=False):
        target = onehot_encoding(label, 10)
        output, output_visualize = model.forward(input,
                                                 visualize=True,
                                                 layer_name=layer_name)
        # collapse output_visualize into 1 channel
        output_visualize = np.sum(output_visualize, axis=(1))

        loss_value = loss.forward(output, target)
        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

    msg = '    Testing, total mean loss %.5f, total acc %.5f' % (
        np.mean(loss_list), np.mean(acc_list))
    LOG_INFO(msg)

    # save weights and biases
    model.save_weights(loss.name, epoch)

    # output_visualize: batch_size x height x width
    return np.mean(loss_list), np.mean(acc_list), output_visualize
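
The extra return value makes this variant convenient for inspecting intermediate activations. A hypothetical usage sketch follows; the matplotlib call and the names test_data, test_labels, and 'conv1' are illustrative, not from the original project.

import matplotlib.pyplot as plt

test_loss, test_acc, vis = test_net(model, loss, test_data, test_labels,
                                    batch_size=100, epoch=0, layer_name='conv1')
# vis has shape (batch_size, height, width); save the first sample's summed feature map
plt.imsave('conv1_activation.png', vis[0], cmap='jet')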
Code example #4
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq):

    iter_counter = 0
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value = loss.forward(output, target)
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient
        model.backward(grad)
        # update layers' weights
        model.update(config)

        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

        if iter_counter % disp_freq == 0:
            msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (
                iter_counter, np.mean(loss_list), np.mean(acc_list))
            loss_list = []
            acc_list = []
            LOG_INFO(msg)
Code example #5
File: solve_net.py Project: uclasystem/dorylus
def test_net(model, loss, input_feats, labels, test_mask, label_kind):
    target = onehot_encoding(labels, label_kind)
    output = model.forward(input_feats)

    # set mask: replace predictions of non-test nodes with their targets so the
    # metrics are effectively computed on the test nodes only
    output[~test_mask] = target[~test_mask]
    loss_value = loss.forward(output, target)

    acc_value = calculate_acc(output, labels, np.sum(test_mask))

    msg = '    Testing, total mean loss %.5f, total acc %.5f' % (loss_value,
                                                                 acc_value)
    LOG_INFO(msg)
Code example #6
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq, Loss,
              Acur):

    iter_counter = 0
    loss_list = []
    acc_list = []
    ll = []
    ac = []

    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value = loss.forward(output, target)
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient
        model.backward(grad)

        if loss_value > 1:
            config['learning_rate'] = 0.2
        elif loss_value > 0.5:
            config['learning_rate'] = 0.1
        elif loss_value > 0.2:
            config['learning_rate'] = 0.05
        else:
            config['learning_rate'] = max(loss_value / 5.0, 0.005)

        # update layers' weights
        model.update(config)

        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)
        ll.append(loss_value)
        ac.append(acc_value)

        if iter_counter % disp_freq == 0:
            msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (
                iter_counter, np.mean(loss_list), np.mean(acc_list))
            Loss.append(np.mean(loss_list))
            Acur.append(np.mean(acc_list))
            loss_list = []
            acc_list = []
            LOG_INFO(msg)

    Loss.append(np.mean(ll))
    Acur.append(np.mean(ac))
Code example #7
File: solve_net.py Project: wmhst7/UndergradProjects
def test_net(model, loss, inputs, labels, batch_size):
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs, labels, batch_size, shuffle=False):
        target = onehot_encoding(label, 10)
        output = model.forward(input)
        loss_value = loss.forward(output, target)
        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

    msg = '    Testing, total mean loss %.5f, total acc %.5f' % (np.mean(loss_list), np.mean(acc_list))
    LOG_INFO(msg)
Code example #8
File: solve_net.py Project: ivanium/ann-hw1
def test_net(model, loss, inputs, labels, batch_size):
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs, labels, batch_size, shuffle=False):
        target = onehot_encoding(label, 10)
        output = model.forward(input)
        loss_value = loss.forward(output, target)
        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

    msg = '    Testing, total mean loss %.5f, total acc %.5f' % (np.mean(loss_list), np.mean(acc_list))
    LOG_INFO(msg)
Code example #9
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq,
              test_inputs, test_labels):

    iter_counter = 0
    train_loss_list, train_acc_list = [], []
    test_loss_list, test_acc_list = [], []
    # loss_list, acc_list = [], []

    for input, label in data_iterator(inputs, labels, batch_size):
        # train_loss_value, train_acc_value = test_net(model, loss, input, labels, 10000000)
        # train_loss_list.append(train_loss_value)
        # train_acc_list.append(train_acc_value)

        test_loss_value, test_acc_value = test_net(model, loss, test_inputs,
                                                   test_labels, 10000000)
        test_loss_list.append(test_loss_value)
        test_acc_list.append(test_acc_value)

        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value = loss.forward(output, target)
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient
        model.backward(grad)
        # update layers' weights
        model.update(config)

        acc_value = calculate_acc(output, label)
        # loss_list.append(loss_value)
        # acc_list.append(acc_value)

        train_loss_list.append(loss_value)
        train_acc_list.append(acc_value)

        # if iter_counter % disp_freq == 0:
        #     msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (iter_counter, np.mean(loss_list), np.mean(acc_list))
        #     loss_list = []
        #     acc_list = []
        #     LOG_INFO(msg)

    return train_loss_list, train_acc_list, test_loss_list, test_acc_list
Code example #10
def test_net(model, loss, inputs, labels, batch_size):
    loss_list = []
    acc_list = []

    # test model with all the test data
    for input, label in data_iterator(inputs, labels, batch_size, shuffle=False):
        # get the expected value of this batch of input
        target = onehot_encoding(label, 10)
        output = model.forward(input)
        # calculate loss of this batch
        loss_value = loss.forward(output, target)
        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

    # use the mean of all batch's loss and accuracy as the final result
    msg = '    Testing, total mean loss %.5f, total acc %.5f' % (np.mean(loss_list), np.mean(acc_list))
    LOG_INFO(msg)
Code example #11
File: solve_net.py Project: AmadeusChan/MlpOnMNIST
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq, loss_file):

    iter_counter = 0
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

	# print "Debug: ", "input=", input.shape, " target=", target.shape

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value = loss.forward(output, target) 
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient

        model.backward(grad)
        # update layers' weights
        model.update(config)

        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

        # optionally log every iteration (disabled):
        # with open(loss_file, "a") as outf:
        #     outf.write(str(loss_value) + ' ' + str(acc_value) + '\n')

        if iter_counter % disp_freq == 0:
            msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (iter_counter, np.mean(loss_list), np.mean(acc_list))

            # append running means to the loss file (open(), not the Python 2 file() builtin)
            with open(loss_file, "a") as outf:
                outf.write(str(np.mean(loss_list)) + ' ' + str(np.mean(acc_list)) + '\n')

            loss_list = []
            acc_list = []
            LOG_INFO(msg)
Code example #12
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq, Loss, Acur):

    iter_counter = 0
    loss_list = []
    acc_list = []
    ll = []
    ac = []

    # train the model on mini-batches
    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss value of the whole batch
        loss_value = loss.forward(output, target)
        # generate gradient w.r.t loss, this is actually the local gradient contribution of the output layer
        grad = loss.backward(output, target)
        # backward gradient
        model.backward(grad)

        # update layers' weights: recount after the whole backward procedure
        model.update(config)

        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        ll.append(loss_value)
        acc_list.append(acc_value)
        ac.append(acc_value)

        if iter_counter % disp_freq == 0:
            msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (iter_counter, np.mean(loss_list), np.mean(acc_list))
            loss_list = []
            acc_list = []
            LOG_INFO(msg)
    Loss.append(np.mean(ll))
    Acur.append(np.mean(ac))
Code example #13
File: solve_net.py Project: uclasystem/dorylus
def train_net(model, loss, config, input_feats, labels, train_mask,
              label_kind):
    target = onehot_encoding(labels, label_kind)

    # forward net
    output = model.forward(input_feats)
    # set mask: replace predictions of non-training nodes with their targets so
    # the loss and gradient effectively come from the training nodes only
    output[~train_mask] = target[~train_mask]
    # calculate loss
    loss_value = loss.forward(output, target)
    # generate gradient w.r.t loss
    grad = loss.backward(output, target)
    # backward gradient
    model.backward(grad)
    # update layers' weights
    model.update(config)

    acc_value = calculate_acc(output, labels, np.sum(train_mask))

    msg = '  Training batch loss %.4f, batch acc %.4f' % (loss_value,
                                                          acc_value)
    LOG_INFO(msg)
Code example #14
def test_net(model, loss, inputs, labels, batch_size, info):
    # validation info = [validation, index, learning_rate, momentum, ...]
    # test info = [test]
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs,
                                      labels,
                                      batch_size,
                                      shuffle=False):
        target = onehot_encoding(label, 10)
        output = model.forward(input)
        loss_value = loss.forward(output, target)
        acc_value = np.sum(np.argmax(output, axis=1) == label)
        loss_list.append(loss_value / 40)
        acc_list.append(acc_value)

    #msg = '    Testing, total mean loss %.5f, total acc %.5f' % (np.mean(loss_list), np.mean(acc_list))
    #LOG_INFO(msg)

    # write results back to file
    if info[0] == 0:  # validation
        with open('loss_info/loss.txt', 'a') as f:
            f.write(
                '\n[{}]Val set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%), Learning_rate: {:.7f}, Momentum: {:.4f}, Weight Decay: {:.4f}\n---'
                .format(info[1], np.mean(loss_list), np.sum(acc_list), batch_size,
                        100. * np.sum(acc_list) / batch_size, info[2], info[3],
                        info[4]))
    else:  # test
        with open('loss_info/loss.txt', 'a') as f:
            f.write(
                '\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
                    np.mean(loss_list), np.sum(acc_list), batch_size,
                    100. * np.sum(acc_list) / batch_size))

    return np.mean(loss_list)
Code example #15
def read_data_for_aspect(fname, pre_trained_vectors, embedding_size):

    data_csv = pd.read_csv(fname,
                           sep='\t',
                           header=None,
                           index_col=None,
                           names=['text', 'aspect_l1', 'aspect_l2', 'score'])

    data_csv = data_csv.sample(frac=1).reset_index(drop=True)  # shuffle data

    stop_words = [
        ',', '.', ':', ';', '?', '(', ')', '[', ']', '!', '@', '#', '%', '$',
        '*', '-', '/', '&', '``', "''"
    ]

    max_context_len = 0
    word2idx = {}  # Index 0 represents words we haven't met before
    context = []
    context_len = []
    aspect_class = []

    for index, row in data_csv.iterrows():
        word_list = nltk.word_tokenize(row.text.strip())

        context_words = [
            word for word in word_list
            if word not in stop_words and isinteger(word) is False
            and isfloat(word) is False and word != 'T'
        ]

        words_have_vector = [
            word for word in context_words if word in pre_trained_vectors
        ]

        # make sure most words can find their embedding vectors
        if len(words_have_vector) / float(len(context_words)) < 0.8:
            continue

        max_context_len = max(max_context_len, len(words_have_vector))

        idx = []
        for word in words_have_vector:
            if word not in word2idx:
                # Index 0 represents absent words, so start from 1
                word2idx[word] = len(word2idx) + 1
            idx.append(word2idx[word])

        context.append(idx)
        context_len.append(len(words_have_vector))

        aspect_class.append(row.aspect_l1 + '/' + row.aspect_l2)

    # convert to numpy format
    context_npy = np.zeros(shape=[len(context), max_context_len])
    for i in range(len(context)):
        context_npy[i, :len(context[i])] = context[i]

    aspect_class_npy, onehot_mapping = onehot_encoding(aspect_class)

    train_data = list()
    train_data.append(context_npy)  # [data_size, max_context_len]
    train_data.append(np.array(context_len))  # [data_size,]
    train_data.append(aspect_class_npy)  # [data_size, aspect_class]

    word_embeddings = np.zeros([len(word2idx) + 1, embedding_size])
    for word in word2idx.keys():
        word_embeddings[word2idx[word]] = pre_trained_vectors[word]

    return train_data, word_embeddings, word2idx, max_context_len, onehot_mapping
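
read_data_for_aspect filters out numeric tokens with isinteger and isfloat, which are not shown in this listing. A minimal sketch of such helpers follows; the names come from the call sites above, while the bodies are assumptions rather than the original implementations.

def isinteger(token):
    # assumed helper: True if the token parses as an integer
    try:
        int(token)
        return True
    except ValueError:
        return False

def isfloat(token):
    # assumed helper: True if the token parses as a float
    try:
        float(token)
        return True
    except ValueError:
        return False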
Code example #16
def main(opt, save_dir):
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.test['gpus'])

    # img_dir = opt.test['img_dir']
    ratio = opt.ratio
    img_dir = './data/{:s}/images'.format(opt.dataset)
    label_dir = './data/{:s}/labels_point'.format(opt.dataset)
    label_instance_dir = './data/{:s}/labels_instance'.format(opt.dataset)
    # save_dir = './data/{:s}/selected_masks'.format(opt.dataset)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    model_path = opt.test['model_path']

    # data transforms
    test_transform = get_transforms(opt.transform['test'])

    model = ResUNet34(pretrained=opt.model['pretrained'],
                      with_uncertainty=opt.with_uncertainty)
    model = model.cuda()
    cudnn.benchmark = True

    # ----- load trained model ----- #
    # print("=> loading trained model")
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    # print("=> loaded model at epoch {}".format(checkpoint['epoch']))

    # switch to evaluate mode
    model.eval()
    apply_dropout(model)

    with open('./data/{:s}/train_val_test.json'.format(opt.dataset),
              'r') as file:
        data_list = json.load(file)
        train_list = data_list['train']

    for img_name in tqdm(train_list):
        # load test image
        # print('=> Processing image {:s}'.format(img_name))
        img_path = '{:s}/{:s}'.format(img_dir, img_name)
        img = Image.open(img_path)
        ori_h = img.size[1]
        ori_w = img.size[0]
        name = os.path.splitext(img_name)[0]
        label_point = misc.imread('{:s}/{:s}_label_point.png'.format(
            label_dir, name))

        input = test_transform((img, ))[0].unsqueeze(0)
        # print('\tComputing uncertainty maps...')
        mean_sigma = np.zeros((2, ori_h, ori_w))
        mean_sigma_normalized = np.zeros((2, ori_h, ori_w))
        mean_prob = np.zeros((2, ori_h, ori_w))
        for _ in range(opt.T):
            output, log_var = get_probmaps(input, model, opt)
            output = output.astype(np.float64)
            log_var = log_var.astype(np.float64)
            sigma_map = np.exp(log_var / 2)
            sigma_map_normalized = sigma_map / (np.exp(output) + 1e-8)

            mean_prob += np.exp(output) / np.sum(np.exp(output), axis=0)
            mean_sigma += sigma_map
            mean_sigma_normalized += sigma_map_normalized

        mean_prob /= opt.T
        mean_sigma /= opt.T
        mean_sigma_normalized /= opt.T

        un_data_normalized = mean_sigma_normalized**2

        pred = np.argmax(mean_prob, axis=0)
        un_data_normalized = np.sum(un_data_normalized *
                                    utils.onehot_encoding(pred),
                                    axis=0)

        # find the area of largest uncertainty for visualization
        threshed = un_data_normalized > 1.0
        large_unc_area = morph.opening(threshed, selem=morph.disk(1))
        large_unc_area = morph.remove_small_objects(large_unc_area,
                                                    min_size=64)
        un_data_smoothed = gaussian_filter(un_data_normalized * large_unc_area,
                                           sigma=5)

        # cmap = plt.cm.jet
        # plt.imsave('{:s}/{:s}_uncertainty.png'.format(save_dir, name), cmap(un_data_normalized))

        points = measure.label(label_point)
        uncertainty_list = []
        radius = 10
        for k in range(1, np.max(points) + 1):
            x, y = np.argwhere(points == k)[0]
            r1 = x - radius if x - radius > 0 else 0
            r2 = x + radius if x + radius < ori_h else ori_h
            c1 = y - radius if y - radius > 0 else 0
            c2 = y + radius if y + radius < ori_w else ori_w
            uncertainty = np.mean(un_data_smoothed[r1:r2, c1:c2])
            uncertainty_list.append([k, uncertainty])

        uncertainty_list = np.array(uncertainty_list)
        sorted_list = uncertainty_list[uncertainty_list[:, 1].argsort()[::-1]]
        indices = sorted_list[:int(ratio * np.max(points)), 0]

        # annotation
        label_instance = misc.imread('{:s}/{:s}_label.png'.format(
            label_instance_dir, name))
        new_anno = np.zeros_like(label_instance)
        counter = 1
        for idx in indices:
            nuclei_idx = np.unique(label_instance[points == idx])[0]
            if nuclei_idx == 0:
                continue
            new_anno += (label_instance == nuclei_idx) * counter
            counter += 1
            # utils.show_figures((new_anno,))

        misc.imsave('{:s}/{:s}_label_partial_mask.png'.format(save_dir, name),
                    new_anno.astype(np.uint8))
        misc.imsave(
            '{:s}/{:s}_label_partial_mask_binary.png'.format(save_dir, name),
            (new_anno > 0).astype(np.uint8) * 255)

    print('=> Processed all images')
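
This script calls model.eval() and then apply_dropout(model) before running opt.T stochastic forward passes, which is the usual Monte Carlo dropout recipe for building the uncertainty maps. apply_dropout is defined elsewhere in the project; a minimal sketch under that assumption:

import torch.nn as nn

def apply_dropout(model):
    # assumed helper: keep dropout layers in training mode during evaluation so
    # repeated forward passes yield Monte Carlo samples of the prediction
    for m in model.modules():
        if isinstance(m, (nn.Dropout, nn.Dropout2d)):
            m.train()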
Code example #17
                                 seed=cfg_fe.seed,
                                 runty=runty,
                                 path=save_path)
elif runty == 'eval':
    x_train, x_test = fe_cluster(x_train,
                                 x_test,
                                 genes_features,
                                 cells_features,
                                 n_cluster_g=cfg_fe.n_clusters_g,
                                 n_cluster_c=cfg_fe.n_clusters_c,
                                 seed=cfg_fe.seed,
                                 runty=runty,
                                 path=load_path)

# one-hot encoding
x_train = onehot_encoding(x_train)
x_test = onehot_encoding(x_test)

feature_cols = [
    c for c in x_train.columns
    if (str(c)[0:5] != 'kfold'
        and c not in ['sig_id', 'drug_id', 'cp_type', 'cp_time', 'cp_dose'])
]
target_cols = [x for x in y_train.columns if x != 'sig_id']

# label smoothing
if cfg_fe.regularization_ls:
    y_train = ls_manual(y_train, ls_rate=cfg_fe.ls_rate)

# merge drug_id and labels
x_train = x_train.merge(y_train, on='sig_id')
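
Here onehot_encoding is applied to a whole pandas DataFrame rather than to a label vector, so it is a different helper from the one used in the solve_net.py examples. In this MoA-style pipeline it presumably dummy-encodes the categorical treatment columns; a minimal sketch under that assumption, where the column list is a guess based on the cp_type / cp_time / cp_dose columns excluded from feature_cols:

import pandas as pd

def onehot_encoding(df, columns=('cp_time', 'cp_dose')):
    # assumed helper: append dummy columns for the categorical features,
    # keeping the original columns in place
    for col in columns:
        dummies = pd.get_dummies(df[col], prefix=col)
        df = pd.concat([df, dummies], axis=1)
    return df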
Code example #18
File: main.py Project: XStargate/MoA_prediction
def main():

    cfg_fe = Config_FeatureEngineer()
    seed_everything(seed_value=cfg_fe.seed)

    data_dir = '/kaggle/input/lish-moa/'
    save_path = './'
    load_path = '/kaggle/input/moatabnetmultimodekfold/'
    runty = 'eval'

    train = pd.read_csv(os.path.join(data_dir, 'train_features.csv'))
    targets_scored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_scored.csv'))
    test = pd.read_csv(os.path.join(data_dir, 'test_features.csv'))
    train_drug = pd.read_csv(os.path.join(data_dir, 'train_drug.csv'))
    submission = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))

    x_train = train.copy()
    x_test = test.copy()
    y_train = targets_scored.copy()

    genes_features = [column for column in x_train.columns if 'g-' in column]
    cells_features = [column for column in x_train.columns if 'c-' in column]

    # scale the data, like RankGauss
    x_train, x_test = scaling(x_train,
                              x_test,
                              scale=cfg_fe.scale,
                              n_quantiles=cfg_fe.scale_n_quantiles,
                              seed=cfg_fe.seed)

    # decompose data, like PCA
    if runty == 'traineval':
        x_train, x_test = decompo_process(x_train,
                                          x_test,
                                          decompo=cfg_fe.decompo,
                                          genes_variance=cfg_fe.genes_variance,
                                          cells_variance=cfg_fe.cells_variance,
                                          seed=cfg_fe.seed,
                                          pca_drop_orig=cfg_fe.pca_drop_orig,
                                          runty=runty,
                                          path=save_path)
    elif runty == 'eval':
        x_train, x_test = decompo_process(x_train,
                                          x_test,
                                          decompo=cfg_fe.decompo,
                                          genes_variance=cfg_fe.genes_variance,
                                          cells_variance=cfg_fe.cells_variance,
                                          seed=cfg_fe.seed,
                                          pca_drop_orig=cfg_fe.pca_drop_orig,
                                          runty=runty,
                                          path=load_path)

    # select feature, VarianceThreshold
    x_train, x_test = feature_selection(
        x_train,
        x_test,
        feature_select=cfg_fe.feature_select,
        variancethreshold_for_FS=cfg_fe.variancethreshold_for_FS)

    # fe_stats
    x_train, x_test = fe_stats(x_train, x_test, genes_features, cells_features)

    # group the drug using kmeans
    if runty == 'traineval':
        x_train, x_test = fe_cluster(x_train,
                                     x_test,
                                     genes_features,
                                     cells_features,
                                     n_cluster_g=cfg_fe.n_clusters_g,
                                     n_cluster_c=cfg_fe.n_clusters_c,
                                     seed=cfg_fe.seed,
                                     runty=runty,
                                     path=save_path)
    elif runty == 'eval':
        x_train, x_test = fe_cluster(x_train,
                                     x_test,
                                     genes_features,
                                     cells_features,
                                     n_cluster_g=cfg_fe.n_clusters_g,
                                     n_cluster_c=cfg_fe.n_clusters_c,
                                     seed=cfg_fe.seed,
                                     runty=runty,
                                     path=load_path)

    # one-hot encoding
    x_train = onehot_encoding(x_train)
    x_test = onehot_encoding(x_test)

    feature_cols = [
        c for c in x_train.columns
        if (str(c)[0:5] != 'kfold' and c not in
            ['sig_id', 'drug_id', 'cp_type', 'cp_time', 'cp_dose'])
    ]
    target_cols = [x for x in y_train.columns if x != 'sig_id']

    # label smoothing
    if cfg_fe.regularization_ls:
        y_train = ls_manual(y_train, ls_rate=cfg_fe.ls_rate)

    # merge drug_id and labels
    x_train = x_train.merge(y_train, on='sig_id')
    x_train = x_train.merge(train_drug, on='sig_id')

    # remove sig_id
    # x_train, x_test, y_train = remove_ctl(x_train, x_test, y_train)

    # make CVs
    target_cols = [x for x in targets_scored.columns if x != 'sig_id']
    x_train = make_cv_folds(x_train, cfg_fe.seeds, cfg_fe.nfolds,
                            cfg_fe.drug_thresh, target_cols)

    begin_time = datetime.datetime.now()

    if (runty == 'traineval'):
        test_preds_all = train_tabnet(x_train, y_train, x_test, submission,
                                      feature_cols, target_cols, cfg_fe.seeds,
                                      cfg_fe.nfolds, save_path)
        y_train = targets_scored[
            train['cp_type'] != 'ctl_vehicle'].reset_index(drop=True)
        test_pred_final = pred_tabnet(x_train,
                                      y_train,
                                      x_test,
                                      submission,
                                      feature_cols,
                                      target_cols,
                                      cfg_fe.seeds,
                                      cfg_fe.nfolds,
                                      load_path='./',
                                      stacking=False)
    elif (runty == 'eval'):
        y_train = targets_scored[
            train['cp_type'] != 'ctl_vehicle'].reset_index(drop=True)
        test_pred_final = pred_tabnet(x_train,
                                      y_train,
                                      x_test,
                                      submission,
                                      feature_cols,
                                      target_cols,
                                      cfg_fe.seeds,
                                      cfg_fe.nfolds,
                                      load_path,
                                      stacking=False)

    time_diff = datetime.datetime.now() - begin_time
    print(f'Total time is {time_diff}')

    # make submission
    all_feat = [col for col in submission.columns if col not in ["sig_id"]]
    # To obtain the same length for test_preds_all and submission
    # sig_id = test[test["cp_type"] != "ctl_vehicle"].sig_id.reset_index(drop=True)
    sig_id = test.sig_id
    tmp = pd.DataFrame(test_pred_final, columns=all_feat)
    tmp["sig_id"] = sig_id

    submission = pd.merge(test[["sig_id"]], tmp, on="sig_id", how="left")
    submission.fillna(0, inplace=True)
    submission[test["cp_type"] == "ctl_vehicle"] = 0.

    submission.to_csv("submission_tabbet.csv", index=None)