Example no. 1
def main_cnnsmall(args, DEVICE):
    train_dataloader, valid_dataloader, test_dataloader, num_test = load_data_fmnist()

    model = CNNSmall()
    if args.new_train:
        train_cnn(args, [train_dataloader, valid_dataloader, test_dataloader],
                  model, DEVICE)

    interpret_cnn(args, test_dataloader, num_test, DEVICE)


def compare_correlation():
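    # Single run with correlation-based gating ('male'): all 6 layers restored from a
    # pretrained model, none retrained, with rotation and 0.1 noise augmentation.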
    config = Config()
    config.use_correlation = 1
    config.gate = 'male'
    config.sum_dir = 'correlation_comparison'
    config.rotate = True
    config.noise = 0.1
    config.num_layers = 6
    config.num_layers_to_restore = 6
    config.num_layers_to_train = 0
    config.mode = 'supervised'
    train_cnn(config)


def compare_layers():
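    # Sweep the trade-off between input downsampling and trainable depth, training from
    # scratch with downsample_factor/num_layers pairs of 6/0, 4/2, 2/4, and 0/6.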
    config = Config()
    config.downsample_factor = 6
    config.num_layers = 0
    config.num_layers_to_train = 0
    config.mode = 'supervised'
    config.num_layers_to_restore = 0
    config.use_correlation = 0
    config.sum_dir = 'layer_comparison'
    config.use_sex_labels = False
    config.gate = 'male'
    train_cnn(config)

    config.downsample_factor = 4
    config.num_layers = 2
    config.num_layers_to_train = 2
    train_cnn(config)

    config.downsample_factor = 2
    config.num_layers = 4
    config.num_layers_to_train = 4
    train_cnn(config)

    config.downsample_factor = 0
    config.num_layers = 6
    config.num_layers_to_train = 6
    train_cnn(config)


def compare_data_augmentation_and_pretraining():
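    # Compare a frozen pretrained network (6 layers restored, 0 trained) and full fine-tuning
    # (6 restored, 6 trained), both with rotation and 0.1 noise, against training from scratch
    # under (rotate, noise) settings of (True, 0.1), (False, 0), and (False, 1).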
    config = Config()
    config.downsample_factor = 0
    config.num_layers = 6
    config.num_layers_to_train = 6
    config.mode = 'supervised'
    config.num_layers_to_restore = 0
    config.use_correlation = 0
    config.sum_dir = 'data_augmentation_comparison'
    config.use_sex_labels = False
    config.gate = 'male'

    config.num_layers_to_restore = 6
    config.num_layers_to_train = 0
    config.rotate = True
    config.noise = 0.1
    train_cnn(config)

    config.num_layers_to_train = 6
    config.rotate = True
    config.noise = 0.1
    train_cnn(config)

    config.num_layers_to_restore = 0
    config.num_layers_to_train = 6
    config.rotate = True
    config.noise = 0.1
    train_cnn(config)

    config.rotate = False
    config.noise = 0
    train_cnn(config)

    config.rotate = False
    config.noise = 1
    train_cnn(config)


def compare_gating():
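    # Compare gating strategies on the fully downsampled, zero-layer configuration:
    # 'male' without sex labels, 'equal_gender' with sex labels, and 'shuffle' without sex labels.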
    config = Config()
    config.downsample_factor = 6
    config.num_layers = 0
    config.num_layers_to_train = 0
    config.mode = 'supervised'
    config.num_layers_to_restore = 0
    config.use_correlation = 0
    config.sum_dir = 'gating_comparison'

    config.use_sex_labels = False
    config.gate = 'male'
    train_cnn(config)

    config.use_sex_labels = True
    config.gate = 'equal_gender'
    train_cnn(config)

    config.use_sex_labels = False
    config.gate = 'shuffle'
    train_cnn(config)
Example no. 6
)
print("\n Step 1: Start training {} as TA net with {} params >>>".format(
    args.assistant, num_parameters_assistant))

# training
if not os.path.isfile(assistant_model_path):
    print("\n Start training the {} as TA >>>".format(args.assistant))

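    # Distill from net_teacher into the assistant ("TA") network, weighting the distillation
    # term with assistant_lambda_kd at temperature assistant_T_kd.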
    net_assistant = train_cnn(net_assistant,
                              args.assistant,
                              train_loader,
                              test_loader,
                              epochs=args.epochs,
                              resume_epoch=args.resume_epoch_1,
                              save_freq=save_freq,
                              lr_base=args.lr_base1,
                              lr_decay_factor=args.lr_decay_factor,
                              lr_decay_epochs=lr_decay_epochs,
                              weight_decay=args.weight_decay,
                              seed=args.seed,
                              path_to_ckpt=save_intrain_folder,
                              net_teacher=net_teacher,
                              lambda_kd=args.assistant_lambda_kd,
                              T_kd=args.assistant_T_kd)

    # store model
    torch.save({
        'model': net_assistant.state_dict(),
    }, assistant_model_path)
    print("\n End training CNN.")
else:
    print("\n Loading pre-trained {}.".format(args.assistant))
Example no. 7
def demo_run():
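    # Build a PPO agent over stacked proprioceptive observations (13 joint/sensor values plus
    # 3 coordinates for each of 3 objects), optionally pre-train a CNN on camera frames, run an
    # intrinsic exploration phase, then evaluate the agent in extrinsic trials.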
    # env = gym.make('REALComp-v0')
    # controller = Controller(env.action_space)
    controller = PPOAgent(
        action_space=envs.action_space,
        # 13: joints + sensors; 3*3: 3 coordinates per object, 3 objects
        size_obs=(13 + 3 * 3) * config.observations_to_stack,
        shape_pic=None,  # (72, 144, 3) as received from the wrapper
        size_layers=[256, 64],
        size_cnn_output=2,
        actor_lr=1e-4,
        critic_lr=1e-3,
        value_loss_coeff=1.,
        gamma=0.95,
        gae_lambda=0.95,
        epochs=10,
        horizon=32,
        mini_batch_size=8,
        frames_per_action=config.frames_per_action,
        init_wait=config.noop_steps,
        clip=0.2,
        entropy_coeff=0.01,
        log_std=0.,
        use_parallel=True,
        num_parallel=config.num_envs,
        logs=True,
    )

    ###################################
    # Pre-training of the CNN

    if config.pre_train_cnn:
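        # Train a small CNN to regress the target object's (x, y) position from camera frames,
        # then hand it to the agent; 'train/Fixed_CNN_loss' later tracks how well the frozen CNN
        # still predicts the target position during the intrinsic phase.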
        crop = True
        if crop:
            shape_pic = (72, 144, 3)
        else:
            shape_pic = (240, 320, 3)

        model_cnn = CNN(shape_pic=shape_pic, size_output=2)
        model_optimizer = optim.Adam(model_cnn.parameters())
        env = gym.make("REALComp-v0")

        losses = train_cnn(env,
                           model_cnn,
                           model_optimizer,
                           updates=400,
                           shape_pic=shape_pic,
                           crop=crop,
                           tensorboard=config.tensorboard)

        # Quick check of the model's performance
        test_cnn(env,
                 model_cnn,
                 model_optimizer,
                 10,
                 shape_pic=shape_pic,
                 crop=crop)

        # Assign the trained model to the Agent

        controller.cnn = model_cnn.to(config.device)
        controller.optimizer = optim.Adam(
            params=list(controller.actor.parameters()) +
            list(controller.critic.parameters()))

    ###################################

    # render simulation on screen
    if config.render:
        envs.render('human')

    # reset simulation
    observation = envs.reset(config.random_reset)
    reward = np.zeros(config.num_envs)
    done = np.zeros(config.num_envs)

    # intrinsic phase
    acc_reward = np.zeros_like(reward, dtype=np.float64)
    time_since_last_touch = 0
    touches = 0
    new_episode = True

    if config.model_to_load:
        controller.load_models(config.model_to_load)
    else:
        print("Starting intrinsic phase...")
        for frame in tqdm.tqdm(
                range(config.intrinsic_frames // config.num_envs)):
            # time.sleep(0.05)
            # if config.save_every and frame and frame % config.save_every == 0:
            #     controller.save_models("models.pth")

            # Reset the normalization: all the envs terminate at the same time, so we can do this
            if any(done):
                envs.ret = done

            if new_episode:
                new_episode = False
                # Add things here: change the current goal, etc.
                pass

            action = controller.step(observation, acc_reward, done, test=False)

            observation, reward, done, _ = envs.step(action.cpu())
            reward, had_contact, acc_reward = update_reward(
                envs,
                frame,
                reward,
                acc_reward,
                target_object=target,
                punished_objects=punished_objects,
                action=action.cpu().numpy())

            time_since_last_touch += 1

            config.tensorboard.add_scalar('Rewards/frame_rewards',
                                          reward.mean(), frame)

            if had_contact.max():
                config.tensorboard.add_scalar(
                    'intrinsic/time_since_last_touch', time_since_last_touch,
                    touches)
                touches += 1
                time_since_last_touch = 0

            if config.pre_train_cnn:
                picture = observation[:, 13:]
                picture = picture.reshape(
                    (controller.num_parallel, controller.shape_pic[0],
                     controller.shape_pic[1], controller.shape_pic[2]))
                picture = torch.FloatTensor(picture)
                picture = picture.permute(0, 3, 1, 2)
                cnn_output = controller.cnn(picture.to(config.device))
                cnn_output = cnn_output.detach()
                loss = loss_function(
                    cnn_output.to(torch.device("cpu")),
                    torch.FloatTensor(envs.get_obj_pos(target)[:, 0:2]).to(
                        torch.device("cpu"))).to(torch.device("cpu")).mean()
                config.tensorboard.add_scalar('train/Fixed_CNN_loss', loss,
                                              frame)

            if (frame > config.noop_steps) and (
                (frame + 1 - config.noop_steps) % config.frames_per_action
                    == 0):  # True on the last frame of an action

                config.tensorboard.add_scalar('intrinsic/actions_magnitude',
                                              abs(action).mean(),
                                              frame / config.frames_per_action)
                config.tensorboard.add_scalar('Rewards/action_rewards',
                                              acc_reward.mean(),
                                              frame / config.frames_per_action)

                if config.reset_on_touch and any(had_contact):
                    done = np.ones(config.num_envs)
                    observation = envs.reset(config.random_reset)
                    new_episode = True

            if (frame > config.noop_steps) and (
                (frame - config.noop_steps) %
                (config.frames_per_action * config.actions_per_episode) == 0):
                done = np.ones(config.num_envs)
                observation = envs.reset(config.random_reset)
                new_episode = True

    # controller.save_models("models.pth")

    config.tensorboard.close()
    print("Starting extrinsic phase...")
    if config.enjoy:
        input("Press enter to test the agent and visualize its actions!")
        showoff(controller, target=target, punished_objects=punished_objects)

    # extrinsic phase
    env = gym.make('REALComp-v0')
    # if config.render:
    env.render('human')

    for k in range(config.extrinsic_trials):

        # reset simulation
        observation = env.reset()
        reward = 0
        done = False

        # set the extrinsic goal to pursue
        env.set_goal()
        print("Starting extrinsic trial...")

        while not done:
            # Call your controller to choose an action
            # action = controller.step(observation, reward, done)

            # Modification: the action is taken WITHOUT exploration noise
            action = controller.step_opt(observation, reward, done)

            # do action
            observation, reward, done, _ = env.step(action)
Example no. 8

data = datasets.EMNIST  # 'byclass' split, 62 classes, and the save path imply EMNIST rather than MNIST

batch_size = 512
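# Load the EMNIST 'byclass' split (62 classes), augment the training set with CVAE-generated
# samples, and train the CNN classifier on the augmented data.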
train_set = data('./data/' + dataset_name,
                 train=True,
                 download=True,
                 split='byclass',
                 transform=transforms.ToTensor())
augmented = perform_augmentation(train_set, cvae_aug, cvae_model, num_classes,
                                 augmentation_per_class, use_cuda)
test_set = data('./data/' + dataset_name,
                train=False,
                download=True,
                split='byclass',
                transform=transforms.ToTensor())
print("augmentation completed")

cnn_model = CNN(class_size=62)
if use_cuda:
    cnn_model = cnn_model.cuda()
cnn_optimizer = optim.SGD(cnn_model.parameters(), lr=5e-2, momentum=0.5)
#cnn_optimizer = optim.Adagrad(cnn_model.parameters(), lr=5e-3, lr_decay=1e-3)
cnn_save_path = './saved_model/cnn_augmented_EMNIST.pt'
print("start training")
train_cnn(cnn_model,
          cnn_optimizer,
          augmented,
          test_set,
          save_path=cnn_save_path,
          num_epochs=30)
print("done")
Example no. 9
import numpy as np
import tensorflow as tf
import datetime
import data_helpers
import train_cnn
from sent_cnn import SentCNN
import config

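# Load the preprocessed inputs, labels, maximum sequence length, and embedding matrix U
# from the config, then train the sentence CNN.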
x_u_i, x_r_i, y, max_len, U = train_cnn.load_data(config.config)

train_cnn.train_cnn(x_u_i, x_r_i, y, max_len, U, config.config, debug=False)
Example no. 10
    # initialize cnn
    dre_precnn_net = cnn_extract_initialization(args.dre_precnn_net,
                                                num_classes=args.num_classes)
    num_parameters = count_parameters(dre_precnn_net)
    # training
    if not os.path.isfile(filename_precnn_ckpt):
        print("\n Start training CNN for feature extraction in the DRE >>>")
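        # Train the CNN used for feature extraction in the density-ratio estimator (DRE),
        # with its own learning-rate schedule and an optional reconstruction term weighted
        # by dre_precnn_lambda (net_decoder is None here, so no decoder is used).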
        dre_precnn_net = train_cnn(
            dre_precnn_net,
            'PreCNNForDRE_{}'.format(args.dre_precnn_net),
            trainloader_dre_precnn,
            testloader_precnn,
            epochs=args.dre_precnn_epochs,
            resume_epoch=args.dre_precnn_resume_epoch,
            lr_base=args.dre_precnn_lr_base,
            lr_decay_factor=args.dre_precnn_lr_decay_factor,
            lr_decay_epochs=dre_precnn_lr_decay_epochs,
            weight_decay=args.dre_precnn_weight_decay,
            extract_feature=True,
            net_decoder=None,
            lambda_reconst=args.dre_precnn_lambda,
            train_means=train_means,
            train_stds=train_stds,
            path_to_ckpt=path_to_ckpt_in_train)

        # store model
        torch.save({
            'net_state_dict': dre_precnn_net.state_dict(),
        }, filename_precnn_ckpt)
        print("\n End training CNN.")
    else:
Example no. 11
def main():
    parameters = {

        # select influenza subtype
        'subtype': subtype,

        # select the way for feature generation
        'feature_type': feature_type,

        # 'rf', 'lr', 'knn', 'svm', 'cnn'
        #'model': model,

        # Number of hidden units in the encoder
        'hidden_size': 128,

        # Dropout rate (applied at the input)
        'dropout_p': 0.5,

        # Note: no learning-rate decay is implemented
        'learning_rate': 0.001,

        # Size of mini batch
        'batch_size': 32,

        # Number of training epochs
        'num_of_epochs': 100
    }

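    # Dispatch on subtype (H1N1 / H3N2 / H5N1), then on model_mode ('Tradition model' vs.
    # 'Deep model'), then on feature_type (traditional methods) or baseline (deep/ML baselines).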
    if parameters['subtype'] == 'H1N1':
        # Read antigenic data and sequence data
        H1N1_Antigenic_dist = pd.read_csv('antigenic/H1N1_antigenic.csv')
        H1N1_seq = pd.read_csv('sequence/H1N1/H1N1_sequence_HA1.csv',
                               names=['seq', 'description'])

        if model_mode == 'Tradition model':
            if feature_type == 'Min-Shi Lee':
                print('\n')
                H1N1_num_mut_list = distance_mutation(H1N1_Antigenic_dist,
                                                      H1N1_seq)
                H1N1_Antigenic_dist_list = list(
                    H1N1_Antigenic_dist['Distance'])
                conf_matrix = get_confusion_matrix(H1N1_num_mut_list,
                                                   H1N1_Antigenic_dist_list,
                                                   'H1N1')
                H1N1_acc = get_accuracy(conf_matrix)
                H1N1_pre = get_precision(conf_matrix)
                H1N1_rec = get_recall(conf_matrix)
                H1N1_f1 = get_f1score(conf_matrix)
                H1N1_mcc = get_mcc(conf_matrix)
                print('Min-Shi Lee method on H1N1:')
                print(
                    'V_acc %.3f\tV_pre %.3f\tV_rec %.3f\tV_fscore %.3f\tV_mcc %.3f'
                    % (H1N1_acc, H1N1_pre, H1N1_rec, H1N1_f1, H1N1_mcc))

            elif feature_type == 'Yu-Chieh Liao':
                print('\n')
                H1N1_data = pd.read_csv('training/Yu-Chieh Liao/H1N1.csv')
                H1N1_data = H1N1_data.iloc[:, 1:329]
                H1N1_feature = H1N1_data.iloc[:, H1N1_data.columns != 'label']
                H1N1_label = H1N1_data['label']
                print('Yu-Chieh Liao method on H1N1 using svm:')
                #train_x, test_x, train_y, test_y = train_test_split_data(H1N1_feature, H1N1_label, 0.2)
                svm_cross_validation(H1N1_feature, H1N1_label)

            elif feature_type == 'William Lees':
                print('\n')
                H1N1_data = pd.read_csv(
                    'training/William Lees/H1N1_new_epitope_data.csv')
                H1N1_data = H1N1_data.iloc[:, 1:7]
                H1N1_feature = H1N1_data.iloc[:, H1N1_data.columns != 'label']
                H1N1_label = H1N1_data['label']
                print('William Lees method on H1N1 using svm:')
                logistic_cross_validation(H1N1_feature, H1N1_label)

            elif feature_type == 'Peng Yousong':
                print('\n')
                H1N1_data = pd.read_csv(
                    'training/Peng Yousong/H1N1_regional_data.csv')
                H1N1_data = H1N1_data.iloc[:, 1:12]
                H1N1_feature = H1N1_data.iloc[:, H1N1_data.columns != 'label']
                H1N1_label = H1N1_data['label']
                print('Peng Yousong method on H1N1 using naive bayes:')
                bayes_cross_validation(H1N1_feature, H1N1_label)

            elif feature_type == 'Yuhua Yao':
                print('\n')
                H1N1_data = pd.read_csv('training/Yuhua Yao/H1N1.csv')
                H1N1_data = H1N1_data.iloc[:, 1:329]
                H1N1_feature = H1N1_data.iloc[:, H1N1_data.columns != 'label']
                H1N1_label = H1N1_data['label']
                print('Yuhua Yao method on H1N1 using random forest:')
                randomforest_cross_validation(H1N1_feature, H1N1_label)

        elif model_mode == 'Deep model':
            setup_seed(10)

            #feature generation
            feature, label = cnn_training_data(H1N1_Antigenic_dist, H1N1_seq)
            train_x, test_x, train_y, test_y = train_test_split_data(
                feature, label, 0.2)

            if baseline == 'rf_baseline':
                print('rf_baseline + ProVect on H1N1:')
                rf_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'lr_baseline':
                print('lr_baseline + ProVect on H1N1:')
                lr_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'svm_baseline':
                print('svm_baseline + ProVect on H1N1:')
                svm_baseline(reshape_to_linear(train_x), train_y,
                             reshape_to_linear(test_x), test_y)
            elif baseline == 'knn_baseline':
                print('knn_baseline + ProVect on H1N1:')
                knn_baseline(reshape_to_linear(train_x), train_y,
                             reshape_to_linear(test_x), test_y)
            elif baseline == 'nn_baseline':
                print('nn_baseline + ProVect on H1N1:')
                nn_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'cnn':
                print('CNN + ProVect on H1N1:')
                train_x = np.reshape(
                    train_x,
                    (np.array(train_x).shape[0], 1, np.array(train_x).shape[1],
                     np.array(train_x).shape[2]))
                test_x = np.reshape(
                    test_x,
                    (np.array(test_x).shape[0], 1, np.array(test_x).shape[1],
                     np.array(test_x).shape[2]))
                print(np.array(train_x).shape)
                print(np.array(test_x).shape)

                train_x = torch.tensor(train_x, dtype=torch.float32)
                train_y = torch.tensor(train_y, dtype=torch.int64)
                test_x = torch.tensor(test_x, dtype=torch.float32)
                test_y = torch.tensor(test_y, dtype=torch.int64)
                net = SENet18()
                #                if torch.cuda.is_available():
                #                    print('running with GPU')
                #                    net.cuda()

                train_cnn(net, parameters['num_of_epochs'],
                          parameters['learning_rate'],
                          parameters['batch_size'], train_x, train_y, test_x,
                          test_y)
            elif baseline == 'iva-cnn':
                print('iva-cnn + ProVect on H1N1:')
                train_x = np.reshape(
                    train_x,
                    (np.array(train_x).shape[0], 1, np.array(train_x).shape[1],
                     np.array(train_x).shape[2]))
                test_x = np.reshape(
                    test_x,
                    (np.array(test_x).shape[0], 1, np.array(test_x).shape[1],
                     np.array(test_x).shape[2]))
                print(np.array(train_x).shape)
                print(np.array(test_x).shape)

                train_x = torch.tensor(train_x, dtype=torch.float32).cuda()
                train_y = torch.tensor(train_y, dtype=torch.int64).cuda()
                test_x = torch.tensor(test_x, dtype=torch.float32).cuda()
                test_y = torch.tensor(test_y, dtype=torch.int64).cuda()

                net = SENet18b()
                net.cuda()
                train_cnn(net, parameters['num_of_epochs'],
                          parameters['learning_rate'],
                          parameters['batch_size'], train_x, train_y, test_x,
                          test_y)

    elif parameters['subtype'] == 'H3N2':
        H3N2_Antigenic_dist = pd.read_csv('antigenic/H3N2_antigenic.csv')
        H3N2_seq = pd.read_csv('sequence/H3N2/H3N2_sequence_HA1.csv',
                               names=['seq', 'description'])
        if model_mode == 'Tradition model':
            if feature_type == 'Min-Shi Lee':
                print('\n')
                H3N2_num_mut_list = distance_mutation(H3N2_Antigenic_dist,
                                                      H3N2_seq)
                H3N2_Antigenic_dist_list = list(
                    H3N2_Antigenic_dist['Distance'])
                conf_matrix = get_confusion_matrix(H3N2_num_mut_list,
                                                   H3N2_Antigenic_dist_list,
                                                   'H3N2')
                H3N2_acc = get_accuracy(conf_matrix)
                H3N2_pre = get_precision(conf_matrix)
                H3N2_rec = get_recall(conf_matrix)
                H3N2_f1 = get_f1score(conf_matrix)
                H3N2_mcc = get_mcc(conf_matrix)
                print('Min-Shi Lee method on H3N2:')
                print(
                    'V_acc %.3f\tV_pre %.3f\tV_rec %.3f\tV_fscore %.3f\tV_mcc %.3f'
                    % (H3N2_acc, H3N2_pre, H3N2_rec, H3N2_f1, H3N2_mcc))

            elif feature_type == 'Yu-Chieh Liao':
                print('\n')
                H3N2_data = pd.read_csv('training/Yu-Chieh Liao/H3N2.csv')
                H3N2_data = H3N2_data.iloc[:, 1:331]
                H3N2_feature = H3N2_data.iloc[:, H3N2_data.columns != 'label']
                H3N2_label = H3N2_data['label']
                print('Yu-Chieh Liao method on H3N2 using svm:')
                #train_x, test_x, train_y, test_y = train_test_split_data(H3N2_feature, H3N2_label, 0.2)
                svm_cross_validation(H3N2_feature, H3N2_label)

            elif feature_type == 'William Lees':
                print('\n')
                H3N2_data = pd.read_csv(
                    'training/William Lees/H3N2_new_epitope_data.csv')
                H3N2_data = H3N2_data.iloc[:, 1:7]
                H3N2_feature = H3N2_data.iloc[:, H3N2_data.columns != 'label']
                H3N2_label = H3N2_data['label']
                print('William Lees method on H3N2 using svm:')
                logistic_cross_validation(H3N2_feature, H3N2_label)

            elif feature_type == 'Peng Yousong':
                print('\n')
                H3N2_data = pd.read_csv(
                    'training/Peng Yousong/H3N2_regional_data.csv')
                H3N2_data = H3N2_data.iloc[:, 1:12]
                H3N2_feature = H3N2_data.iloc[:, H3N2_data.columns != 'label']
                H3N2_label = H3N2_data['label']
                print('Peng Yousong method on H3N2 using naive bayes:')
                bayes_cross_validation(H3N2_feature, H3N2_label)

            elif feature_type == 'Yuhua Yao':
                print('\n')
                H3N2_data = pd.read_csv('training/Yuhua Yao/H3N2.csv')
                H3N2_data = H3N2_data.iloc[:, 1:331]
                H3N2_feature = H3N2_data.iloc[:, H3N2_data.columns != 'label']
                H3N2_label = H3N2_data['label']
                print('Yuhua Yao method on H3N2 using random forest:')
                randomforest_cross_validation(H3N2_feature, H3N2_label)

        elif model_mode == 'Deep model':
            setup_seed(20)

            feature, label = cnn_training_data(H3N2_Antigenic_dist, H3N2_seq)
            train_x, test_x, train_y, test_y = train_test_split_data(
                feature, label, 0.2)

            if baseline == 'rf_baseline':
                print('rf_baseline + ProVect on H3N2:')
                rf_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'lr_baseline':
                print('lr_baseline + ProVect on H3N2:')
                lr_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'svm_baseline':
                print('svm_baseline + ProVect on H3N2:')
                svm_baseline(reshape_to_linear(train_x), train_y,
                             reshape_to_linear(test_x), test_y)
            elif baseline == 'knn_baseline':
                print('knn_baseline + ProVect on H3N2:')
                knn_baseline(reshape_to_linear(train_x), train_y,
                             reshape_to_linear(test_x), test_y)
            elif baseline == 'nn_baseline':
                print('nn_baseline + ProVect on H3N2:')
                nn_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'cnn':
                print('CNN + ProVect on H3N2:')
                train_x = np.reshape(
                    train_x,
                    (np.array(train_x).shape[0], 1, np.array(train_x).shape[1],
                     np.array(train_x).shape[2]))
                test_x = np.reshape(
                    test_x,
                    (np.array(test_x).shape[0], 1, np.array(test_x).shape[1],
                     np.array(test_x).shape[2]))
                print(np.array(train_x).shape)
                print(np.array(test_x).shape)

                train_x = torch.tensor(train_x, dtype=torch.float32)
                train_y = torch.tensor(train_y, dtype=torch.int64)
                test_x = torch.tensor(test_x, dtype=torch.float32)
                test_y = torch.tensor(test_y, dtype=torch.int64)
                net = SENet18b()
                #                if torch.cuda.is_available():
                #                    print('running with GPU')
                #                    net.cuda()

                train_cnn(net, parameters['num_of_epochs'],
                          parameters['learning_rate'],
                          parameters['batch_size'], train_x, train_y, test_x,
                          test_y)
            elif baseline == 'iva-cnn':
                print('iva-cnn + ProVect on H3N2:')
                train_x = np.reshape(
                    train_x,
                    (np.array(train_x).shape[0], 1, np.array(train_x).shape[1],
                     np.array(train_x).shape[2]))
                test_x = np.reshape(
                    test_x,
                    (np.array(test_x).shape[0], 1, np.array(test_x).shape[1],
                     np.array(test_x).shape[2]))
                print(np.array(train_x).shape)
                print(np.array(test_x).shape)

                train_x = torch.tensor(train_x, dtype=torch.float32).cuda()
                train_y = torch.tensor(train_y, dtype=torch.int64).cuda()
                test_x = torch.tensor(test_x, dtype=torch.float32).cuda()
                test_y = torch.tensor(test_y, dtype=torch.int64).cuda()

                net = SENet18b()
                net.cuda()
                train_cnn(net, parameters['num_of_epochs'],
                          parameters['learning_rate'],
                          parameters['batch_size'], train_x, train_y, test_x,
                          test_y)

    elif parameters['subtype'] == 'H5N1':
        H5N1_Antigenic_dist = pd.read_csv('antigenic/H5N1_antigenic.csv')
        H5N1_seq = pd.read_csv('sequence/H5N1/H5N1_sequence_HA1.csv',
                               names=['seq', 'description'])
        if model_mode == 'Tradition model':
            if feature_type == 'Min-Shi Lee':
                print('\n')
                H5N1_num_mut_list = distance_mutation(H5N1_Antigenic_dist,
                                                      H5N1_seq)
                H5N1_Antigenic_dist_list = list(
                    H5N1_Antigenic_dist['Distance'])
                conf_matrix = get_confusion_matrix(H5N1_num_mut_list,
                                                   H5N1_Antigenic_dist_list,
                                                   'H5N1')
                H5N1_acc = get_accuracy(conf_matrix)
                H5N1_pre = get_precision(conf_matrix)
                H5N1_rec = get_recall(conf_matrix)
                H5N1_f1 = get_f1score(conf_matrix)
                H5N1_mcc = get_mcc(conf_matrix)
                print('Min-Shi Lee method on H5N1:')
                print(
                    'V_acc %.3f\tV_pre %.3f\tV_rec %.3f\tV_fscore %.3f\tV_mcc %.3f'
                    % (H5N1_acc, H5N1_pre, H5N1_rec, H5N1_f1, H5N1_mcc))

            elif feature_type == 'Yu-Chieh Liao':
                print('\n')
                H5N1_data = pd.read_csv('training/Yu-Chieh Liao/H5N1.csv')
                H5N1_data = H5N1_data.iloc[:, 1:322]
                H5N1_feature = H5N1_data.iloc[:, H5N1_data.columns != 'label']
                H5N1_label = H5N1_data['label']
                print('Yu-Chieh Liao method on H5N1 using svm:')
                #train_x, test_x, train_y, test_y = train_test_split_data(H5N1_feature, H5N1_label, 0.2)
                svm_cross_validation(H5N1_feature, H5N1_label)

            elif feature_type == 'William Lees':
                print('\n')
                H5N1_data = pd.read_csv(
                    'training/William Lees/H5N1_new_epitope_data.csv')
                H5N1_data = H5N1_data.iloc[:, 1:7]
                H5N1_feature = H5N1_data.iloc[:, H5N1_data.columns != 'label']
                H5N1_label = H5N1_data['label']
                print('William Lees method on H5N1 using svm:')
                logistic_cross_validation(H5N1_feature, H5N1_label)

            elif feature_type == 'Peng Yousong':
                print('\n')
                # NOTE: this reads the H3N2 regional file; the H5N1 regional data was presumably intended.
                H5N1_data = pd.read_csv(
                    'training/Peng Yousong/H3N2_regional_data.csv')
                H5N1_data = H5N1_data.iloc[:, 1:12]
                H5N1_feature = H5N1_data.iloc[:, H5N1_data.columns != 'label']
                H5N1_label = H5N1_data['label']
                print('Peng Yousong method on H5N1 using naive bayes:')
                bayes_cross_validation(H5N1_feature, H5N1_label)

            elif feature_type == 'Yuhua Yao':
                print('\n')
                H5N1_data = pd.read_csv('training/Yuhua Yao/H5N1.csv')
                H5N1_data = H5N1_data.iloc[:, 1:322]
                H5N1_feature = H5N1_data.iloc[:, H5N1_data.columns != 'label']
                H5N1_label = H5N1_data['label']
                print('Yuhua Yao method on H5N1 using random forest:')
                randomforest_cross_validation(H5N1_feature, H5N1_label)

        elif model_mode == 'Deep model':
            setup_seed(20)

            feature, label = cnn_training_data(H5N1_Antigenic_dist, H5N1_seq)
            train_x, test_x, train_y, test_y = train_test_split_data(
                feature, label, 0.2)

            if baseline == 'rf_baseline':
                print('rf_baseline + ProVect on H5N1:')
                rf_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'lr_baseline':
                print('lr_baseline + ProVect on H5N1:')
                lr_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'svm_baseline':
                print('svm_baseline + ProVect on H5N1:')
                svm_baseline(reshape_to_linear(train_x), train_y,
                             reshape_to_linear(test_x), test_y)
            elif baseline == 'knn_baseline':
                print('knn_baseline + ProVect on H5N1:')
                knn_baseline(reshape_to_linear(train_x), train_y,
                             reshape_to_linear(test_x), test_y)
            elif baseline == 'nn_baseline':
                print('nn_baseline + ProVect on H5N1:')
                nn_baseline(reshape_to_linear(train_x), train_y,
                            reshape_to_linear(test_x), test_y)
            elif baseline == 'cnn':
                print('CNN + ProVect on H5N1:')
                train_x = np.reshape(
                    train_x,
                    (np.array(train_x).shape[0], 1, np.array(train_x).shape[1],
                     np.array(train_x).shape[2]))
                test_x = np.reshape(
                    test_x,
                    (np.array(test_x).shape[0], 1, np.array(test_x).shape[1],
                     np.array(test_x).shape[2]))
                print(np.array(train_x).shape)
                print(np.array(test_x).shape)

                train_x = torch.tensor(train_x, dtype=torch.float32)
                train_y = torch.tensor(train_y, dtype=torch.int64)
                test_x = torch.tensor(test_x, dtype=torch.float32)
                test_y = torch.tensor(test_y, dtype=torch.int64)
                net = SENet18()
                #                if torch.cuda.is_available():
                #                    print('running with GPU')
                #                    net.cuda()

                train_cnn(net, parameters['num_of_epochs'],
                          parameters['learning_rate'],
                          parameters['batch_size'], train_x, train_y, test_x,
                          test_y)
            elif baseline == 'iva-cnn':
                print('iva-cnn + ProVect on H5N1:')
                train_x = np.reshape(
                    train_x,
                    (np.array(train_x).shape[0], 1, np.array(train_x).shape[1],
                     np.array(train_x).shape[2]))
                test_x = np.reshape(
                    test_x,
                    (np.array(test_x).shape[0], 1, np.array(test_x).shape[1],
                     np.array(test_x).shape[2]))
                print(np.array(train_x).shape)
                print(np.array(test_x).shape)

                train_x = torch.tensor(train_x, dtype=torch.float32).cuda()
                train_y = torch.tensor(train_y, dtype=torch.int64).cuda()
                test_x = torch.tensor(test_x, dtype=torch.float32).cuda()
                test_y = torch.tensor(test_y, dtype=torch.int64).cuda()

                net = SENet18b()
                net.cuda()
                train_cnn(net, parameters['num_of_epochs'],
                          parameters['learning_rate'],
                          parameters['batch_size'], train_x, train_y, test_x,
                          test_y)