def calculate_pre_training(examples, labels):
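    """Pre-train the source network on the pre-training participants.

    Builds one train/validation DataLoader pair per participant; the inputs
    are assumed to be nested lists indexed as examples[participant][recording]
    and labels[participant][recording]. Participant-identity labels are built
    alongside the gesture labels, but only the gesture labels feed the
    datasets. pre_train_model (defined elsewhere) is assumed to run the joint
    pre-training and save the best weights for later fine-tuning.
    """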
    list_train_dataloader = []
    list_validation_dataloader = []
    human_number = 0
    for j in range(19):  # one block of data per pre-training participant
        examples_personne_training = []
        labels_gesture_personne_training = []
        labels_human_personne_training = []

        examples_personne_valid = []
        labels_gesture_personne_valid = []
        labels_human_personne_valid = []

        for k in range(len(examples[j])):
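            # The first 21 recordings of this participant go to the training
            # set; the remaining ones go to the validation set.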
            if k < 21:
                examples_personne_training.extend(examples[j][k])
                labels_gesture_personne_training.extend(labels[j][k])
                labels_human_personne_training.extend(
                    human_number * np.ones(len(labels[j][k])))
            else:
                examples_personne_valid.extend(examples[j][k])
                labels_gesture_personne_valid.extend(labels[j][k])
                labels_human_personne_valid.extend(human_number *
                                                   np.ones(len(labels[j][k])))

        print(np.shape(examples_personne_training))
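        # scramble (defined elsewhere in this module) is assumed to shuffle
        # the examples and their labels in unison.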
        examples_personne_scrambled, labels_gesture_personne_scrambled, labels_human_personne_scrambled = scramble(
            examples_personne_training, labels_gesture_personne_training,
            labels_human_personne_training)

        examples_personne_scrambled_valid, labels_gesture_personne_scrambled_valid, labels_human_personne_scrambled_valid = scramble(
            examples_personne_valid, labels_gesture_personne_valid,
            labels_human_personne_valid)

        train = TensorDataset(
            torch.from_numpy(
                np.array(examples_personne_scrambled, dtype=np.float32)),
            torch.from_numpy(
                np.array(labels_gesture_personne_scrambled, dtype=np.int64)))
        validation = TensorDataset(
            torch.from_numpy(
                np.array(examples_personne_scrambled_valid, dtype=np.float32)),
            torch.from_numpy(
                np.array(labels_gesture_personne_scrambled_valid,
                         dtype=np.int64)))

        trainLoader = torch.utils.data.DataLoader(train,
                                                  batch_size=3315,
                                                  shuffle=True,
                                                  drop_last=True)
        validationLoader = torch.utils.data.DataLoader(validation,
                                                       batch_size=1312,
                                                       shuffle=True,
                                                       drop_last=True)

        list_train_dataloader.append(trainLoader)
        list_validation_dataloader.append(validationLoader)

        human_number += 1
        print("Shape training : ", np.shape(examples_personne_scrambled))
        print("Shape valid : ", np.shape(examples_personne_scrambled_valid))

    cnn = target_network_raw_emg_enhanced.SourceNetwork(
        number_of_class=7, dropout_rate=.35).cuda()

    criterion = nn.CrossEntropyLoss(reduction='sum')  # summed (not averaged) loss over the batch
    optimizer = optim.Adam(cnn.parameters(), lr=0.002335721469090121)
    precision = 1e-8
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                     mode='min',
                                                     factor=.2,
                                                     patience=15,
                                                     verbose=True,
                                                     eps=precision)

    pre_train_model(cnn,
                    criterion=criterion,
                    optimizer=optimizer,
                    scheduler=scheduler,
                    dataloaders={
                        "train": list_train_dataloader,
                        "val": list_validation_dataloader
                    },
                    precision=precision)


def calculate_fitness(examples_training,
                      labels_training,
                      examples_test0,
                      labels_test0,
                      examples_test1,
                      labels_test_1,
                      learning_rate=.1,
                      training_cycle=4):
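    """Fine-tune and evaluate the network for each evaluation participant.

    For every participant, the first `training_cycle` cycles of recordings are
    used for fine-tuning (10% of them held out for validation) and the two
    test sessions are used for evaluation. With training_cycle == 0, a freshly
    initialised SourceNetwork is evaluated instead, without any fine-tuning.
    Returns the per-participant accuracies on test sessions 0 and 1.
    """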
    accuracy_test0 = []
    accuracy_test1 = []
    for j in range(17):  # one evaluation participant at a time
        print("CURRENT DATASET : ", j)
        examples_personne_training = []
        labels_gesture_personne_training = []

        for k in range(len(examples_training[j])):
            if k < training_cycle * 7:  # keep only the first training_cycle cycles (7 recordings per cycle)
                examples_personne_training.extend(examples_training[j][k])
                labels_gesture_personne_training.extend(labels_training[j][k])

        # Flatten both test sessions of participant j into single example/label lists.
        X_test_0, Y_test_0 = [], []
        for k in range(len(examples_test0[j])):
            X_test_0.extend(examples_test0[j][k])
            Y_test_0.extend(labels_test0[j][k])

        X_test_1, Y_test_1 = [], []
        for k in range(len(examples_test1[j])):
            X_test_1.extend(examples_test1[j][k])
            Y_test_1.extend(labels_test_1[j][k])

        if training_cycle == 0:
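            # No fine-tuning data: evaluate a freshly initialised SourceNetwork
            # directly (the pre-trained weights are not loaded in this branch).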
            cnn = target_network_raw_emg_enhanced.SourceNetwork(
                number_of_class=7, dropout_rate=.35).cuda()
            cnn.eval()
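            # Build TensorDatasets for both test sessions, then measure accuracy on each.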
            X_test_0, Y_test_0 = scramble(X_test_0, Y_test_0)

            test_0 = TensorDataset(
                torch.from_numpy(np.array(X_test_0, dtype=np.float32)),
                torch.from_numpy(np.array(Y_test_0, dtype=np.int64)))

            X_test_1, Y_test_1 = scramble(X_test_1, Y_test_1)

            test_1 = TensorDataset(
                torch.from_numpy(np.array(X_test_1, dtype=np.float32)),
                torch.from_numpy(np.array(Y_test_1, dtype=np.int64)))

            test_0_loader = torch.utils.data.DataLoader(test_0,
                                                        batch_size=256,
                                                        shuffle=False)
            total = 0
            correct_prediction_test_0 = 0
            for k, data_test_0 in enumerate(test_0_loader, 0):
                # get the inputs
                inputs_test_0, ground_truth_test_0 = data_test_0
                inputs_test_0, ground_truth_test_0 = Variable(
                    inputs_test_0.cuda()), Variable(ground_truth_test_0.cuda())

                outputs_test_0 = cnn(inputs_test_0)
                _, predicted = torch.max(outputs_test_0.data, 1)
                correct_prediction_test_0 += (
                    predicted.cpu().numpy() ==
                    ground_truth_test_0.data.cpu().numpy()).sum()
                total += ground_truth_test_0.size(0)
            print("ACCURACY TEST_0 FINAL : %.3f %%" %
                  (100 * float(correct_prediction_test_0) / float(total)))
            accuracy_test0.append(100 * float(correct_prediction_test_0) /
                                  float(total))

            test_1_loader = torch.utils.data.DataLoader(test_1,
                                                        batch_size=256,
                                                        shuffle=False)
            total = 0
            correct_prediction_test_1 = 0
            for k, data_test_1 in enumerate(test_1_loader, 0):
                # get the inputs
                inputs_test_1, ground_truth_test_1 = data_test_1
                inputs_test_1, ground_truth_test_1 = Variable(
                    inputs_test_1.cuda()), Variable(ground_truth_test_1.cuda())

                outputs_test_1 = cnn(inputs_test_1)
                _, predicted = torch.max(outputs_test_1.data, 1)
                correct_prediction_test_1 += (
                    predicted.cpu().numpy() ==
                    ground_truth_test_1.data.cpu().numpy()).sum()
                total += ground_truth_test_1.size(0)
            print("ACCURACY TEST_1 FINAL : %.3f %%" %
                  (100 * float(correct_prediction_test_1) / float(total)))
            accuracy_test1.append(100 * float(correct_prediction_test_1) /
                                  float(total))
        else:
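            # Fine-tune the pre-trained network: hold out 10% of the scrambled
            # data for validation and train on the remaining 90%.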
            print(np.shape(examples_personne_training))
            examples_personne_scrambled, labels_gesture_personne_scrambled = scramble(
                examples_personne_training, labels_gesture_personne_training)
            valid_examples = examples_personne_scrambled[
                0:int(len(examples_personne_scrambled) * 0.1)]
            labels_valid = labels_gesture_personne_scrambled[
                0:int(len(labels_gesture_personne_scrambled) * 0.1)]

            X_fine_tune = examples_personne_scrambled[
                int(len(examples_personne_scrambled) * 0.1):]
            Y_fine_tune = labels_gesture_personne_scrambled[
                int(len(labels_gesture_personne_scrambled) * 0.1):]

            train = TensorDataset(
                torch.from_numpy(np.array(X_fine_tune, dtype=np.float32)),
                torch.from_numpy(np.array(Y_fine_tune, dtype=np.int64)))

            validation = TensorDataset(
                torch.from_numpy(np.array(valid_examples, dtype=np.float32)),
                torch.from_numpy(np.array(labels_valid, dtype=np.int64)))

            trainloader = torch.utils.data.DataLoader(train,
                                                      batch_size=256,
                                                      shuffle=True,
                                                      drop_last=True)
            validationloader = torch.utils.data.DataLoader(validation,
                                                           batch_size=128,
                                                           shuffle=True,
                                                           drop_last=True)

            # Weights presumably saved by pre_train_model during pre-training.
            pre_trained_weights = torch.load(
                'convnet_weights/best_pre_train_weights_target_raw.pt')
            cnn = target_network_raw_emg_enhanced.TargetNetwork(
                number_of_class=7,
                weights_pre_trained_convnet=pre_trained_weights,
                dropout=.5).cuda()

            criterion = nn.CrossEntropyLoss(reduction='sum')  # summed (not averaged) loss over the batch
            optimizer = optim.Adam(cnn.parameters(), lr=learning_rate)

            precision = 1e-6
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                optimizer=optimizer,
                mode='min',
                factor=.2,
                patience=5,
                verbose=True,
                eps=precision)

            cnn = train_model(cnn,
                              criterion,
                              optimizer,
                              scheduler,
                              dataloaders={
                                  "train": trainloader,
                                  "val": validationloader
                              },
                              precision=precision)

            cnn.eval()
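            # Same evaluation as above, now with the fine-tuned network.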
            X_test_0, Y_test_0 = scramble(X_test_0, Y_test_0)

            test_0 = TensorDataset(
                torch.from_numpy(np.array(X_test_0, dtype=np.float32)),
                torch.from_numpy(np.array(Y_test_0, dtype=np.int64)))

            X_test_1, Y_test_1 = scramble(X_test_1, Y_test_1)

            test_1 = TensorDataset(
                torch.from_numpy(np.array(X_test_1, dtype=np.float32)),
                torch.from_numpy(np.array(Y_test_1, dtype=np.int64)))

            test_0_loader = torch.utils.data.DataLoader(test_0,
                                                        batch_size=256,
                                                        shuffle=False)
            total = 0
            correct_prediction_test_0 = 0
            for k, data_test_0 in enumerate(test_0_loader, 0):
                # get the inputs
                inputs_test_0, ground_truth_test_0 = data_test_0
                inputs_test_0, ground_truth_test_0 = Variable(
                    inputs_test_0.cuda()), Variable(ground_truth_test_0.cuda())

                outputs_test_0 = cnn(inputs_test_0)
                _, predicted = torch.max(outputs_test_0.data, 1)
                correct_prediction_test_0 += (
                    predicted.cpu().numpy() ==
                    ground_truth_test_0.data.cpu().numpy()).sum()
                total += ground_truth_test_0.size(0)
            print("ACCURACY TEST_0 FINAL : %.3f %%" %
                  (100 * float(correct_prediction_test_0) / float(total)))
            accuracy_test0.append(100 * float(correct_prediction_test_0) /
                                  float(total))

            test_1_loader = torch.utils.data.DataLoader(test_1,
                                                        batch_size=256,
                                                        shuffle=False)
            total = 0
            correct_prediction_test_1 = 0
            for k, data_test_1 in enumerate(test_1_loader, 0):
                # get the inputs
                inputs_test_1, ground_truth_test_1 = data_test_1
                inputs_test_1, ground_truth_test_1 = Variable(
                    inputs_test_1.cuda()), Variable(ground_truth_test_1.cuda())

                outputs_test_1 = cnn(inputs_test_1)
                _, predicted = torch.max(outputs_test_1.data, 1)
                correct_prediction_test_1 += (
                    predicted.cpu().numpy() ==
                    ground_truth_test_1.data.cpu().numpy()).sum()
                total += ground_truth_test_1.size(0)
            print("ACCURACY TEST_1 FINAL : %.3f %%" %
                  (100 * float(correct_prediction_test_1) / float(total)))
            accuracy_test1.append(100 * float(correct_prediction_test_1) /
                                  float(total))

    print("AVERAGE ACCURACY TEST 0 %.3f" % np.array(accuracy_test0).mean())
    print("AVERAGE ACCURACY TEST 1 %.3f" % np.array(accuracy_test1).mean())
    return accuracy_test0, accuracy_test1