import numpy as np
import torch


def get_average_activation_for_training_examples(
        all_examples, all_labels, highest_activation_participants_gestures):
    from LongTermClassificationMain.PrepareAndLoadDataLongTerm.load_dataset_in_dataloader import \
        load_dataloaders_training_sessions
    participants_train, _, _ = load_dataloaders_training_sessions(
        all_examples,
        all_labels,
        batch_size=512,
        number_of_cycle_for_first_training=3,
        get_validation_set=False,
        number_of_cycles_rest_of_training=3,
        ignore_first=True,
        drop_last=True)
    activations_participant = []
    for participant_index, (dataset_participant,
                            highest_activation_participant) in enumerate(
                                zip(participants_train,
                                    highest_activation_participants_gestures)):
        activation_evaluation_session = []
        for session_index, dataloader_session in enumerate(
                dataset_participant):
            highest_activation_participant_for_this_training_session = highest_activation_participant[
                session_index]
            for inputs, labels in dataloader_session:
                activations_average = torch.mean(torch.abs(inputs),
                                                 dim=(1, 2, 3),
                                                 dtype=torch.double)
                # Examples from the neutral gesture are zeroed out so that they
                # can be excluded from the statistics below
                activations_average[labels == 0] = 0
                # Per-example denominator: the participant's highest recorded
                # activation for each example's gesture (a single-label batch is
                # wrapped so the result stays at least 1-D)
                if len(labels) > 1:
                    highest_activation_with_associated_labels = torch.from_numpy(
                        np.array(
                            highest_activation_participant_for_this_training_session,
                            dtype=np.double)[labels])
                else:
                    highest_activation_with_associated_labels = torch.from_numpy(
                        np.array([
                            np.array(
                                highest_activation_participant_for_this_training_session,
                                dtype=np.double)[labels]
                        ]))
                activations_average = activations_average / highest_activation_with_associated_labels
                activation_evaluation_session.extend(
                    activations_average.cpu().numpy())
                print(activations_average.cpu().numpy())
                print(labels)
        activations_participant.extend(activation_evaluation_session)
    # Drop the zeroed neutral-gesture entries before computing the statistics
    index_without_neutral = np.squeeze(np.nonzero(activations_participant))
    activations_participant = np.array(
        activations_participant)[index_without_neutral]
    print(activations_participant)
    print(np.nanmean(activations_participant), " STD: ",
          np.nanstd(activations_participant))
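A minimal driver sketch for the function above; the loading step is a placeholder (this listing does not show how the dataset is materialised), and the nesting of highest_activation_participants_gestures (one list per participant, each holding one per-gesture activation list per training session) is inferred from the indexing above:

# Hypothetical driver; load_project_dataset is a stand-in, not a real project helper.
examples, labels, highest_activation = load_project_dataset()
get_average_activation_for_training_examples(
    all_examples=examples, all_labels=labels,
    highest_activation_participants_gestures=highest_activation)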
Example #2
import os

import torch
import torch.nn as nn
import torch.optim as optim

# The project-level helpers used below (TemporalConvNet, load_checkpoint,
# train_model_standard and load_dataloaders_training_sessions) are assumed to
# be imported from elsewhere in LongTermClassificationMain.
def train_raw_TCN_fine_tuning(examples_datasets_train, labels_datasets_train, num_kernels, filter_size=(4, 10),
                              number_of_cycle_for_first_training=2, number_of_cycles_rest_of_training=2,
                              path_weight_to_save_to="../weights_single_cycle_normal_training"):
    participants_train, participants_validation, _ = load_dataloaders_training_sessions(
        examples_datasets_train, labels_datasets_train, batch_size=512,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, ignore_first=True)

    for participant_i in range(len(participants_train)):
        for session_j in range(0, len(participants_train[participant_i])):
            # Define Model
            model = TemporalConvNet(number_of_class=11, num_kernels=num_kernels, kernel_size=filter_size).cuda()

            # Define Loss functions
            cross_entropy_loss_classes = nn.CrossEntropyLoss(reduction='mean').cuda()

            # Define Optimizer
            learning_rate = 0.001316
            optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5, 0.999))

            # Define Scheduler
            precision = 1e-8
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=.2, patience=5,
                                                             verbose=True, eps=precision)

            if session_j > 0:
                # Fine-tune from the previous training
                model, optimizer, _, start_epoch = load_checkpoint(
                    model=model, optimizer=optimizer, scheduler=None,
                    filename=path_weight_to_save_to + "/participant_%d/best_state_%d.pt" %
                             (participant_i, session_j - 1))

            best_state = train_model_standard(model=model, criterion=cross_entropy_loss_classes, optimizer=optimizer,
                                              scheduler=scheduler,
                                              dataloaders={"train": participants_train[participant_i][session_j],
                                                           "val": participants_validation[participant_i][session_j]},
                                              precision=precision, patience=10, patience_increase=10)

            os.makedirs(path_weight_to_save_to + "/participant_%d" % participant_i, exist_ok=True)
            torch.save(best_state, f=path_weight_to_save_to +
                                     "/participant_%d/best_state_%d.pt"
                                     % (participant_i, session_j))
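A hedged invocation sketch for the routine above; the kernel configuration is an assumption (one channel count per temporal block), not a value taken from this listing:

# Hypothetical usage; examples_datasets_train and labels_datasets_train are the
# pre-loaded dataset structures expected by load_dataloaders_training_sessions.
train_raw_TCN_fine_tuning(examples_datasets_train, labels_datasets_train,
                          num_kernels=[16, 32, 64], filter_size=(4, 10))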
Example #3
# Same module-level imports as the previous example (os, torch, torch.nn,
# torch.optim and the project helpers).
def train_raw_convNet(examples_datasets_train, labels_datasets_train, num_kernels, filter_size=(4, 10),
                      number_of_cycle_for_first_training=2, number_of_cycles_rest_of_training=2,
                      path_weight_to_save_to="../weights"):
    participants_train, participants_validation, _ = load_dataloaders_training_sessions(
        examples_datasets_train, labels_datasets_train, batch_size=512,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training)

    for participant_i in range(len(participants_train)):
        for session_j in range(len(participants_train[participant_i])):
            # Define Model
            model = TemporalConvNet(number_of_class=11, num_kernels=num_kernels, kernel_size=filter_size).cuda()

            # Define Loss functions
            cross_entropy_loss_classes = nn.CrossEntropyLoss(reduction='mean').cuda()

            # Define Optimizer
            learning_rate = 0.001316
            optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5, 0.999))

            # Define Scheduler
            precision = 1e-8
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=.2, patience=5,
                                                             verbose=True, eps=precision)

            best_weights = train_model_standard(model=model, criterion=cross_entropy_loss_classes, optimizer=optimizer,
                                                scheduler=scheduler,
                                                dataloaders={"train": participants_train[participant_i][session_j],
                                                             "val": participants_validation[participant_i][session_j]},
                                                precision=precision, patience=10, patience_increase=10)

            os.makedirs(path_weight_to_save_to + "/participant_%d" % participant_i, exist_ok=True)
            torch.save(best_weights, f=path_weight_to_save_to +
                                       "/participant_%d/best_weights_participant_normal_training_%d.pt"
                                       % (participant_i, session_j))
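As above, a hedged usage sketch with an assumed kernel configuration; unlike the fine-tuning variant, this trains each session's model from scratch:

# Hypothetical usage of the normal-training routine above.
train_raw_convNet(examples_datasets_train, labels_datasets_train,
                  num_kernels=[16, 32, 64], filter_size=(4, 10),
                  path_weight_to_save_to="../weights")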
Example #4
# Same module-level imports as above, plus numpy (as np) and the project model
# rawConvNet.
def test_network_DA_algorithm(examples_datasets_train, labels_datasets_train, num_kernels,
                              path_weights_normal='../weights', path_weights_DA='../weights_DANN',
                              filter_size=(4, 10), algo_name="DANN"):
    _, _, participants_test = load_dataloaders_training_sessions(examples_datasets_train,
                                                                 labels_datasets_train, batch_size=512)

    predictions = []
    ground_truths = []
    accuracies = []
    for participant_index, dataset_test in enumerate(participants_test):
        predictions_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        model = rawConvNet(number_of_class=11, num_kernels=num_kernels, kernel_size=filter_size).cuda()
        print(np.shape(dataset_test))
        for session_index, training_session_test_data in enumerate(dataset_test):
            if session_index == 0:
                best_state = torch.load(
                    path_weights_normal + "/participant_%d/best_weights_participant_normal_training_%d.pt" %
                    (participant_index, 0))
            else:
                best_state = torch.load(
                    path_weights_DA + "/participant_%d/best_weights_participant_normal_training_%d.pt" %
                    (participant_index, session_index))  # There are two evaluation sessions per training session
            best_weights = best_state['state_dict']
            model.load_state_dict(best_weights)

            predictions_training_session = []
            ground_truth_training_session = []
            with torch.no_grad():
                model.eval()
                for inputs, labels in training_session_test_data:
                    inputs = inputs.cuda()
                    output = model(inputs)
                    _, predicted = torch.max(output.data, 1)
                    predictions_training_session.extend(predicted.cpu().numpy())
                    ground_truth_training_session.extend(labels.numpy())
            print("Participant ID: ", participant_index, " Session ID: ", session_index, " Accuracy: ",
                  np.mean(np.array(predictions_training_session) == np.array(ground_truth_training_sesssion)))
            predictions_participant.append(predictions_training_session)
            ground_truth_participant.append(ground_truth_training_sesssion)
            accuracies_participant.append(np.mean(np.array(predictions_training_session) ==
                                                  np.array(ground_truth_training_sesssion)))
        accuracies.append(np.array(accuracies_participant))
        predictions.append(predictions_participant)
        ground_truths.append(ground_truth_participant)
        print("ACCURACY PARTICIPANT: ", accuracies_participant)
    accuracies_to_display = []
    # Flatten the per-participant accuracy arrays with plain list extension;
    # participants can have different session counts, so np.array(...).flatten()
    # is not reliable here.
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    file_to_open = "../../results/test_accuracy_on_training_sessions_" + algo_name + "_" + str(
        filter_size[1]) + ".txt"
    np.save("../../results/predictions_training_session_" + algo_name, (ground_truths, predictions))
    with open(file_to_open, "a") as myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))
Example #5
# Same module-level imports as above (numpy, torch and the project helpers
# rawConvNet, load_dataloaders_training_sessions).
def test_network_raw_convNet_on_training_sessions(examples_datasets_train,
                                                  labels_datasets_train,
                                                  num_kernel,
                                                  path_weights='../weights',
                                                  algo_name="Normal_Training",
                                                  filter_size=(4, 10),
                                                  type_of_calibration="None",
                                                  cycle_for_test=None):
    _, _, participants_test = load_dataloaders_training_sessions(
        examples_datasets_train,
        labels_datasets_train,
        batch_size=512,
        cycle_for_test=cycle_for_test)
    model_outputs = []
    predictions = []
    ground_truths = []
    accuracies = []
    for participant_index, dataset_test in enumerate(participants_test):
        model_outputs_participant = []
        predictions_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        model = rawConvNet(number_of_class=11,
                           num_kernels=num_kernel,
                           kernel_size=filter_size).cuda()
        for session_index, training_session_test_data in enumerate(
                dataset_test):
            if type_of_calibration == "None":
                best_state = torch.load(path_weights +
                                        "/participant_%d/best_state_%d.pt" %
                                        (participant_index, 0))
            elif type_of_calibration == "Delayed":
                session_index_to_use = np.max((0, session_index - 1))
                best_state = torch.load(
                    path_weights + "/participant_%d/best_state_%d.pt" %
                    (participant_index, session_index_to_use))
            else:
                best_state = torch.load(path_weights +
                                        "/participant_%d/best_state_%d.pt" %
                                        (participant_index, session_index))
            best_weights = best_state['state_dict']
            model.load_state_dict(best_weights)

            predictions_training_session = []
            ground_truth_training_session = []
            model_outputs_session = []
            with torch.no_grad():
                model.eval()
                for inputs, labels in training_session_test_data:
                    inputs = inputs.cuda()
                    output = model(inputs)
                    _, predicted = torch.max(output.data, 1)
                    model_outputs_session.extend(
                        torch.softmax(output, dim=1).cpu().numpy())
                    predictions_training_session.extend(
                        predicted.cpu().numpy())
                    ground_truth_training_session.extend(labels.numpy())
            print(
                "Participant: ", participant_index, " Session: ", session_index,
                " Accuracy: ",
                np.mean(
                    np.array(predictions_training_session) == np.array(
                        ground_truth_training_session)))
            predictions_participant.append(predictions_training_session)
            model_outputs_participant.append(model_outputs_session)
            ground_truth_participant.append(ground_truth_training_session)
            accuracies_participant.append(
                np.mean(
                    np.array(predictions_training_session) == np.array(
                        ground_truth_training_session)))
        accuracies.append(np.array(accuracies_participant))
        predictions.append(predictions_participant)
        model_outputs.append(model_outputs_participant)
        ground_truths.append(ground_truth_participant)
        print("ACCURACY PARTICIPANT: ", accuracies_participant)
    accuracies_to_display = []
    # As above, flatten the per-participant accuracies with list extension rather
    # than np.array(...).flatten().
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    if type_of_calibration == "None":
        file_to_open = "../../results/test_accuracy_on_training_sessions_" + algo_name + "_no_retraining_" + str(
            filter_size[1]) + ".txt"
        np.save(
            "../../results/predictions_training_session_" + algo_name +
            "_no_retraining", (ground_truths, predictions, model_outputs))
    elif type_of_calibration == "Delayed":
        file_to_open = "../../results/test_accuracy_on_training_sessions_" + algo_name + "_Delayed_" + str(
            filter_size[1]) + ".txt"
        np.save(
            "../../results/predictions_training_session_" + algo_name +
            "_Delayed", (ground_truths, predictions, model_outputs))
    else:
        file_to_open = "../../results/test_accuracy_on_training_sessions_" + algo_name + "_WITH_RETRAINING_" + str(
            filter_size[1]) + ".txt"
        np.save(
            "../../results/predictions_training_session_" + algo_name +
            "_WITH_RETRAINING", (ground_truths, predictions, model_outputs))
    with open(file_to_open, "a") as \
            myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " +
                     str(np.mean(accuracies_to_display)))
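A hedged sketch sweeping the three calibration regimes handled above; any value other than "None" or "Delayed" falls through to the with-retraining branch:

# Hypothetical usage; "ReCalibration" is just a label that selects the else branch.
for calibration in ("None", "Delayed", "ReCalibration"):
    test_network_raw_convNet_on_training_sessions(
        examples_datasets_train, labels_datasets_train, num_kernel=[16, 32, 64],
        path_weights="../weights", type_of_calibration=calibration)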
Example #6
# Same module-level imports as above (os, torch, torch.nn, torch.optim and the
# project helpers SourceNetwork, TargetNetwork, pre_train_model,
# train_model_standard, load_checkpoint, load_dataloaders_training_sessions).
def train_TL_convNet(examples_datasets_train, labels_datasets_train, num_kernels, filter_size=(4, 10),
                     number_of_cycle_for_first_training=2, number_of_cycles_rest_of_training=2,
                     path_weight_to_save_to="../weights_TL", path_weights_normal_training="../weights_TL"):
    # Two independent sets of dataloaders over the same data: one consumed by the
    # source-network pre-training below, one by the target-network fine-tuning.
    participants_train_for_source, participants_validation_for_source, _ = load_dataloaders_training_sessions(
        examples_datasets_train, labels_datasets_train, batch_size=512,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, drop_last=False, ignore_first=True)
    participants_train_for_target, participants_validation_for_target, _ = load_dataloaders_training_sessions(
        examples_datasets_train, labels_datasets_train, batch_size=512,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, drop_last=False, ignore_first=True)

    for participant_i in range(0, len(participants_train_for_target)):
        for session_j in range(1, len(participants_train_for_target[participant_i])):

            model_source = SourceNetwork(number_of_class=11, num_kernels=num_kernels, kernel_size=filter_size).cuda()
            # Define Loss functions
            cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean').cuda()
            # Define Optimizer
            learning_rate = 0.001316
            optimizer = optim.Adam(model_source.parameters(), lr=learning_rate, betas=(0.5, 0.999))

            # Define Scheduler
            precision = 1e-8
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=.2, patience=10,
                                                             verbose=True, eps=precision)
            if session_j == 1:
                # Start from the weights of the first (normal-training) session
                model_source, optimizer, _, start_epoch = load_checkpoint(
                    model=model_source, optimizer=optimizer, scheduler=None,
                    filename=path_weights_normal_training +
                             "/participant_%d/best_state_%d.pt" %
                             (participant_i, 0))
            else:
                # Fine-tune from the previous pre-training checkpoint
                model_source, optimizer, _, start_epoch = load_checkpoint(
                    model=model_source, optimizer=optimizer, scheduler=None,
                    filename=path_weight_to_save_to +
                             "/participant_%d/best_state_participant_pre_training_%d.pt" %
                             (participant_i, session_j - 1))

            list_train_dataloader = []
            list_validation_dataloader = []
            # Pre-train on all sessions up to and including the current one
            for k in range(0, session_j + 1):
                list_train_dataloader.append(participants_train_for_source[participant_i][k])
                list_validation_dataloader.append(participants_validation_for_source[participant_i][k])
            best_state_pre_training = pre_train_model(model=model_source, cross_entropy_loss=cross_entropy_loss,
                                                      optimizer_class=optimizer, scheduler=scheduler,
                                                      dataloaders={"train": list_train_dataloader,
                                                                   "val": list_validation_dataloader},
                                                      patience=20, patience_increase=20)

            os.makedirs(path_weight_to_save_to + "/participant_%d" % participant_i, exist_ok=True)
            torch.save(best_state_pre_training, f=path_weight_to_save_to +
                                                  "/participant_%d/best_state_participant_pre_training_%d.pt"
                                                  % (participant_i, session_j))

            '''Train the target network on the current session, initialised from the pre-trained source network'''
            state_pre_training = torch.load(path_weight_to_save_to +
                                            "/participant_%d/best_state_participant_pre_training_%d.pt" %
                                            (participant_i, session_j))
            weights_pre_training = state_pre_training['state_dict']
            model_target = TargetNetwork(weight_pre_trained_convNet=weights_pre_training, num_kernels=num_kernels,
                                         kernel_size=filter_size).cuda()

            # Define Loss functions
            cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean').cuda()
            # Define Optimizer
            learning_rate = 0.001316
            optimizer = optim.Adam(model_target.parameters(), lr=learning_rate, betas=(0.5, 0.999))

            # Define Scheduler
            precision = 1e-8
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=.2, patience=5,
                                                             verbose=True, eps=precision)

            if session_j > 1:
                # Fine-tune from the previous training; the restored optimizer is
                # assigned back to `optimizer` so that train_model_standard below
                # actually uses the loaded state
                model_target, optimizer, _, start_epoch = load_checkpoint(
                    model=model_target, optimizer=optimizer, scheduler=None,
                    filename=path_weight_to_save_to +
                             "/participant_%d/best_state_%d.pt" %
                             (participant_i, session_j - 1))
            else:
                # Initialise from the first normal-training session (partial load)
                model_target, _, _, start_epoch = load_checkpoint(
                    model=model_target, optimizer=None, scheduler=None,
                    filename=path_weights_normal_training +
                             "/participant_%d/best_state_%d.pt" %
                             (participant_i, 0), strict=False)

            best_state = train_model_standard(model=model_target, criterion=cross_entropy_loss, optimizer=optimizer,
                                              scheduler=scheduler,
                                              dataloaders={"train":
                                                               participants_train_for_target[participant_i][
                                                                   session_j],
                                                           "val":
                                                               participants_validation_for_target[participant_i][
                                                                   session_j]},
                                              precision=precision, patience=10, patience_increase=10)
            os.makedirs(path_weight_to_save_to + "/participant_%d" % participant_i, exist_ok=True)
            torch.save(best_state, f=path_weight_to_save_to + "/participant_%d/"
                                                              "best_state_%d.pt" %
                                     (participant_i, session_j))
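A final hedged usage sketch. The routine expects normal-training checkpoints (best_state_0.pt) to already exist for every participant, so a run such as train_raw_TCN_fine_tuning must come first; passing path_weights_normal_training explicitly is an assumption here, since the default points at the transfer-learning directory itself:

# Hypothetical usage of the transfer-learning routine above.
train_TL_convNet(examples_datasets_train, labels_datasets_train,
                 num_kernels=[16, 32, 64], filter_size=(4, 10),
                 path_weight_to_save_to="../weights_TL",
                 path_weights_normal_training="../weights_single_cycle_normal_training")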