def test_network_convNet_only_second_evaluation_session_for_each_training_session(
        examples_datasets,
        labels_datasets,
        dnn,
        num_neurons,
        path_weights="../weights_evaluation_",
        use_only_first_training=False,
        algo_name="DANN",
        feature_vector_input_length=385):
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets,
        labels_datasets_evaluation=labels_datasets,
        batch_size=512)
    predictions = []
    ground_truths = []
    accuracies = []
    for participant_index, dataset_participant in enumerate(
            participants_evaluation_dataloader):
        predictions_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        # number_of_classes is not defined in this function; it is assumed to be a
        # module-level constant (the other networks in this file use 11 classes).
        model = dnn(
            number_of_class=number_of_classes,
            num_neurons=num_neurons,
            feature_vector_input_length=feature_vector_input_length).cuda()
        for session_index, dataloader_session in enumerate(
                dataset_participant):
            # The first evaluation session of each pair was used to train the DA algorithm: load those weights.
            # The second evaluation session is used for testing (the correct weights have already been loaded).
            if session_index % 2 == 0:
                if use_only_first_training:
                    best_state = torch.load(
                        path_weights + algo_name +
                        "/participant_%d/best_state_NO_recalibration%d.pt" %
                        (participant_index, session_index))
                else:
                    best_state = torch.load(
                        path_weights + algo_name +
                        "/participant_%d/best_state_WITH_recalibration%d.pt" %
                        (participant_index, session_index))
                best_weights = best_state['state_dict']
                model.load_state_dict(best_weights)
            else:
                predictions_evaluation_session = []
                ground_truth_evaluation_session = []
                with torch.no_grad():
                    model.eval()
                    for inputs, labels in dataloader_session:
                        inputs = inputs.cuda()
                        output = model(inputs)
                        _, predicted = torch.max(output.data, 1)
                        predictions_evaluation_session.extend(
                            predicted.cpu().numpy())
                        ground_truth_evaluation_session.extend(labels.numpy())
                print(
                    "Participant: ", participant_index, " Accuracy: ",
                    np.mean(
                        np.array(predictions_evaluation_session) == np.array(
                            ground_truth_evaluation_session)))
                predictions_participant.append(predictions_evaluation_session)
                ground_truth_participant.append(
                    ground_truth_evaluation_session)
                accuracies_participant.append(
                    np.mean(
                        np.array(predictions_evaluation_session) == np.array(
                            ground_truth_evaluation_session)))
        predictions.append(predictions_participant)
        ground_truths.append(ground_truth_participant)
        accuracies.append(np.array(accuracies_participant))
        print("ACCURACY PARTICIPANT: ", accuracies_participant)

    accuracies_to_display = []
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    if use_only_first_training:
        file_to_open = "results_tsd_eval/evaluation_sessions_" + algo_name + "_no_retraining_" + str(
            num_neurons[1]) + ".txt"
        np.save(
            "results_tsd_eval/evaluation_sessions_" + algo_name +
            "_no_retraining", (ground_truths, predictions))
    else:
        file_to_open = "results_tsd_eval/evaluation_sessions_" + algo_name + "_WITH_retraining_" + str(
            num_neurons[1]) + ".txt"
        np.save(
            "results_tsd_eval/evaluation_sessions_" + algo_name +
            "_WITH_retraining", (ground_truths, predictions))
    with open(file_to_open, "a") as myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " +
                     str(np.mean(accuracies_to_display)))
def test_network_convNet(examples_datasets,
                         labels_datasets,
                         convNet,
                         num_kernels,
                         filter_size=(4, 10),
                         path_weights='../../weights',
                         type_of_calibration="None",
                         only_second_evaluation_sessions=False,
                         algo_name="normal"):
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets,
        labels_datasets_evaluation=labels_datasets,
        batch_size=512)
    model_outputs = []
    predictions = []
    ground_truths = []
    accuracies = []
    for participant_index, dataset_participant in enumerate(
            participants_evaluation_dataloader):
        model_outputs_participant = []
        predictions_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        model = convNet(number_of_class=11,
                        num_kernels=num_kernels,
                        kernel_size=filter_size).cuda()
        for session_index, dataloader_session in enumerate(
                dataset_participant):
            if only_second_evaluation_sessions and session_index % 2 != 0:
                ground_truth_evaluation_session, predictions_evaluation_session, model_outputs_session, \
                accuracy_session = evaluate_session(dataloader_session, model, participant_index, path_weights,
                                                    session_index, type_of_calibration)

                predictions_participant.append(predictions_evaluation_session)
                ground_truth_participant.append(
                    ground_truth_evaluation_session)
                accuracies_participant.append(accuracy_session)

            elif only_second_evaluation_sessions is False:
                ground_truth_evaluation_session, predictions_evaluation_session, model_outputs_session, \
                accuracy_session = evaluate_session(dataloader_session, model, participant_index, path_weights,
                                                    session_index, type_of_calibration)

                predictions_participant.append(predictions_evaluation_session)
                model_outputs_participant.append(model_outputs_session)
                ground_truth_participant.append(
                    ground_truth_evaluation_session)
                accuracies_participant.append(
                    np.mean(
                        np.array(predictions_evaluation_session) == np.array(
                            ground_truth_evaluation_session)))
        predictions.append(predictions_participant)
        model_outputs.append(model_outputs_participant)
        ground_truths.append(ground_truth_participant)
        accuracies.append(np.array(accuracies_participant))
        print("ACCURACY PARTICIPANT: ", accuracies_participant)

    accuracies_to_display = []
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    if only_second_evaluation_sessions:
        evaluations_use = "_only_SECOND"
    else:
        evaluations_use = ""
    if type_of_calibration == "None":
        file_to_open = "../../results/test_accuracy_on_EVALUATION_sessions_" + algo_name + "_no_retraining_" + evaluations_use + str(
            filter_size[1]) + ".txt"
        np.save(
            "../../results/predictions_EVALUATION_session_" + algo_name +
            "_no_retraining" + evaluations_use,
            (ground_truths, predictions, model_outputs))
    elif type_of_calibration == "Delayed":
        file_to_open = "../../results/test_accuracy_on_EVALUATION_sessions_" + algo_name + "_Delayed_" + evaluations_use + str(
            filter_size[1]) + ".txt"
        np.save(
            "../../results/predictions_EVALUATION_session_" + algo_name +
            "_Delayed" + evaluations_use,
            (ground_truths, predictions, model_outputs))
    else:
        file_to_open = "../../results/test_accuracy_on_EVALUATION_sessions_" + algo_name + "_WITH_RETRAINING_" + evaluations_use + str(
            filter_size[1]) + ".txt"
        np.save(
            "../../results/predictions_EVALUATION_session_" + algo_name +
            "_WITH_RETRAINING" + evaluations_use,
            (ground_truths, predictions, model_outputs))
    with open(file_to_open, "a") as myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " +
                     str(np.mean(accuracies_to_display)))
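

# --- Illustrative sketch (not part of the original code) ---
# The functions in this file rely on a parity convention for the evaluation sessions: the
# even-indexed session of each pair is used to (re)calibrate or adapt the network, and the
# odd-indexed one is held out for testing. A toy illustration of that split (assumed helper,
# not from the repository):
def split_evaluation_sessions_sketch(number_of_sessions):
    calibration_sessions = [s for s in range(number_of_sessions) if s % 2 == 0]
    test_sessions = [s for s in range(number_of_sessions) if s % 2 != 0]
    return calibration_sessions, test_sessions


# Example: split_evaluation_sessions_sketch(4) returns ([0, 2], [1, 3]),
# i.e. sessions 0 and 2 serve for calibration while sessions 1 and 3 are scored.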
# Note: this second test_network_convNet is the TSD (feature-vector) variant of the function
# above; it takes num_neurons instead of num_kernels and writes its results under Results_tsd_eval/.
def test_network_convNet(examples_datasets,
                         labels_datasets,
                         convNet,
                         num_neurons,
                         path_weights_first_state,
                         path_weights='../../weights',
                         use_only_first_training=False,
                         only_second_evaluation_sessions=True,
                         algo_name="normal"):
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets,
        labels_datasets_evaluation=labels_datasets,
        batch_size=512)
    model_outputs = []
    predictions = []
    ground_truths = []
    accuracies = []
    for participant_index, dataset_participant in enumerate(
            participants_evaluation_dataloader):
        model_outputs_participant = []
        predictions_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        # number_of_classes and feature_vector_input_length are not parameters of this
        # function; they are assumed to be module-level constants (11 classes and a
        # 385-long feature vector elsewhere in this file).
        model = convNet(
            number_of_class=number_of_classes,
            feature_vector_input_length=feature_vector_input_length,
            num_neurons=num_neurons).cuda()
        for session_index, dataloader_session in enumerate(
                dataset_participant):
            if only_second_evaluation_sessions and session_index % 2 != 0:
                ground_truth_evaluation_session, predictions_evaluation_session, model_outputs_session, \
                accuracy_session = evaluate_session(dataloader_session, model, participant_index, path_weights,
                                                    session_index, use_only_first_training,
                                                    path_weights_first_state=path_weights_first_state)

                predictions_participant.append(predictions_evaluation_session)
                ground_truth_participant.append(
                    ground_truth_evaluation_session)
                accuracies_participant.append(accuracy_session)

            elif only_second_evaluation_sessions is False:
                ground_truth_evaluation_session, predictions_evaluation_session, model_outputs_session, \
                accuracy_session = evaluate_session(dataloader_session, model, participant_index, path_weights,
                                                    session_index, use_only_first_training,
                                                    path_weights_first_state=path_weights_first_state)

                predictions_participant.append(predictions_evaluation_session)
                model_outputs_participant.append(model_outputs_session)
                ground_truth_participant.append(
                    ground_truth_evaluation_session)
                accuracies_participant.append(
                    np.mean(
                        np.array(predictions_evaluation_session) == np.array(
                            ground_truth_evaluation_session)))
        predictions.append(predictions_participant)
        model_outputs.append(model_outputs_participant)
        ground_truths.append(ground_truth_participant)
        accuracies.append(np.array(accuracies_participant))
        print("ACCURACY PARTICIPANT: ", accuracies_participant)

    accuracies_to_display = []
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    if only_second_evaluation_sessions:
        evaluations_use = "_only_SECOND"
    else:
        evaluations_use = ""
    if use_only_first_training:
        file_to_open = "Results_tsd_eval/" + algo_name + evaluations_use + "_no_retraining_" \
                       + str(num_neurons[1]) + ".txt"
        np.save(
            "Results_tsd_eval/" + algo_name + evaluations_use +
            "_no_retraining", (ground_truths, predictions, model_outputs))
    else:
        file_to_open = "Results_tsd_eval/" + algo_name + evaluations_use + \
                       "_WITH_retraining_" + str(num_neurons[1]) + ".txt"
        np.save(
            "Results_tsd_eval/" + algo_name + evaluations_use +
            "_WITH_retraining", (ground_truths, predictions, model_outputs))
    with open(file_to_open, "a") as myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " +
                     str(np.mean(accuracies_to_display)))
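

# --- Illustrative sketch (not part of the original code) ---
# The result tuples saved above with np.save hold nested Python lists, so NumPy stores them
# as pickled object arrays; reading one back therefore needs allow_pickle=True. The path in
# the example call is only one of the naming patterns used above.
import numpy as np


def load_saved_results_sketch(path_without_extension):
    ground_truths, predictions, model_outputs = np.load(
        path_without_extension + ".npy", allow_pickle=True)
    return ground_truths, predictions, model_outputs


# e.g. load_saved_results_sketch("Results_tsd_eval/normal_only_SECOND_WITH_retraining")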
def run_SCADANN_evaluation_sessions(examples_datasets_evaluations, labels_datasets_evaluation,
                                    examples_datasets_train, labels_datasets_train, algo_name,
                                    num_kernels, filter_size, path_weights_to_load_from, path_weights_SCADANN,
                                    batch_size=512, patience_increment=10, use_recalibration_data=False,
                                    number_of_cycle_for_first_training=4, number_of_cycles_rest_of_training=4,
                                    feature_vector_input_length=385, learning_rate=0.001316):
    # Get the data to use as the SOURCE from the training sessions
    participants_train, _, _ = load_dataloaders_training_sessions_spectrogram(
        examples_datasets_train, labels_datasets_train, batch_size=batch_size,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training, get_validation_set=False,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, gestures_to_remove=None,
        ignore_first=True, shuffle=False, drop_last=False)

    # Get the data to use as the TARGET from the evaluation sessions
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets_evaluations,
        labels_datasets_evaluation=labels_datasets_evaluation, batch_size=batch_size, shuffle=False, drop_last=False)

    for participant_i in range(len(participants_evaluation_dataloader)):
        print("SHAPE SESSIONS: ", np.shape(participants_evaluation_dataloader[participant_i]))
        for session_j in range(0, len(participants_evaluation_dataloader[participant_i])):
            # There are two evaluation sessions for every training session; we train on the first one.
            if session_j % 2 == 0:
                # Classifier and discriminator (number_of_classes is assumed to be a module-level constant)
                model = TSD_Network(number_of_class=number_of_classes, num_neurons=num_kernels,
                                    feature_vector_input_length=feature_vector_input_length).cuda()
                # loss functions
                crossEntropyLoss = nn.CrossEntropyLoss().cuda()
                # optimizer
                precision = 1e-8
                optimizer_classifier = optim.Adam(model.parameters(), lr=learning_rate,
                                                  betas=(0.5, 0.999))
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer_classifier, mode='min', factor=.2,
                                                                 patience=5, verbose=True, eps=precision)

                if use_recalibration_data:
                    model, optimizer_classifier, _, start_epoch = load_checkpoint(
                        model=model, optimizer=optimizer_classifier, scheduler=None,
                        filename=path_weights_to_load_from +
                                 "/participant_%d/best_state_WITH_recalibration%d.pt" %
                                 (participant_i, session_j))
                    models_array = []
                    for j in range(0, int(session_j / 2) + 1):
                        model_temp = TSD_Network(number_of_class=number_of_classes, num_neurons=num_kernels,
                                                 feature_vector_input_length=feature_vector_input_length).cuda()
                        model_temp, _, _, _ = load_checkpoint(
                            model=model_temp, optimizer=None, scheduler=None,
                            filename=path_weights_to_load_from + "/participant_%d/best_state_WITH_recalibration%d.pt" %
                                     (participant_i, int(j * 2)))
                        models_array.append(model_temp)
                else:
                    model, optimizer_classifier, _, start_epoch = load_checkpoint(
                        model=model, optimizer=optimizer_classifier, scheduler=None,
                        filename=path_weights_to_load_from +
                                 "/participant_%d/best_state_NO_recalibration%d.pt" %
                                 (participant_i, session_j))

                    models_array = []
                    for j in range(0, int(session_j / 2) + 1):
                        model_temp = TSD_Network(number_of_class=number_of_classes, num_neurons=num_kernels,
                                                 feature_vector_input_length=feature_vector_input_length).cuda()
                        model_temp, _, _, _ = load_checkpoint(
                            model=model_temp, optimizer=None, scheduler=None,
                            filename=path_weights_to_load_from + "/participant_%d/best_state_NO_recalibration%d.pt" % (
                                participant_i, int(j * 2)))
                        models_array.append(model_temp)

                corresponding_training_session_index = 0 if use_recalibration_data is False else int(session_j / 2)
                train_dataloader_replay, validationloader_replay, train_dataloader_pseudo, validationloader_pseudo = \
                    generate_dataloaders_evaluation_for_SCADANN(
                        dataloader_session_training=participants_train[participant_i][
                            corresponding_training_session_index],
                        dataloader_sessions_evaluation=participants_evaluation_dataloader[participant_i],
                        models=models_array,
                        current_session=session_j, validation_set_ratio=0.2,
                        batch_size=512, use_recalibration_data=use_recalibration_data)

                best_state = SCADANN_BN_training(replay_dataset_train=train_dataloader_replay,
                                                 target_validation_dataset=validationloader_pseudo,
                                                 target_dataset=train_dataloader_pseudo, model=model,
                                                 crossEntropyLoss=crossEntropyLoss,
                                                 optimizer_classifier=optimizer_classifier,
                                                 scheduler=scheduler, patience_increment=patience_increment,
                                                 max_epochs=500,
                                                 domain_loss_weight=1e-1)
                if use_recalibration_data:
                    if not os.path.exists(path_weights_SCADANN + algo_name + "/participant_%d" % participant_i):
                        os.makedirs(path_weights_SCADANN + algo_name + "/participant_%d" % participant_i)
                    torch.save(best_state, f=path_weights_SCADANN + algo_name +
                                             "/participant_%d/best_state_WITH_recalibration%d.pt" %
                                             (participant_i, session_j))
                else:
                    if not os.path.exists(path_weights_SCADANN + algo_name + "/participant_%d" % participant_i):
                        os.makedirs(path_weights_SCADANN + algo_name + "/participant_%d" % participant_i)
                    print(os.listdir(path_weights_SCADANN + algo_name))
                    torch.save(best_state, f=path_weights_SCADANN + algo_name +
                                             "/participant_%d/best_state_NO_recalibration%d.pt" % (
                                                 participant_i, session_j))
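

# --- Illustrative sketch (not part of the original code) ---
# The best_state checkpoints written by run_SCADANN_evaluation_sessions are dictionaries
# that contain (at least) a 'state_dict' entry; the test functions earlier in this file read
# them back with best_state['state_dict'] before calling model.load_state_dict. A minimal
# round trip with a toy model standing in for TSD_Network:
import torch
import torch.nn as nn


def checkpoint_round_trip_sketch(path="/tmp/best_state_sketch.pt"):
    model = nn.Linear(8, 3)  # stand-in for the real network
    torch.save({'state_dict': model.state_dict()}, path)
    best_state = torch.load(path)
    model.load_state_dict(best_state['state_dict'])
    return model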
def test_network_convNet_with_activation(
        examples_datasets,
        labels_datasets,
        convNet,
        highest_activation_participants_gestures,
        num_kernels,
        filter_size=(4, 10),
        path_weights='../../weights',
        use_only_first_training=False):
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets,
        labels_datasets_evaluation=labels_datasets,
        batch_size=512)
    predictions = []
    ground_truths = []
    accuracies = []
    activations = []
    for participant_index, (dataset_participant,
                            highest_activation_participant) in enumerate(
                                zip(participants_evaluation_dataloader,
                                    highest_activation_participants_gestures)):
        predictions_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        activations_participant = []
        model = convNet(number_of_class=11,
                        num_kernels=num_kernels,
                        kernel_size=filter_size).cuda()
        for session_index, dataloader_session in enumerate(
                dataset_participant):
            if use_only_first_training:
                best_weights = torch.load(
                    path_weights +
                    "/participant_%d/best_weights_participant_normal_training_%d.pt"
                    % (participant_index, 0))
            else:
                best_weights = torch.load(
                    path_weights +
                    "/participant_%d/best_weights_participant_normal_training_%d.pt"
                    % (participant_index, int(session_index / 2))
                )  # because there are 2 evaluation sessions per training session
            model.load_state_dict(best_weights, strict=False)
            predictions_evaluation_session = []
            ground_truth_evaluation_session = []
            activation_evaluation_session = []
            highest_activation_participant_for_this_training_session = highest_activation_participant[
                int(session_index / 2)]
            with torch.no_grad():
                model.eval()
                for inputs, labels in dataloader_session:
                    inputs = inputs.cuda()
                    output = model(inputs)
                    _, predicted = torch.max(output.data, 1)
                    predictions_evaluation_session.extend(
                        predicted.cpu().numpy())
                    ground_truth_evaluation_session.extend(labels.numpy())
                    activations_average = torch.mean(torch.abs(inputs),
                                                     dim=(1, 2, 3),
                                                     dtype=torch.double)
                    # zero out the activation of examples from the neutral gesture (label 0)
                    activations_average[labels == 0] = 0.
                    if len(labels) > 1:
                        highest_activation_with_associated_labels = torch.from_numpy(
                            np.array(
                                highest_activation_participant_for_this_training_session,
                                dtype=np.double)[labels]).cuda()
                    else:
                        highest_activation_with_associated_labels = torch.from_numpy(
                            np.array([
                                np.array(
                                    highest_activation_participant_for_this_training_session,
                                    dtype=np.double)[labels]
                            ])).cuda()
                    activations_average = activations_average / highest_activation_with_associated_labels
                    activation_evaluation_session.extend(
                        activations_average.cpu().numpy())
            print(
                "Participant: ", participant_index, " Accuracy: ",
                np.mean(
                    np.array(predictions_evaluation_session) == np.array(
                        ground_truth_evaluation_session)))
            predictions_participant.append(predictions_evaluation_session)
            ground_truth_participant.append(ground_truth_evaluation_session)
            accuracies_participant.append(
                np.mean(
                    np.array(predictions_evaluation_session) == np.array(
                        ground_truth_evaluation_session)))
            activations_participant.append(activation_evaluation_session)

        predictions.append(predictions_participant)
        ground_truths.append(ground_truth_participant)
        accuracies.append(np.array(accuracies_participant))
        activations.append(activations_participant)
        print("ACCURACY PARTICIPANT: ", accuracies_participant)

    accuracies_to_display = []
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    if use_only_first_training:
        file_to_open = "../../results/evaluation_sessions_no_retraining_and_with_activations" + str(filter_size[1]) + \
                       ".txt"
        np.save(
            "../../results/evaluation_sessions_no_retraining_and_with_activations",
            (ground_truths, predictions, activations))
    else:
        file_to_open = "../../results/evaluation_sessions_WITH_retraining_and_activations" + str(filter_size[1]) + \
                       ".txt"
        np.save(
            "../../results/evaluation_sessions_WITH_retraining_and_activations",
            (ground_truths, predictions, activations))
    with open(file_to_open, "a") as myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " +
                     str(np.mean(accuracies_to_display)))
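

# --- Illustrative sketch (not part of the original code) ---
# test_network_convNet_with_activation normalises the mean absolute activation of each
# example by the highest activation recorded for its gesture during training, and zeroes the
# value for the neutral gesture (label 0). A standalone restatement of that step, assuming a
# batch of inputs of shape (batch, channels, height, width) and a 1-D LongTensor of labels:
import torch


def relative_activation_sketch(inputs, labels, highest_activation_per_gesture):
    activation = torch.mean(torch.abs(inputs), dim=(1, 2, 3), dtype=torch.double)
    activation[labels == 0] = 0.  # the neutral gesture contributes no activation
    reference = torch.as_tensor(highest_activation_per_gesture, dtype=torch.double)[labels]
    return activation / reference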
def test_network_TL_evaluation_algorithm(examples_datasets_evaluation, labels_datasets_evaluation, num_kernels,
                                         path_weights_normal='../weights', path_weights_TL='../weights_TL',
                                         path_weights_source_network='../weights_TL_Two_Cycles_Recalibration',
                                         filter_size=(4, 10), algo_name="TransferLearning"):
    participants_test = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets_evaluation,
        labels_datasets_evaluation=labels_datasets_evaluation, batch_size=512)
    class_predictions = []
    model_outputs = []
    ground_truths = []
    accuracies = []
    for participant_index, dataset_test in enumerate(participants_test):
        class_predictions_participant = []
        model_outputs_participant = []
        ground_truth_participant = []
        accuracies_participant = []
        print(np.shape(dataset_test))
        for session_index, training_session_test_data in enumerate(dataset_test):
            # if session_index % 2 != 0:
            if session_index < 2:
                model = rawConvNet(number_of_class=11, num_kernels=num_kernels, kernel_size=filter_size).cuda()
                best_state = torch.load(
                    path_weights_normal + "/participant_%d/best_state_%d.pt" %
                    (participant_index, 0))
            else:
                # Two evaluation sessions per training session
                state_pre_training = torch.load(path_weights_source_network +
                                                "/participant_%d/best_state_participant_pre_training_%d.pt" %
                                                (participant_index, int(session_index / 2)))
                weights_pre_training = state_pre_training['state_dict']
                model = TargetNetwork(weight_pre_trained_convNet=weights_pre_training, num_kernels=num_kernels,
                                      kernel_size=filter_size).cuda()
                best_state = torch.load(
                    path_weights_TL + "/participant_%d/best_state_%d.pt" %
                    (participant_index, int(session_index / 2)))
            model.load_state_dict(best_state['state_dict'])

            predictions_training_session = []
            model_outputs_session = []
            ground_truth_training_session = []
            with torch.no_grad():
                model.eval()
                for inputs, labels in training_session_test_data:
                    inputs = inputs.cuda()
                    output = model(inputs)
                    _, predicted = torch.max(output.data, 1)
                    predictions_training_session.extend(predicted.cpu().numpy())
                    # print(torch.softmax(output, dim=1).cpu().numpy(), "  ", predicted)
                    model_outputs_session.extend(torch.softmax(output, dim=1).cpu().numpy())
                    ground_truth_training_session.extend(labels.numpy())
            print("Participant ID: ", participant_index, " Session ID: ", session_index, " Accuracy: ",
                  np.mean(np.array(predictions_training_session) == np.array(ground_truth_training_session)))
            class_predictions_participant.append(predictions_training_session)
            model_outputs_participant.append(model_outputs_session)
            ground_truth_participant.append(ground_truth_training_session)
            accuracies_participant.append(np.mean(np.array(predictions_training_session) ==
                                                  np.array(ground_truth_training_session)))
        accuracies.append(np.array(accuracies_participant))
        class_predictions.append(class_predictions_participant)
        model_outputs.append(model_outputs_participant)
        ground_truths.append(ground_truth_participant)
        print("ACCURACY PARTICIPANT: ", accuracies_participant)
    accuracies_to_display = []
    for accuracies_from_participant in accuracies:
        accuracies_to_display.extend(accuracies_from_participant)
    print(accuracies_to_display)
    print("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))

    np.save("../../results/predictions_EVALUATION_session_" + algo_name, (ground_truths, class_predictions,
                                                                          model_outputs))
    file_to_open = "../../results/test_accuracy_on_EVALUATION_sessions_" + algo_name + "_" + str(
        filter_size[1]) + ".txt"
    with open(file_to_open, "a") as myfile:
        myfile.write("Predictions: \n")
        myfile.write(str(class_predictions) + '\n')
        myfile.write("Ground Truth: \n")
        myfile.write(str(ground_truths) + '\n')
        myfile.write("ACCURACIES: \n")
        myfile.write(str(accuracies) + '\n')
        myfile.write("OVERALL ACCURACY: " + str(np.mean(accuracies_to_display)))
def train_DA_spectrograms_evaluation(examples_datasets_evaluations, labels_datasets_evaluation,
                                     examples_datasets_train, labels_datasets_train, algo_name,
                                     num_kernels, filter_size, path_weights_to_load_from, path_weights_DA,
                                     batch_size=512, patience_increment=10, use_recalibration_data=False,
                                     number_of_cycle_for_first_training=4, number_of_cycles_rest_of_training=4,
                                     spectrogram_model=True, feature_vector_input_length=None, learning_rate=0.001316):
    # Get the data to use as the SOURCE from the training sessions
    participants_train, participants_validation, _ = load_dataloaders_training_sessions_spectrogram(
        examples_datasets_train, labels_datasets_train, batch_size=batch_size,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training, get_validation_set=True,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, gestures_to_remove=None,
        ignore_first=True, shuffle=True)

    # Get the data to use as the TARGET from the evaluation sessions
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets_evaluations,
        labels_datasets_evaluation=labels_datasets_evaluation, batch_size=batch_size, shuffle=True, drop_last=True)

    for participant_i in range(len(participants_evaluation_dataloader)):
        print("SHAPE SESSIONS: ", np.shape(participants_evaluation_dataloader[participant_i]))
        for session_j in range(0, len(participants_evaluation_dataloader[participant_i])):
            # There are two evaluation sessions for every training session; we train on the first one.
            if session_j % 2 == 0:
                # Select which training session's weights to load
                corresponding_training_session_index = 0 if use_recalibration_data is False else int(session_j / 2)

                # Classifier and discriminator
                if spectrogram_model:
                    gesture_classification = SpectrogramConvNet(number_of_class=11, num_kernels=num_kernels,
                                                                kernel_size=filter_size, dropout=0.5).cuda()
                else:
                    gesture_classification = TSD_Network(number_of_class=11, num_neurons=num_kernels,
                                                         feature_vector_input_length=feature_vector_input_length).cuda()
                # loss functions
                crossEntropyLoss = nn.CrossEntropyLoss().cuda()
                # optimizer
                precision = 1e-8
                optimizer_classifier = optim.Adam(gesture_classification.parameters(), lr=learning_rate,
                                                  betas=(0.5, 0.999))
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer_classifier, mode='min', factor=.2,
                                                                 patience=5, verbose=True, eps=precision)

                gesture_classification, optimizer_classifier, scheduler, start_epoch = load_checkpoint(
                    model=gesture_classification, optimizer=optimizer_classifier, scheduler=scheduler,
                    filename=path_weights_to_load_from +
                             "/participant_%d/best_state_%d.pt" %
                             (participant_i, corresponding_training_session_index))

                best_state = None
                if "DANN" in algo_name:
                    best_state = DANN_BN_Training(gesture_classifier=gesture_classification, scheduler=scheduler,
                                                  optimizer_classifier=optimizer_classifier,
                                                  train_dataset_source=participants_train[participant_i][
                                                      corresponding_training_session_index],
                                                  train_dataset_target=participants_evaluation_dataloader[
                                                      participant_i][session_j],
                                                  validation_dataset_source=participants_validation[participant_i][
                                                      corresponding_training_session_index],
                                                  crossEntropyLoss=crossEntropyLoss,
                                                  patience_increment=patience_increment,
                                                  domain_loss_weight=1e-1)
                elif "VADA" in algo_name:
                    # VADA also needs the conditional entropy loss and the virtual adversarial training (VAT) loss
                    conditionalEntropy = ConditionalEntropyLoss().cuda()
                    vatLoss = VATLoss(gesture_classification).cuda()

                    best_state = vada_BN_Training(gesture_classifier=gesture_classification,
                                                  conditionalEntropyLoss=conditionalEntropy,
                                                  crossEntropyLoss=crossEntropyLoss, vatLoss=vatLoss,
                                                  scheduler=scheduler,
                                                  optimizer_classifier=optimizer_classifier,
                                                  train_dataset_source=participants_train[participant_i][
                                                      corresponding_training_session_index],
                                                  train_dataset_target=
                                                  participants_evaluation_dataloader[participant_i][session_j],
                                                  validation_dataset_source=participants_validation[participant_i][
                                                      corresponding_training_session_index],
                                                  patience_increment=patience_increment)
                elif "DirtT" in algo_name:
                    # DIRT-T also needs the conditional entropy loss and the virtual adversarial training (VAT) loss
                    conditionalEntropy = ConditionalEntropyLoss().cuda()
                    vatLoss = VATLoss(gesture_classification).cuda()

                    # Classifier and discriminator
                    if spectrogram_model:
                        gesture_classification = SpectrogramConvNet(number_of_class=11, num_kernels=num_kernels,
                                                                    kernel_size=filter_size, dropout=0.5).cuda()
                    else:
                        gesture_classification = TSD_Network(number_of_class=11, num_neurons=num_kernels,
                                                             feature_vector_input_length=feature_vector_input_length).cuda()
                    # loss functions
                    crossEntropyLoss = nn.CrossEntropyLoss().cuda()
                    # optimizer
                    precision = 1e-8
                    optimizer_classifier = optim.Adam(gesture_classification.parameters(), lr=learning_rate,
                                                      betas=(0.5, 0.999))
                    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer_classifier, mode='min',
                                                                     factor=.2,
                                                                     patience=5, verbose=True, eps=precision)
                    if use_recalibration_data:
                        gesture_classification, optimizer_classifier, scheduler, start_epoch = load_checkpoint(
                            model=gesture_classification, optimizer=optimizer_classifier, scheduler=scheduler,
                            filename=path_weights_to_load_from +
                                     "/participant_%d/best_state_WITH_recalibration%d.pt" %
                                     (participant_i, session_j))
                    else:
                        gesture_classification, optimizer_classifier, scheduler, start_epoch = load_checkpoint(
                            model=gesture_classification, optimizer=optimizer_classifier, scheduler=scheduler,
                            filename=path_weights_to_load_from +
                                     "/participant_%d/best_state_NO_recalibration%d.pt" %
                                     (participant_i, session_j))

                    best_state = dirt_T_training(gesture_classifier=gesture_classification,
                                                 conditionalEntropyLoss=conditionalEntropy,
                                                 crossEntropyLoss=crossEntropyLoss, vatLoss=vatLoss,
                                                 scheduler=scheduler,
                                                 optimizer_classifier=optimizer_classifier,
                                                 train_dataset_source=participants_evaluation_dataloader[participant_i][
                                                     session_j],
                                                 patience_increment=patience_increment, batch_size=batch_size)

                if use_recalibration_data:
                    if not os.path.exists(path_weights_DA + algo_name + "/participant_%d" % participant_i):
                        os.makedirs(path_weights_DA + algo_name + "/participant_%d" % participant_i)
                    torch.save(best_state, f=path_weights_DA + algo_name +
                                             "/participant_%d/best_state_WITH_recalibration%d.pt" %
                                             (participant_i, session_j))
                else:
                    if not os.path.exists(path_weights_DA + algo_name + "/participant_%d" % participant_i):
                        os.makedirs(path_weights_DA + algo_name + "/participant_%d" % participant_i)
                    print(os.listdir(path_weights_DA + algo_name))
                    torch.save(best_state, f=path_weights_DA + algo_name +
                                             "/participant_%d/best_state_NO_recalibration%d.pt" % (
                                                 participant_i, session_j))
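

# --- Illustrative sketch (not part of the original code) ---
# Every adaptation routine above builds the same optimizer/scheduler pair: Adam with
# betas=(0.5, 0.999) and a ReduceLROnPlateau scheduler stepped on a validation loss. Shown
# here in isolation with the hyper-parameter values used above (the helper name is ours):
import torch.nn as nn
import torch.optim as optim


def build_optimizer_and_scheduler_sketch(model, learning_rate=0.001316, precision=1e-8):
    optimizer_classifier = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5, 0.999))
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer_classifier, mode='min', factor=.2, patience=5, eps=precision)
    return optimizer_classifier, scheduler


# Usage: optimizer, scheduler = build_optimizer_and_scheduler_sketch(nn.Linear(8, 3));
# call scheduler.step(validation_loss) after each validation pass.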
def run_AdaBN_evaluation_sessions(examples_datasets_evaluations,
                                  labels_datasets_evaluation,
                                  algo_name,
                                  num_neurons,
                                  path_weights_to_load_from,
                                  path_weights_SCADANN,
                                  batch_size=512,
                                  use_recalibration_data=False,
                                  number_of_classes=11,
                                  feature_vector_input_length=385):
    # Get the data to use as the TARGET from the evaluation sessions
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets_evaluations,
        labels_datasets_evaluation=labels_datasets_evaluation,
        batch_size=batch_size,
        shuffle=False,
        drop_last=True)
    for participant_i in range(len(participants_evaluation_dataloader)):
        print("SHAPE SESSIONS: ",
              np.shape(participants_evaluation_dataloader[participant_i]))
        for session_j in range(
                0, len(participants_evaluation_dataloader[participant_i])):
            # There are two evaluation sessions for every training session; we adapt on the first one.
            if session_j % 2 == 0:
                # Classifier and discriminator
                model = TSD_Network(
                    number_of_class=number_of_classes,
                    num_neurons=num_neurons,
                    feature_vector_input_length=feature_vector_input_length
                ).cuda()
                # loss functions
                crossEntropyLoss = nn.CrossEntropyLoss().cuda()
                # optimizer
                precision = 1e-8
                learning_rate = 0.001316
                optimizer_classifier = optim.Adam(model.parameters(),
                                                  lr=learning_rate,
                                                  betas=(0.5, 0.999))
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                    optimizer=optimizer_classifier,
                    mode='min',
                    factor=.2,
                    patience=5,
                    verbose=True,
                    eps=precision)

                if use_recalibration_data:
                    model, optimizer_classifier, scheduler, start_epoch = load_checkpoint(
                        model=model,
                        optimizer=optimizer_classifier,
                        scheduler=scheduler,
                        filename=path_weights_to_load_from +
                        "/participant_%d/best_state_%d.pt" %
                        (participant_i, int(session_j / 2)))
                else:
                    model, optimizer_classifier, scheduler, start_epoch = load_checkpoint(
                        model=model,
                        optimizer=optimizer_classifier,
                        scheduler=scheduler,
                        filename=path_weights_to_load_from +
                        "/participant_%d/best_state_%d.pt" %
                        (participant_i, 0))
                # Freeze all the weights except those associated with the BN statistics
                model.freeze_all_except_BN()

                best_state = AdaBN_adaptation(
                    model=model,
                    scheduler=scheduler,
                    optimizer_classifier=optimizer_classifier,
                    dataloader=participants_evaluation_dataloader[
                        participant_i][session_j])

                if use_recalibration_data:
                    if not os.path.exists(path_weights_SCADANN + algo_name +
                                          "/participant_%d" % participant_i):
                        os.makedirs(path_weights_SCADANN + algo_name +
                                    "/participant_%d" % participant_i)
                    torch.save(
                        best_state,
                        f=path_weights_SCADANN + algo_name +
                        "/participant_%d/best_state_WITH_recalibration%d.pt" %
                        (participant_i, session_j))
                else:
                    if not os.path.exists(path_weights_SCADANN + algo_name +
                                          "/participant_%d" % participant_i):
                        os.makedirs(path_weights_SCADANN + algo_name +
                                    "/participant_%d" % participant_i)
                    print(os.listdir(path_weights_SCADANN + algo_name))
                    torch.save(
                        best_state,
                        f=path_weights_SCADANN + algo_name +
                        "/participant_%d/best_state_NO_recalibration%d.pt" %
                        (participant_i, session_j))
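

# --- Illustrative sketch (not part of the original code) ---
# model.freeze_all_except_BN() is a method of the repository's TSD_Network whose
# implementation is not shown here. The generic AdaBN idea it stands for, i.e. letting only
# the batch-normalisation layers adapt to the target data, can be sketched as follows
# (function name and details are ours):
import torch.nn as nn


def freeze_all_except_batchnorm_sketch(model):
    for module in model.modules():
        is_batchnorm = isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d))
        for parameter in module.parameters(recurse=False):
            parameter.requires_grad = is_batchnorm
    return model


# The BN running mean/variance are still updated whenever the model is in train() mode,
# which is what the AdaBN adaptation above relies on.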