# Shared imports for the examples below. The helper functions and the
# TSD_Network model (load_dataloaders_training_sessions,
# load_dataloaders_test_sessions, load_dataloaders_training_sessions_spectrogram,
# generate_dataloaders_for_SCADANN, generate_dataloaders_evaluation_for_SCADANN,
# generate_dataloaders_for_MultipleVote, load_checkpoint, SCADANN_BN_training,
# AdaBN_adaptation, train_model_standard) are assumed to come from the
# surrounding project and are not imported here.
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim


def run_SCADANN_training_sessions(examples_datasets, labels_datasets, num_neurons, feature_vector_input_length,
                                  path_weights_to_save_to="../weights_SCADANN_One_cycle",
                                  path_weights_Adversarial_training="../weights_REDUCED_DANN_TSD_TWO_Cycles",
                                  path_weights_Normal_training="../Weights/weights_TSD_ELEVEN_Gestures",
                                  number_of_cycle_for_first_training=1, number_of_cycles_rest_of_training=1,
                                  gestures_to_remove=None, number_of_classes=11, percentage_same_gesture_stable=0.65,
                                  learning_rate=0.001316):
    participants_train, _, _ = load_dataloaders_training_sessions(
        examples_datasets, labels_datasets, batch_size=512,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, drop_last=False, get_validation_set=False,
        shuffle=False, ignore_first=True, gestures_to_remove=gestures_to_remove)

    for participant_i in range(len(participants_train)):
        for session_j in range(1, len(participants_train[participant_i])):
            model = TSD_Network(number_of_class=number_of_classes, num_neurons=num_neurons,
                                feature_vector_input_length=feature_vector_input_length).cuda()

            # Define loss function
            cross_entropy_loss_classes = nn.CrossEntropyLoss(reduction='mean').cuda()

            # Define optimizer (learning_rate comes from the function argument)
            optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5, 0.999))

            # Scheduler tolerance: lr updates smaller than eps are ignored
            precision = 1e-8

            model, optimizer, _, start_epoch = load_checkpoint(
                model=model, optimizer=optimizer, scheduler=None,
                filename=path_weights_Adversarial_training + "/participant_%d/best_state_%d.pt" % (participant_i,
                                                                                                   session_j))
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=.2, patience=5,
                                                             verbose=True, eps=precision)
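            # Load the model from every session seen so far; these are handed to
            # generate_dataloaders_for_SCADANN below, presumably to pseudo-label
            # the current session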
            models_array = []
            for j in range(0, session_j + 1):
                model_temp = TSD_Network(number_of_class=number_of_classes, num_neurons=num_neurons,
                                         feature_vector_input_length=feature_vector_input_length).cuda()
                if j == 0:
                    model_temp, _, _, _ = load_checkpoint(
                        model=model_temp, optimizer=None, scheduler=None,
                        filename=path_weights_Normal_training + "/participant_%d/best_state_%d.pt" % (participant_i,
                                                                                                      j))
                else:
                    model_temp, _, _, _ = load_checkpoint(
                        model=model_temp, optimizer=None, scheduler=None,
                        filename=path_weights_Adversarial_training + "/participant_%d/best_state_%d.pt" % (
                            participant_i,
                            j))
                models_array.append(model_temp)
            print("Number of previous models loaded:", len(models_array))

            train_dataloader_replay, validationloader_replay, train_dataloader_pseudo, validationloader_pseudo = \
                generate_dataloaders_for_SCADANN(dataloader_sessions=participants_train[participant_i],
                                                 models=models_array,
                                                 current_session=session_j, validation_set_ratio=0.2, batch_size=64,
                                                 percentage_same_gesture_stable=percentage_same_gesture_stable)
            best_state = SCADANN_BN_training(replay_dataset_train=train_dataloader_replay,
                                             target_validation_dataset=validationloader_pseudo,
                                             target_dataset=train_dataloader_pseudo, model=model,
                                             crossEntropyLoss=cross_entropy_loss_classes,
                                             optimizer_classifier=optimizer,
                                             scheduler=scheduler, patience_increment=10, max_epochs=500,
                                             domain_loss_weight=1e-1)
            os.makedirs(path_weights_to_save_to + "/participant_%d" % participant_i, exist_ok=True)
            torch.save(best_state, f=path_weights_to_save_to +
                                     "/participant_%d/best_state_%d.pt" % (participant_i, session_j))
Example #2
def run_AdaBN_evaluation_sessions(examples_datasets_evaluations,
                                  labels_datasets_evaluation,
                                  algo_name,
                                  num_neurons,
                                  path_weights_to_load_from,
                                  path_weights_SCADANN,
                                  batch_size=512,
                                  use_recalibration_data=False,
                                  number_of_classes=11,
                                  feature_vector_input_length=385):
    # Get the data to use as the TARGET from the evaluation sessions
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets_evaluations,
        labels_datasets_evaluation=labels_datasets_evaluation,
        batch_size=batch_size,
        shuffle=False,
        drop_last=True)
    for participant_i in range(len(participants_evaluation_dataloader)):
        print("SHAPE SESSIONS: ",
              np.shape(participants_evaluation_dataloader[participant_i]))
        for session_j in range(
                0, len(participants_evaluation_dataloader[participant_i])):
            # There are two evaluation sessions for every training session; we adapt on the first one
            if session_j % 2 == 0:
                # Classifier and discriminator
                model = TSD_Network(
                    number_of_class=number_of_classes,
                    num_neurons=num_neurons,
                    feature_vector_input_length=feature_vector_input_length
                ).cuda()
                # loss functions
                crossEntropyLoss = nn.CrossEntropyLoss().cuda()
                # optimizer
                precision = 1e-8
                learning_rate = 0.001316
                optimizer_classifier = optim.Adam(model.parameters(),
                                                  lr=learning_rate,
                                                  betas=(0.5, 0.999))
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                    optimizer=optimizer_classifier,
                    mode='min',
                    factor=.2,
                    patience=5,
                    verbose=True,
                    eps=precision)

                # With recalibration data, load the weights recalibrated for
                # the corresponding training session; otherwise always adapt
                # from the session-0 weights
                session_index = int(session_j / 2) if use_recalibration_data else 0
                model, optimizer_classifier, scheduler, start_epoch = load_checkpoint(
                    model=model,
                    optimizer=optimizer_classifier,
                    scheduler=scheduler,
                    filename=path_weights_to_load_from +
                    "/participant_%d/best_state_%d.pt" %
                    (participant_i, session_index))
                # Freeze all the weights except those associated with the BN statistics
                model.freeze_all_except_BN()

                best_state = AdaBN_adaptation(
                    model=model,
                    scheduler=scheduler,
                    optimizer_classifier=optimizer_classifier,
                    dataloader=participants_evaluation_dataloader[
                        participant_i][session_j])

                suffix = "WITH" if use_recalibration_data else "NO"
                os.makedirs(path_weights_SCADANN + algo_name +
                            "/participant_%d" % participant_i,
                            exist_ok=True)
                torch.save(
                    best_state,
                    f=path_weights_SCADANN + algo_name +
                    "/participant_%d/best_state_%s_recalibration%d.pt" %
                    (participant_i, suffix, session_j))
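
# freeze_all_except_BN above is a project-specific method not shown in this
# listing. Below is a minimal sketch of the AdaBN idea it implements, assuming
# a plain PyTorch module: freeze every parameter except those owned by
# BatchNorm layers. Running mean/var still update in train() mode regardless
# of requires_grad, so forwarding target-session batches refreshes the
# normalization statistics, which is the core of AdaBN.
def freeze_all_except_bn_sketch(model):
    for module in model.modules():
        is_bn = isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d))
        for param in module.parameters(recurse=False):
            # Only the BN affine parameters stay trainable
            param.requires_grad = is_bn
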
Example #3
def run_SCADANN_evaluation_sessions(examples_datasets_evaluations, labels_datasets_evaluation,
                                    examples_datasets_train, labels_datasets_train, algo_name,
                                    num_kernels, filter_size, path_weights_to_load_from, path_weights_SCADANN,
                                    batch_size=512, patience_increment=10, use_recalibration_data=False,
                                    number_of_cycle_for_first_training=4, number_of_cycles_rest_of_training=4,
                                    number_of_classes=11, feature_vector_input_length=385, learning_rate=0.001316):
    # Get the data to use as the SOURCE from the training sessions
    participants_train, _, _ = load_dataloaders_training_sessions_spectrogram(
        examples_datasets_train, labels_datasets_train, batch_size=batch_size,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training, get_validation_set=False,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training, gestures_to_remove=None,
        ignore_first=True, shuffle=False, drop_last=False)

    # Get the data to use as the TARGET from the evaluation sessions
    participants_evaluation_dataloader = load_dataloaders_test_sessions(
        examples_datasets_evaluation=examples_datasets_evaluations,
        labels_datasets_evaluation=labels_datasets_evaluation, batch_size=batch_size, shuffle=False, drop_last=False)

    for participant_i in range(len(participants_evaluation_dataloader)):
        print("SHAPE SESSIONS: ", np.shape(participants_evaluation_dataloader[participant_i]))
        for session_j in range(0, len(participants_evaluation_dataloader[participant_i])):
            # There are two evaluation sessions for every training session; we train on the first one
            if session_j % 2 == 0:
                # Classifier and discriminator
                model = TSD_Network(number_of_class=number_of_classes, num_neurons=num_kernels,
                                    feature_vector_input_length=feature_vector_input_length).cuda()
                # loss functions
                crossEntropyLoss = nn.CrossEntropyLoss().cuda()
                # optimizer
                precision = 1e-8
                optimizer_classifier = optim.Adam(model.parameters(), lr=learning_rate,
                                                  betas=(0.5, 0.999))
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer_classifier, mode='min', factor=.2,
                                                                 patience=5, verbose=True, eps=precision)

                # Both branches differ only in the checkpoint-name suffix
                suffix = "WITH" if use_recalibration_data else "NO"
                model, optimizer_classifier, _, start_epoch = load_checkpoint(
                    model=model, optimizer=optimizer_classifier, scheduler=None,
                    filename=path_weights_to_load_from +
                             "/participant_%d/best_state_%s_recalibration%d.pt" %
                             (participant_i, suffix, session_j))

                # One model per evaluation session used so far; passed to
                # generate_dataloaders_evaluation_for_SCADANN for pseudo-labelling
                models_array = []
                for j in range(0, int(session_j / 2) + 1):
                    model_temp = TSD_Network(number_of_class=number_of_classes, num_neurons=num_kernels,
                                             feature_vector_input_length=feature_vector_input_length).cuda()
                    model_temp, _, _, _ = load_checkpoint(
                        model=model_temp, optimizer=None, scheduler=None,
                        filename=path_weights_to_load_from +
                                 "/participant_%d/best_state_%s_recalibration%d.pt" %
                                 (participant_i, suffix, int(j * 2)))
                    models_array.append(model_temp)

                corresponding_training_session_index = int(session_j / 2) if use_recalibration_data else 0
                train_dataloader_replay, validationloader_replay, train_dataloader_pseudo, validationloader_pseudo = \
                    generate_dataloaders_evaluation_for_SCADANN(
                        dataloader_session_training=participants_train[participant_i][
                            corresponding_training_session_index],
                        dataloader_sessions_evaluation=participants_evaluation_dataloader[participant_i],
                        models=models_array,
                        current_session=session_j, validation_set_ratio=0.2,
                        batch_size=512, use_recalibration_data=use_recalibration_data)

                best_state = SCADANN_BN_training(replay_dataset_train=train_dataloader_replay,
                                                 target_validation_dataset=validationloader_pseudo,
                                                 target_dataset=train_dataloader_pseudo, model=model,
                                                 crossEntropyLoss=crossEntropyLoss,
                                                 optimizer_classifier=optimizer_classifier,
                                                 scheduler=scheduler, patience_increment=patience_increment,
                                                 max_epochs=500,
                                                 domain_loss_weight=1e-1)
                save_dir = path_weights_SCADANN + algo_name + "/participant_%d" % participant_i
                os.makedirs(save_dir, exist_ok=True)
                torch.save(best_state, f=save_dir +
                                         "/best_state_%s_recalibration%d.pt" %
                                         (suffix, session_j))
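
# A hypothetical invocation of the routine above; the pickle file name, its
# keys and the weight paths are illustrative only, and num_kernels is assumed
# to be the list of hidden-layer sizes expected by TSD_Network.
import pickle

with open("../Dataset/processed_TSD_dataset.pickle", "rb") as f:
    dataset = pickle.load(f)

run_SCADANN_evaluation_sessions(
    examples_datasets_evaluations=dataset["examples_evaluation"],
    labels_datasets_evaluation=dataset["labels_evaluation"],
    examples_datasets_train=dataset["examples_training"],
    labels_datasets_train=dataset["labels_training"],
    algo_name="SCADANN",                           # appended to path_weights_SCADANN
    num_kernels=[200, 200, 200],
    filter_size=(4, 10),                           # unused by the TSD variant
    path_weights_to_load_from="../weights_AdaBN",  # hypothetical
    path_weights_SCADANN="../weights_",            # hypothetical prefix
    use_recalibration_data=True)
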
Example #4
def run_MultipleVote_training_sessions(
        examples_datasets,
        labels_datasets,
        num_kernels,
        filter_size=(4, 10),
        path_weights_to_save_to="../weights_SLADANN_One_cycle",
        path_weights_normal_training="../weights_REDUCED_DANN_Spectrogram_TWO_Cycles",
        number_of_cycle_for_first_training=1,
        number_of_cycles_rest_of_training=1,
        gestures_to_remove=None,
        number_of_classes=11,
        feature_vector_input_length=385):
    participants_train, _, _ = load_dataloaders_training_sessions(
        examples_datasets,
        labels_datasets,
        batch_size=512,
        number_of_cycle_for_first_training=number_of_cycle_for_first_training,
        number_of_cycles_rest_of_training=number_of_cycles_rest_of_training,
        drop_last=False,
        get_validation_set=False,
        shuffle=False,
        ignore_first=True,
        gestures_to_remove=gestures_to_remove)

    for participant_i in range(len(participants_train)):
        for session_j in range(1, len(participants_train[participant_i])):
            model = TSD_Network(
                number_of_class=number_of_classes,
                feature_vector_input_length=feature_vector_input_length,
                num_neurons=num_kernels).cuda()

            # Define Loss functions
            cross_entropy_loss_classes = nn.CrossEntropyLoss(
                reduction='mean').cuda()

            # Define optimizer
            learning_rate = 0.001316
            optimizer = optim.Adam(model.parameters(),
                                   lr=learning_rate,
                                   betas=(0.5, 0.999))

            # Define Scheduler
            precision = 1e-8
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                optimizer=optimizer,
                mode='min',
                factor=.2,
                patience=5,
                verbose=True,
                eps=precision)

            model, optimizer, _, start_epoch = load_checkpoint(
                model=model,
                optimizer=optimizer,
                scheduler=None,
                filename=path_weights_normal_training +
                "/participant_%d/best_state_%d.pt" % (participant_i, 0))

            train_dataloader_pseudo, validationloader_pseudo = \
                generate_dataloaders_for_MultipleVote(dataloader_sessions=participants_train[participant_i],
                                                      model=model, current_session=session_j, validation_set_ratio=0.2,
                                                      batch_size=256)

            best_state = train_model_standard(
                model=model,
                criterion=cross_entropy_loss_classes,
                optimizer=optimizer,
                scheduler=scheduler,
                dataloaders={
                    "train": train_dataloader_pseudo,
                    "val": validationloader_pseudo
                },
                precision=precision,
                patience=10,
                patience_increase=10)

            os.makedirs(path_weights_to_save_to +
                        "/participant_%d" % participant_i, exist_ok=True)
            torch.save(best_state,
                       f=path_weights_to_save_to +
                       "/participant_%d/best_state_%d.pt" %
                       (participant_i, session_j))
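
# A hypothetical invocation of the routine above, under the same assumptions
# as the earlier sketches (illustrative file name and keys):
import pickle

with open("../Dataset/processed_TSD_dataset.pickle", "rb") as f:
    dataset = pickle.load(f)

run_MultipleVote_training_sessions(
    examples_datasets=dataset["examples_training"],
    labels_datasets=dataset["labels_training"],
    num_kernels=[200, 200, 200])  # forwarded to TSD_Network as num_neurons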