Example 1
#%% Create DataLoader for First PSM

file_dir = '../PSM1_data'  # define the file directory for the dataset
model_type = "S"
train_list = [1, 3, 5, 7]
val_list = [1]
test_list = [1]
config_dict = {
    'file_dir': file_dir,
    'include_torque': False,
    'spatial_forces': False,
    'custom_state': None,
    'batch_size': 32,
    'crop_list': None,
    'trans_function': None
}

loader_dict, loader_sizes = dat.init_dataset(train_list, val_list, test_list,
                                             model_type, config_dict)

# extract the training-set mean and std from loader_dict
mean1 = loader_dict["train"].dataset.mean
std1 = loader_dict["train"].dataset.stdev
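
#%% Illustration: standardize a sample with the stored statistics
# Hedged sketch, not part of the original script. It assumes `mean1`/`std1`
# align with the columns of `label_array` (as suggested by Example 5, which
# slices these same statistics to un-normalize predictions).
raw_row = loader_dict["train"].dataset.label_array[0]  # first label row
standardized_row = (raw_row - mean1) / std1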

#%% Create DataLoader for the Second PSM

file_dir = '../experiment_data'  # define the file directory for the dataset
model_type = "S"
train_list = [1, 3, 5, 7,
              8, 10, 12, 14,
              15, 17, 19, 21]
val_list = [1]
test_list = [1]
config_dict = {
    'file_dir': file_dir,
    'include_torque': False,
    'spatial_forces': False,
    'custom_state': None,
    'batch_size': 32,
    'crop_list': None,
    'trans_function': None
}
Example 2
    train_list = [1, 3, 5, 7, 8, 10, 12, 14, 15, 17, 19, 21]
    val_list = [2, 6, 9, 13, 16, 20]
    test_list = [4, 11, 18, 22, 23, 24, 25, 26, 27, 28, 29, 32, 33]
    config_dict = {
        'file_dir': file_dir,
        'include_torque': False,
        'spatial_forces': force_align,
        'custom_state': None,
        'batch_size': 32,
        'crop_list': crop_list,
        'trans_function': trans_function
    }

    # note: val_list is passed as the test split here, so
    # dataloaders['test'] serves the validation files
    dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                  val_list, model_type,
                                                  config_dict)

    # sweep learning rates over a logarithmic grid
    learning_rates = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
    best_loss = np.inf
    best_lr = 0

    for lr in learning_rates:
        print('lr:{}'.format(lr))
        # define model
        if model_type == "VS":
            model = mdl.StateVisionModel(30,
                                         54,
                                         3,
                                         feature_extract=feat_extract)
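
        # Hedged sketch, not in the original snippet: the sweep presumably
        # trains each candidate and keeps the learning rate with the lowest
        # validation loss. The "S" branch, `weight_file`, and the contents
        # of `val_history` (assumed: per-epoch validation losses) are
        # inferred from Example 4, not confirmed by this fragment.
        elif model_type == "S":
            model = mdl.StateModel(54, 3)
        criterion = nn.MSELoss(reduction='sum')  # loss used in Example 4
        optimizer = opt.Adam(model.parameters(), lr=lr)
        model, train_history, val_history = mdl.train_model(
            model, criterion, optimizer, dataloaders, dataset_sizes,
            num_epochs=50, model_type=model_type, weight_file=weight_file,
            no_pbar=True)
        if min(val_history) < best_loss:
            best_loss = min(val_history)
            best_lr = lr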
    # test_list = [4, 11, 18, 22, 23, 24, 25, 26, 27, 28, 29, 32, 33]
    test_list = [4, 8]
    config_dict = {
        'file_dir': file_dir,
        'include_torque': False,
        'spatial_forces': force_align,
        'custom_state': None,
        'batch_size': 32,
        'crop_list': crop_list,
        'trans_function': trans_function
    }

    dataloaders, dataset_sizes = dat.init_dataset(train_list,
                                                  val_list,
                                                  test_list,
                                                  model_type,
                                                  config_dict,
                                                  augment=False)
    np.savetxt('PSM2_mean_smalldata.csv', dataloaders['train'].dataset.mean)
    np.savetxt('PSM2_std_smalldata.csv', dataloaders['train'].dataset.stdev)
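
    # Hedged sketch, not part of the original snippet: the saved statistics
    # can be reloaded later to un-normalize predictions, mirroring Example 5.
    psm2_mean = np.loadtxt('PSM2_mean_smalldata.csv')
    psm2_std = np.loadtxt('PSM2_std_smalldata.csv')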
    '''
    ## if running the ablation, uncomment these lines -------------------
    qty = ['t','fx','fy','fz','tx','ty','tz',
       'px','py','pz','qx','qy','qz','qw','vx','vy','vz','wx','wy','wz',
       'q1','q2','q3','q4','q5','q6','q7',
       'vq1','vq2','vq3','vq4','vq5','vq6','vq7',
       'tq1','tq2','tq3','tq4','tq5','tq6','tq7',
       'q1d','q2d','q3d','q4d','q5d','q6d','q7d',
       'tq1d','tq2d','tq3d','tq4d','tq5d','tq6d','tq7d',
       'psm_fx','psm_fy','psm_fz','psm_tx','psm_ty','psm_tz',
       'J1','J2','J3','J4','J5','J6','J1','J2','J3','J4','J5','J6',
Example 4
def run_ablations(model, num_ablations):
    '''Iteratively ablate the highest-saliency input feature and retrain.'''
    # set up some persistent tracking variables
    remove_features = []  # list of features we are removing
    metrics_list = []  # list storing dictionary of performance metrics
    # feature indexes
    full_state_index = np.arange(7, 61)
    input_state = 54
    # create loss function
    criterion = nn.MSELoss(reduction='sum')
    # define optimization method
    optimizer = opt.Adam(model.parameters(), lr=0.01)
    param_count = []
    param_count.append(count_params(model))
    current_feature_list = np.array(qty)

    # create the dataloader (val_list is reused as the test split, so
    # dataloaders['test'] evaluates on the validation files)
    dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                  val_list, model_type,
                                                  config_dict)

    print('evaluating full model predictions...')
    predictions = mdl.evaluate_model(model,
                                     dataloaders['test'],
                                     model_type=model_type,
                                     no_pbar=True)
    # compute the loss statistics
    print('computing full model performance metrics...')
    metrics = model_eval.compute_loss_metrics(
        predictions, dataloaders['test'].dataset.label_array[:, 1:4])
    metrics_list.append(metrics)
    print('Performance Summary of Full Model:')
    print(metrics)

    print('Running ablation study on model type: ' + model_type)

    for iteration in range(num_ablations):
        print('-' * 10)
        print('Begin ablation run: {}/{}'.format(iteration + 1, num_ablations))
        print('-' * 10)

        # compute the backprop values:
        gbp_data = model_eval.compute_GBP(model,
                                          dataloaders['test'],
                                          num_state_inputs=input_state,
                                          model_type=model_type,
                                          no_pbar=True)
        # evaluate means
        df_gbp_means = model_eval.compute_and_plot_gbp(gbp_data,
                                                       current_feature_list,
                                                       True,
                                                       suppress_plots=True)
        # group by feature type and rank by value
        df_gbp_means = df_gbp_means.groupby('feature').mean().sort_values(
            by='gbp', ascending=False).reset_index()
        # get top ranking value and append to removal list
        feature_to_remove = df_gbp_means.iloc[0, 0]
        print("removing " + feature_to_remove + "...")
        remove_features.append(feature_to_remove)
        # create the mask
        mask = np.isin(qty, remove_features, invert=True)
        # mask the full state vector in config_dict global variable
        config_dict['custom_state'] = full_state_index[mask]
        # update the current feature list
        current_feature_list = np.array(qty)[mask]
        # decrease the input dimension of the model by one
        input_state = input_state - 1

        # redefine the models
        print('redefining model with input state dims: {}'.format(input_state))
        if model_type == "VS":
            model = mdl.StateVisionModel(30,
                                         input_state,
                                         3,
                                         feature_extract=feat_extract)
        elif model_type == "S":
            model = mdl.StateModel(input_state, 3)

        # recalculate the number of parameters
        param_count.append(count_params(model))

        # redefine the optimizer
        optimizer = opt.Adam(model.parameters(), lr=0.01)

        # redefine the dataloader
        dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                      val_list, model_type,
                                                      config_dict)

        # retrain the model
        model, train_history, val_history = mdl.train_model(
            model,
            criterion,
            optimizer,
            dataloaders,
            dataset_sizes,
            num_epochs=50,
            model_type=model_type,
            weight_file=weight_file,
            no_pbar=True)
        print('retraining completed')
        # do inference
        print('evaluating model predictions...')
        predictions = mdl.evaluate_model(model,
                                         dataloaders['test'],
                                         model_type=model_type,
                                         no_pbar=True)
        # compute the loss statistics
        print('computing performance metrics...')
        metrics = model_eval.compute_loss_metrics(
            predictions, dataloaders['test'].dataset.label_array[:, 1:4])
        metrics_list.append(metrics)
        print('Performance Summary:')
        print(metrics)

    return remove_features, param_count, metrics_list
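
# Hedged usage sketch, not part of the original script: `trained_model` is a
# hypothetical model matching `model_type`, and ten ablation rounds are an
# arbitrary choice for illustration.
removed, param_counts, all_metrics = run_ablations(trained_model,
                                                   num_ablations=10)
print('features removed, in order:', removed)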
Example 5
# we need to unnormalize our predictions...
file_dir = '../experiment_data'  # define the file directory for the dataset
train_list = [1, 3, 5, 7, 8, 10, 12, 14, 15, 17, 19, 21]
val_list = [1]
config_dict = {
    'file_dir': file_dir,
    'include_torque': False,
    'spatial_forces': False,
    'custom_state': None,
    'batch_size': 32,
    'crop_list': [],
    'trans_function': None
}

test_list = [1]
loader_dict, loader_sizes = dat.init_dataset(train_list, val_list, test_list,
                                             'S', config_dict)
# the force channels (fx, fy, fz) occupy label columns 1:4
miu = loader_dict['train'].dataset.mean[1:4]
sigma = loader_dict['train'].dataset.stdev[1:4]
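
# Hedged sketch, not part of the original script: given standardized force
# predictions (e.g. from mdl.evaluate_model), scale back to original units.
preds = np.zeros((1, 3))  # hypothetical placeholder for real model outputs
preds_unnormalized = preds * sigma + miu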
#%% TEST LIST SPEC

test_list_full = [
    4, 11, 18, 22, 23, 24, 25, 26, 27, 28, 29, 32, 33, 34, 36, 37, 38, 39
]

condition_list = [
    'center', 'right', 'left', 'right_less', 'right_less', 'right_more',
    'right_more', 'left_less', 'left_less', 'left_more', 'left_more',
    'new_tool', 'new_tool', 'new_material', 'new_material', 'center', 'right',
    'left'
]
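
# Hedged sketch, not part of the original script: pair each test file with
# its experimental condition so results can be grouped by condition later.
file_condition = dict(zip(test_list_full, condition_list))
# e.g. file_condition[4] == 'center'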