Example 1
import torch
import torch.nn as nn
# 'mdl' is the project's model-definition module; its import statement is not
# shown in the original snippet


class dual_model(nn.Module):
  # NOTE: the original snippet is truncated here; the class header and
  # constructor are reconstructed from how the attributes are used below
  def __init__(self, m1, m2, miu1, sig1, miu2, sig2):
    super(dual_model, self).__init__()
    self.m1 = m1      # first sub-model
    self.m2 = m2      # second sub-model
    self.miu1 = miu1  # label mean for model 1's outputs
    self.sig1 = sig1  # label std for model 1's outputs
    self.miu2 = miu2  # label mean for model 2's outputs
    self.sig2 = sig2  # label std for model 2's outputs

  def transfer(self, device):
    # move the sub-models and the normalization constants to the target device
    self.m1 = self.m1.to(device, torch.float)
    self.m2 = self.m2.to(device, torch.float)
    self.miu1 = self.miu1.to(device, torch.float)
    self.sig1 = self.sig1.to(device, torch.float)
    self.miu2 = self.miu2.to(device, torch.float)
    self.sig2 = self.sig2.to(device, torch.float)

  def forward(self, input1, input2):
    out1 = self.m1(input1)
    out2 = self.m2(input2)

    # un-normalize the predictions back to label units
    out1 = (out1 * self.sig1) + self.miu1
    out2 = (out2 * self.sig2) + self.miu2
    # the combined prediction is the sum of the two un-normalized outputs
    x = out1 + out2

    return x

# load the two pre-trained state models
model1 = mdl.StateModel(54, 3)
model1.load_state_dict(torch.load("best_modelweights_S_PSM1.dat"))
model2 = mdl.StateModel(54, 3)
model2.load_state_dict(torch.load("best_modelweights_S.dat"))

# mean1/std1 and mean2/std2 are the label normalization statistics from
# training; they are defined earlier in the original script
full_model = dual_model(model1, model2, mean1[1:4], std1[1:4], mean2[1:4], std2[1:4])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
full_model.transfer(device)
#%%
import torch.optim as opt

# weight files for the fine-tuned sub-models
weight_file1 = "best_modelweights_S_PSM1_ft.dat"
weight_file2 = "best_modelweights_S_PSM2_ft.dat"

# loss function: summed squared error over the batch
criterion = nn.MSELoss(reduction='sum')
# define optimization method
optimizer = opt.Adam(full_model.parameters(), lr=0.001, weight_decay=0)
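The example stops before the fine-tuning loop itself. Below is a minimal sketch of one training pass; the loader name train_loader and the assumption that it yields (input1, input2, target) batches are hypothetical, not part of the original snippet.

# minimal fine-tuning sketch; 'train_loader' and its batch layout are assumed
for input1, input2, target in train_loader:
    input1 = input1.to(device, torch.float)
    input2 = input2.to(device, torch.float)
    target = target.to(device, torch.float)

    optimizer.zero_grad()
    pred = full_model(input1, input2)  # summed, un-normalized prediction
    loss = criterion(pred, target)
    loss.backward()
    optimizer.step()

# save the fine-tuned sub-model weights to the files defined above
torch.save(full_model.m1.state_dict(), weight_file1)
torch.save(full_model.m2.state_dict(), weight_file2)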
Example 2
import numpy as np
import rospy
import torch
# 'mdl' is the project's model-definition module; its import statement is not
# shown in the original snippet


def state_callback(msg):
    # reconstructed callback header: the snippet is truncated above this line
    global state_input
    # normalize the incoming state vector with the training statistics
    state_input = np.array(msg.data)
    state_input = (state_input - mean1[7:61]) / std1[7:61]


def teleop_callback(data):
    # teleoperation is active while the first button is pressed
    global teleop
    teleop = data.buttons[0] > 0.5


# load the trained model
model = mdl.StateModel(54, 3)
print("loading the model...")
weight_file = "best_modelweights_S.dat"  #"best_modelweights_S_PSM1.dat"
model.load_state_dict(torch.load(weight_file))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device, torch.float)
model.eval()

# create the ROS node
rospy.init_node("NN_node")

# Shared state used by the subscriber callbacks. 'global' declarations at
# module scope are no-ops in Python, so the names are simply defined here.
state_input = None
force_pred = None
force_pred_out = None
teleop = False
# mean1 / std1 are the normalization statistics from training; they are
# loaded elsewhere in the original script
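The snippet ends before the subscribers and the inference loop are created. A plausible continuation is sketched below; the topic names, message types, publisher, and loop rate are assumptions, not taken from the original code.

from sensor_msgs.msg import Joy                    # assumed message type
from std_msgs.msg import Float64MultiArray         # assumed message type

rospy.Subscriber("joy", Joy, teleop_callback)                       # assumed topic
rospy.Subscriber("robot_state", Float64MultiArray, state_callback)  # assumed topic
force_pub = rospy.Publisher("force_pred", Float64MultiArray, queue_size=1)

rate = rospy.Rate(100)  # assumed loop rate in Hz
while not rospy.is_shutdown():
    if teleop and state_input is not None:
        # run the network on the latest normalized state vector
        with torch.no_grad():
            x = torch.tensor(state_input, dtype=torch.float).unsqueeze(0).to(device)
            force_pred = model(x).squeeze(0).cpu().numpy()
        force_pred_out = Float64MultiArray(data=force_pred.tolist())
        force_pub.publish(force_pred_out)
    rate.sleep()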
Example 3
def run_ablations(model, num_ablations):
    """Iteratively remove the top-ranked input feature (by mean GBP saliency),
    retrain the model, and record performance metrics after each ablation."""
    # set up some persistent tracking variables
    remove_features = []  # features removed so far
    metrics_list = []  # one dictionary of performance metrics per evaluation
    # feature indexes
    full_state_index = np.arange(7, 61)  # indices of the 54 state features
    input_state = 54
    # create loss function
    criterion = nn.MSELoss(reduction='sum')
    # define optimization method
    optimizer = opt.Adam(model.parameters(), lr=0.01)
    param_count = []
    param_count.append(count_params(model))
    current_feature_list = np.array(qty)  # qty: global list of feature names

    # create the dataloaders; note that val_list is passed twice, apparently
    # serving as both the validation and the test split
    dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                  val_list, model_type,
                                                  config_dict)

    print('evaluating full model predictions...')
    predictions = mdl.evaluate_model(model,
                                     dataloaders['test'],
                                     model_type=model_type,
                                     no_pbar=True)
    # compute the loss statistics
    print('computing full model performance metrics...')
    metrics = model_eval.compute_loss_metrics(
        predictions, dataloaders['test'].dataset.label_array[:, 1:4])
    metrics_list.append(metrics)
    print('Performance Summary of Full Model:')
    print(metrics)

    print('Running ablation study on model type: ' + model_type)

    for iteration in range(num_ablations):
        print('-' * 10)
        print('Begin ablation run: {}/{}'.format(iteration + 1, num_ablations))
        print('-' * 10)

        # compute the backprop values:
        gbp_data = model_eval.compute_GBP(model,
                                          dataloaders['test'],
                                          num_state_inputs=input_state,
                                          model_type=model_type,
                                          no_pbar=True)
        # evaluate means
        df_gbp_means = model_eval.compute_and_plot_gbp(gbp_data,
                                                       current_feature_list,
                                                       True,
                                                       suppress_plots=True)
        # group by feature type and rank by value
        df_gbp_means = df_gbp_means.groupby('feature').mean().sort_values(
            by='gbp', ascending=False).reset_index()
        # get the top-ranked feature and append it to the removal list
        feature_to_remove = df_gbp_means.iloc[0, 0]
        print("removing " + feature_to_remove + "...")
        remove_features.append(feature_to_remove)
        # create the mask
        mask = np.isin(qty, remove_features, invert=True)
        # mask the full state vector in config_dict global variable
        config_dict['custom_state'] = full_state_index[mask]
        # update the current feature list
        current_feature_list = np.array(qty)[mask]
        # decrease the input dimension of the model by one
        input_state = input_state - 1

        # redefine the models
        print('redefining model with input state dims: {}'.format(input_state))
        if model_type == "VS":
            model = mdl.StateVisionModel(30,
                                         input_state,
                                         3,
                                         feature_extract=feat_extract)
        elif model_type == "S":
            model = mdl.StateModel(input_state, 3)

        # recalculate the number of parameters
        param_count.append(count_params(model))

        # redefine the optimizer
        optimizer = opt.Adam(model.parameters(), lr=0.01)

        # redefine the dataloader
        dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                      val_list, model_type,
                                                      config_dict)

        # retrain the model
        model, train_history, val_history = mdl.train_model(
            model,
            criterion,
            optimizer,
            dataloaders,
            dataset_sizes,
            num_epochs=50,
            model_type=model_type,
            weight_file=weight_file,
            no_pbar=True)
        print('retraining completed')
        # do inference
        print('evaluating model predictions...')
        predictions = mdl.evaluate_model(model,
                                         dataloaders['test'],
                                         model_type=model_type,
                                         no_pbar=True)
        # compute the loss statistics
        print('computing performance metrics...')
        metrics = model_eval.compute_loss_metrics(
            predictions, dataloaders['test'].dataset.label_array[:, 1:4])
        metrics_list.append(metrics)
        print('Performance Summary:')
        print(metrics)

    return remove_features, param_count, metrics_list
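A usage sketch for run_ablations, assuming the module-level names it relies on (qty, config_dict, train_list, val_list, model_type, weight_file, feat_extract) are already populated and model_type is "S", as in the surrounding script:

# illustrative call: ten ablation iterations on the state-only model
model = mdl.StateModel(54, 3)
model.load_state_dict(torch.load("best_modelweights_S.dat"))
removed, param_counts, metrics = run_ablations(model, num_ablations=10)
for i, feature in enumerate(removed):
    # metrics[0] describes the full model, metrics[i + 1] the model after
    # the (i + 1)-th feature was removed
    print("ablation {}: removed '{}'".format(i + 1, feature))
    print(metrics[i + 1])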