def main():
    """Train the inverse model on the push dataset, save its weights, and
    plot training/validation loss curves.

    Relies on module-level configuration (bsize, start_state_dims,
    next_state_dims, action_dims, nn_layer_1_size, nn_layer_2_size,
    criterion, lr, seed, num_epochs) and module-level imports
    (DataLoader, ObjPushDataset, InverseModel, torch, np, plt, logger).
    """
    train_dir = 'push_dataset/train'
    test_dir = 'push_dataset/test'

    logger.info("Importing data")
    train_loader = DataLoader(ObjPushDataset(train_dir),
                              batch_size=bsize,
                              shuffle=True)
    valid_loader = DataLoader(ObjPushDataset(test_dir),
                              batch_size=bsize,
                              shuffle=True)

    logger.info("Importing inverse model")
    model = InverseModel(start_state_dims=start_state_dims,
                         next_state_dims=next_state_dims,
                         action_dims=action_dims,
                         latent_var_1=nn_layer_1_size,
                         latent_var_2=nn_layer_2_size,
                         criterion=criterion,
                         lr=lr,
                         seed=seed)

    logger.info("Beginning training")
    loss_list, avg_loss_list, valid_loss_list = model.train_and_validate(
        train_loader, valid_loader, num_epochs)

    logger.info(f'Final train loss: {avg_loss_list[-1]}')
    logger.info(f'Final test loss: {valid_loss_list[-1]}')

    # Save trained model
    logger.info("Saving model parameters to invmodel file")
    torch.save(model.state_dict(), "invmodel_learned_params.pt")

    # Full loss curves over every epoch.
    plt.plot(avg_loss_list, label="Average training loss per epoch")
    plt.plot(valid_loss_list, label="Average validation loss per epoch")
    plt.title("Results over all epochs")
    plt.xlabel("# of Epochs")
    plt.legend()
    plt.show()

    # Zoomed-in curves: drop the first `shift` epochs, whose large initial
    # losses dominate the y-axis and hide later convergence.
    shift = 10
    spacing = 5
    # BUG FIX: the original sliced the curves by [5:] while labelling the
    # axis as if `shift` (10) epochs were dropped, and built xpos/my_xticks
    # arrays of different lengths via np.linspace (e.g. for num_epochs=50:
    # 9 positions vs 10 labels), which can make plt.xticks raise.
    # Slice by `shift` and derive the labels from the positions so the two
    # arrays always align.
    plt.plot(avg_loss_list[shift:], label="Average training loss per epoch")
    plt.plot(valid_loss_list[shift:], label="Average validation loss per epoch")
    tick_positions = np.arange(0, num_epochs - shift + 1, spacing)
    tick_labels = [int(pos) + shift for pos in tick_positions]
    plt.xticks(tick_positions, tick_labels)
    plt.title(f"Zoomed-In Results (over all but first {shift} epochs)")
    plt.xlabel("# of Epochs")
    plt.legend()
    plt.show()
# ===== Example #2 =====
    def __init__(self):
        """Build the forward-model network and the train/validation loaders."""
        self.net = ForwardModelNet()

        batch = 64
        # Both loaders share the same configuration; only the directory differs.
        loaders = {
            split: DataLoader(ObjPushDataset(directory),
                              batch_size=batch,
                              shuffle=True)
            for split, directory in (("train", 'push_dataset/train'),
                                     ("valid", 'push_dataset/test'))
        }
        self.train_loader = loaders["train"]
        self.valid_loader = loaders["valid"]
# ===== Example #3 =====
def train_forward():
    """Train the forward model on the push dataset, logging per-batch
    training loss per epoch, then report final train/test losses and save
    the trained model under "models".

    Relies on module-level imports (torch, np, random, time, DataLoader,
    ObjPushDataset, Logger, Model, get_loss).

    Returns:
        The trained Model instance.
    """
    # Seed every RNG in play (torch / numpy / python) and pin cuDNN so runs
    # are reproducible.
    torch.manual_seed(1)
    np.random.seed(1)
    random.seed(1)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    train_dir = 'push_dataset/train'
    test_dir = 'push_dataset/test'
    bsize = 64
    num_epochs = 5
    num_inputs = 6   # obj1 features + push features, concatenated below
    num_outputs = 2  # obj2 target dimensionality — presumably (x, y); confirm

    train_loader = DataLoader(ObjPushDataset(train_dir), batch_size=bsize, shuffle=True)
    valid_loader = DataLoader(ObjPushDataset(test_dir), batch_size=bsize, shuffle=True)
    # BUG FIX: the original used f"{train_forward}-...", which interpolates
    # the function object itself and embeds "<function train_forward at
    # 0x...>" in the run name. Use the function's name instead.
    logger = Logger(logdir="runs/", run_name=f"{train_forward.__name__}-{time.ctime()}")
    model = Model(num_inputs, num_outputs)
    print("Model Architecture", model)

    for epoch in range(num_epochs):
        for data in train_loader:
            obj1 = data['obj1']
            obj2 = data['obj2']
            push = data['push']

            # Forward-model input: current object state concatenated with the push.
            input_feed = torch.cat((obj1.float(), push.float()), axis=1)
            loss = model.optimize(input_feed, obj2)

            logger.log_epoch("training_loss", loss, epoch)

    print("Done")
    print("Train Loss", get_loss(train_loader, model))
    print("Test Loss", get_loss(valid_loader, model))
    model.save("models")

    return model
# ===== Example #4 =====
def main():
    """Evaluate the trained inverse model on the test set.

    For each test push: predict the action from (start, goal) states,
    execute the predicted push in the simulator, and record the action and
    end-state errors alongside the true and predicted pushes; finally dump
    the results to CSV under results/P1/.

    Relies on module-level configuration (start_state_dims, next_state_dims,
    action_dims, nn_layer_1_size, nn_layer_2_size, criterion, lr, seed,
    num_pushes) and imports (InverseModel, DataLoader, ObjPushDataset,
    PushingEnv, torch, np, pd, logger).
    """
    logger.info("Instantiating model and importing weights")
    # instantiate inverse model and import pretrained weights
    inv_model = InverseModel(start_state_dims=start_state_dims,
                             next_state_dims=next_state_dims,
                             action_dims=action_dims,
                             latent_var_1=nn_layer_1_size,
                             latent_var_2=nn_layer_2_size,
                             criterion=criterion,
                             lr=lr,
                             seed=seed)

    inv_model.load_state_dict(torch.load("invmodel_learned_params.pt"))

    # Load in data
    logger.info("Importing test data")
    test_dir = 'push_dataset/test'
    # only want 1 push each time, so set batch_size to 1
    test_loader = DataLoader(ObjPushDataset(test_dir), batch_size=1, shuffle=True)

    env = PushingEnv()

    errors = []
    true_pushes = []
    pred_pushes = []

    logger.info("Running loop")
    for i, (start_state, goal_state, true_action) in enumerate(test_loader):
        logger.info(f'Iteration #{i}')
        # Convert inputs to floats
        start_state = start_state.float()
        goal_state = goal_state.float()
        true_action = true_action.float()

        # Use inverse model to predict action given the start and goal states
        combined_input = torch.cat((start_state, goal_state), dim=1)
        pred_action = inv_model(combined_input)

        # Switch output from tensors to numpy for easy use later
        # ([0] drops the batch dimension; batch_size is 1).
        start_state = start_state.data.numpy()[0]
        goal_state = goal_state.data.numpy()[0]
        true_action = true_action.data.numpy()[0]
        pred_action = pred_action.data.numpy()[0]

        # Execute the predicted push and observe the resulting state.
        start_x, start_y, end_x, end_y = pred_action
        _, end_state = env.execute_push(start_x, start_y, end_x, end_y)
        end_state = np.array(end_state)

        # Calculate errors
        action_error = np.linalg.norm(true_action - pred_action)
        state_error = np.linalg.norm(goal_state - end_state)

        # Keep the results
        errors.append(dict(action_error=action_error, state_error=state_error))
        true_pushes.append(dict(obj_x=start_state[0], obj_y=start_state[1], start_push_x=true_action[0],
                                start_push_y=true_action[1], end_push_x=true_action[2], end_push_y=true_action[3]))
        pred_pushes.append(dict(obj_x=start_state[0], obj_y=start_state[1], start_push_x=pred_action[0],
                                start_push_y=pred_action[1], end_push_x=pred_action[2], end_push_y=pred_action[3]))

        # NOTE(review): `i > num_pushes - 1` processes num_pushes + 1 pushes
        # (i = 0 .. num_pushes); confirm whether exactly num_pushes was intended.
        if i > num_pushes - 1:
            break

    # BUG FIX: the CSVs were written inside the loop, rewriting the files on
    # every iteration and silently dropping the final iteration's results
    # (the break fired before that iteration's save). Save once, after the loop.
    pd.DataFrame(errors).to_csv("results/P1/inverse_model_errors.csv")
    pd.DataFrame(true_pushes).to_csv("results/P1/true_pushes.csv")
    pd.DataFrame(pred_pushes).to_csv("results/P1/pred_pushes.csv")
# ===== Example #5 =====
def main():
    """Evaluate CEM-based planning with the trained forward model.

    For each test push: plan an action with CEM toward the goal state,
    execute it in the simulator, and record action/state errors alongside
    the true and planned pushes; finally dump the results to CSV under
    results/P2/. Planning failures (ValueError from CEM) are counted and
    skipped so that num_pushes successful plans are still collected.

    Relies on module-level configuration (start_state_dims, next_state_dims,
    action_dims, nn_layer_1_size, nn_layer_2_size, criterion, lr, seed,
    n_iterations, population_size, elite_frac, ang_sigma, len_sigma,
    smoothing_param, num_pushes) and imports (ForwardModel, CEM, DataLoader,
    ObjPushDataset, torch, np, pd, logger).
    """
    logger.info("Instantiating model and importing weights")
    # instantiate forward model and import pretrained weights
    fwd_model = ForwardModel(start_state_dims=start_state_dims,
                             next_state_dims=next_state_dims,
                             action_dims=action_dims,
                             latent_var_1=nn_layer_1_size,
                             latent_var_2=nn_layer_2_size,
                             criterion=criterion,
                             lr=lr,
                             seed=seed)
    fwd_model.load_state_dict(torch.load("fwdmodel_learned_params.pt"))

    logger.info("Instantiating CEM for planning")
    # instantiate CEM for planning
    cem = CEM(fwd_model=fwd_model,
              n_iterations=n_iterations,
              population_size=population_size,
              elite_frac=elite_frac,
              ang_sigma=ang_sigma,
              len_sigma=len_sigma,
              smoothing_param=smoothing_param
              )

    errors = []
    true_pushes = []
    pred_pushes = []
    fails = 0  # number of pushes where CEM failed to produce a plan

    # Load in data
    logger.info("Importing test data")
    test_dir = 'push_dataset/test'
    # only want 1 push each time, so set batch_size to 1
    test_loader = DataLoader(ObjPushDataset(test_dir), batch_size=1, shuffle=True)

    logger.info("Running loop")
    for i, (start_state, goal_state, true_action) in enumerate(test_loader):

        logger.info(f'Iteration #{i}')
        # Convert inputs to floats
        start_state = start_state.float()
        goal_state = goal_state.float()
        true_action = true_action.float()

        # Generate planned action and compare to true action; a ValueError
        # from the planner is treated as a failed plan, not a crash.
        try:
            planned_action = cem.action_plan(start_state=start_state, goal_state=goal_state)
        except ValueError:
            planned_action = None
            fails += 1

        if planned_action is not None:
            # Switch output from tensors to numpy for easy use later
            # ([0] drops the batch dimension; batch_size is 1).
            start_state = start_state.data.numpy()[0]
            goal_state = goal_state.data.numpy()[0]
            true_action = true_action.data.numpy()[0]

            # Execute planned action.
            # NOTE(review): this wraps execute_push's return in np.array and
            # then unpacks two values — presumably a (status, end_state) pair;
            # confirm against execute_push's return type.
            _, output_state = np.array(cem.sampler.environment.execute_push(*planned_action))

            # Calculate errors
            action_error = np.linalg.norm(true_action - planned_action)
            state_error = np.linalg.norm(goal_state - output_state)

            # Keep the results
            errors.append(dict(action_error=action_error, state_error=state_error))
            true_pushes.append(dict(obj_x=start_state[0], obj_y=start_state[1], start_push_x=true_action[0],
                                    start_push_y=true_action[1], end_push_x=true_action[2], end_push_y=true_action[3]))
            pred_pushes.append(dict(obj_x=start_state[0], obj_y=start_state[1], start_push_x=planned_action[0],
                                    start_push_y=planned_action[1], end_push_x=planned_action[2], end_push_y=planned_action[3]))

        # Stop once enough successful plans have been collected (failed
        # iterations don't count toward the quota).
        if i - fails > num_pushes - 1:
            break

    # BUG FIX: the CSVs were written inside the loop, rewriting the files on
    # every iteration and silently dropping the final iteration's results
    # (the break fired before that iteration's save). Save once, after the loop.
    pd.DataFrame(errors).to_csv("results/P2/forward_model_errors.csv")
    pd.DataFrame(true_pushes).to_csv("results/P2/true_pushes.csv")
    pd.DataFrame(pred_pushes).to_csv("results/P2/pred_pushes.csv")