Example #1
    IM_HEIGHT = 128
    IM_WIDTH = 128
    Z_DIMS = 512
    HIDDEN_LAYER = 100
    EPOCHS = 300
    print("Prediction Dims: {}".format(len(train_set[0][1])))

    vae_model = EncodeDecodePredictor(IM_HEIGHT, IM_WIDTH, Z_DIMS,
                                      HIDDEN_LAYER, len(train_set[0][1]))
    vae_model.to(torch.device("cuda"))

    optimizer = optim.Adam(vae_model.parameters())
    loss_criterion = full_loss
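    # Assumption: full_loss combines the VAE reconstruction and KL-divergence
    # terms tracked as the "recon" / "kl" columns in the commented-out
    # DataFrame below.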
    #print(vae_model)

    results_folder = "logs/vae-test-{}".format(t_stamp())
    decoder_preview_folder = join(results_folder, "decoder-preview")
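    # Assumption: decoder-preview will hold decoded sample images written out
    # during training so reconstruction quality can be inspected per epoch.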
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)
        os.makedirs(decoder_preview_folder)

    # df = pd.DataFrame(index=np.arange(0, EPOCHS), columns=["t-full", "t-recon", "t-kl", "v-full", "v-recon", "v-kl"])
    # print(df.columns)

    for epoch in range(EPOCHS):
        # print("Epoch: {}".format(epoch))
        vae_model.train()
        train_losses = []
        for i, in_batch in enumerate(train_loader):

            temp_print("T Batch {}/{}".format(i, len(train_loader)))
Example #2
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform as unif
from scipy import interpolate
from scipy.interpolate import CubicSpline
from scipy import optimize
from helper_funcs.utils import t_stamp
import os
from os.path import join
from curve_funcs import equally_space_curve



demo_dir = "demos/splines2D-{}".format(t_stamp())

os.mkdir(demo_dir)
os.mkdir(demo_dir + "/train")
os.mkdir(demo_dir + "/val")

ts = np.array([0.0, 0.25, 0.75, 1.0])
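# Assumption: these are the spline parameter values for the four anchor points
# sampled below (start at t=0, the two via-points at t=0.25 / 0.75, goal at t=1).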

fig, ax = plt.subplots()

# Training Data
for i in range(70):
    start = unif([0.0, 0.0], [0.1, 0.1])
    goal = unif([0.9, 0.9], [1.0, 1.0])
    c1 = unif([0.5, 0.05], [0.65, 0.25])
    c2 = unif([0.05, 0.5], [0.25, 0.65])
    distractor_1 = unif([0.3, 0.3], [0.7, 0.7])
Example #3
loss_fn = nn.MSELoss()


train_num = 20
train_set = TensorDataset(start_states[:train_num], pose_hists[:train_num])
val_set = TensorDataset(start_states[train_num:], pose_hists[train_num:])
print("Train Set Size: {}, Val Set Size: {}".format(len(train_set), len(val_set)))

train_loader = DataLoader(train_set, shuffle=True, batch_size=32)
val_loader = DataLoader(val_set, shuffle=False, batch_size=32)
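# Note: with only 20 training samples and batch_size=32, the train loader
# yields a single full batch per epoch; the remaining samples form the val set.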



#%%

results_folder = "logs/synth-wave-{}".format(t_stamp())
if not os.path.exists(results_folder):
    os.makedirs(results_folder)


#%%

# display("", display_id="batch_progress")
for epoch in range(500):
    ## Training Loop
    train_losses = []
    for batch_idx, (ss_batch, rollout_batch) in enumerate(train_loader):
        dims = rollout_batch.shape[2]

        learned_weights = model(ss_batch.view(ss_batch.shape[0], -1)).view(-1, dims, n_basis_funcs)
        dmp = DMP(num_basis_funcs=n_basis_funcs, dt=dt, d=dims, weights=learned_weights)
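        # The network maps each flattened start state to a weight vector,
        # reshaped to (batch, dims, n_basis_funcs), which parameterises the DMP;
        # presumably its rollout is then scored against rollout_batch with the
        # MSE loss defined above (that part is truncated here).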
Example #4
                                 im_params["file_glob"],
                                 from_demo=75)
    val_set = ImagePoseFuturePose(val_paths,
                                  "l_wrist_roll_link",
                                  get_trans(im_params, distorted=False),
                                  skip_count=5)
    val_loader = DeviceDataLoader(
        DataLoader(val_set, exp_config["batch_size"], shuffle=False),
        torch.device("cuda"))

    # current_lr = 1e-5
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # joint_weights = torch.linspace(10, 1, len(exp_config["nn_joint_names"]), device=torch.device("cuda"))
    loss_criterion = nn.L1Loss()

    results_folder = "logs/cup-pour-poseOnly-{}".format(t_stamp())
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    #  TODO: Do I have to rewrite these constraints for the image context?
    # min_bounds = torch.tensor([0.55, -0.1, 0.8], device=torch.device("cuda"))
    # max_bounds = torch.tensor([0.765, 0.24, 1.24], device=torch.device("cuda"))
    # constraint = StayInZone(model, min_bounds, max_bounds)
    # constraint = MoveSlowly(model, 0.02)
    constraint = None
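    # No trajectory constraint is applied in this run; the StayInZone and
    # MoveSlowly variants above are left commented out for reference.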

    for epoch in range(exp_config["epochs"]):
        model.train()

        train_losses = []
        for i, in_batch in enumerate(train_loader):
                                 im_params["file_glob"],
                                 from_demo=30)
    val_set = PoseAndGoal(val_paths,
                          "l_wrist_roll_link",
                          "r_wrist_roll_link",
                          skip_count=10)
    val_loader = DeviceDataLoader(
        DataLoader(val_set, exp_config["batch_size"], shuffle=False),
        torch.device("cuda"))

    model = PosePlusStateNet(100)
    model.to(torch.device("cuda"))
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    loss_criterion = nn.L1Loss()

    results_folder = "logs/cupPourPoseGoalZoneStay-{}".format(t_stamp())
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    min_bounds = torch.tensor([0.55, -0.1, 0.8], device=torch.device("cuda"))
    max_bounds = torch.tensor([0.765, 0.24, 1.24], device=torch.device("cuda"))
    constraint = StayInZone(model, min_bounds, max_bounds)
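    # Assumption: min_bounds / max_bounds are Cartesian x/y/z limits (metres in
    # the robot base frame) that StayInZone uses to penalise predicted wrist
    # poses outside the pouring workspace.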
    # constraint = MoveSlowly(model, 0.02)
    # constraint=None

    for epoch in range(exp_config["epochs"]):
        model.train()
        train_losses = []
        for i, in_batch in enumerate(train_loader):
            print("T Batch {}/{}".format(i, len(train_loader)),
                  end='\r',
Example #6
        from_demo=60,
        to_demo=80,
        skip_count=3)

    model = ResnetJointPredictor(im_params["resize_height"],
                                 im_params["resize_width"],
                                 len(exp_config["nn_joint_names"]))
    model.to(torch.device("cuda"))
    optimizer = optim.Adam(model.parameters())
    joint_weights = torch.linspace(10,
                                   1,
                                   len(exp_config["nn_joint_names"]),
                                   device=torch.device("cuda"))
    loss_criterion = Weighted_MSE(joint_weights)  # nn.MSELoss()
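    # The weights fall linearly from 10 to 1 across exp_config["nn_joint_names"],
    # so Weighted_MSE penalises errors on the first-listed joints (presumably
    # the proximal ones) most heavily.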

    results_folder = "logs/frozen-resnet-wj-skip-2{}".format(t_stamp())
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    for epoch in range(exp_config["epochs"]):
        # Unfreeze after you've learned a little
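        # (Assumption: the ResNet backbone is constructed with requires_grad
        # disabled, so only the prediction head trains for the first 10 epochs;
        # from epoch 10 on the whole network is fine-tuned.)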
        if epoch == 10:
            for param in model.resnet.parameters():
                param.requires_grad = True

        model.train()
        train_losses = []
        for i, in_batch in enumerate(train_loader):

            temp_print("T Batch {}/{}".format(i, len(train_loader)))
            (img_ins, _), target_joints = in_batch
Example #7
import os

import matplotlib.pyplot as plt
import numpy as np

from helper_funcs.utils import t_stamp


def rbf(x, c, h):
    return np.exp(-h * (x - c)**2)

def force_func(start, goal, c1, c2, x):
    return start + x * (goal - start) + rbf(x, c1, 100) - rbf(x, c2, 150)

def force_func_2d(start, goal, c1, t):
    straights = start + (goal - start) * t[:, None]
    return straights + rbf(straights, c1, 100)
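
# Notes on the shape functions above: rbf is an unnormalised Gaussian bump
# centred at c with inverse-width h; force_func perturbs a straight start-to-goal
# line with a positive bump at c1 and a negative bump at c2; force_func_2d adds
# an elementwise bump (centred at c1) to a straight 2-D path. Only force_func_2d
# is used in the loop below.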



demo_dir = "demos/wave_combos-{}".format(t_stamp())
os.mkdir(demo_dir)

# Ok so how about you generate an x and y?
ts = np.linspace(0, 1, 100)

for i in range(1):
    start = np.array([0.0, 0.0])
    goal = np.array([1.0, 1.0])
    c1 = np.array([0.5, 0.75])
    ps = force_func_2d(start, goal, c1, ts)
    plt.scatter(ps[:, 0], ps[:, 1])

    # start = np.random.uniform(0, 0.1)
    # goal = np.random.uniform(0.9, 1.0)
    # c1 = np.random.uniform(0.15, 0.45)
Example #8
    val_set, validation_loader = load_rgbd_demos(
        image_demo_paths(exp_config["demo_folder"], im_params["file_glob"], from_demo=90),
        exp_config["batch_size"],
        "l_wrist_roll_link",
        get_trans(im_params, distorted=True),
        get_grayscale_trans(im_params),
        False,
        torch.device("cuda"))

    model = ZhangNet(im_params["resize_height"], im_params["resize_width"])
    model.to(torch.device("cuda"))
    optimizer = optim.Adam(model.parameters(), weight_decay=1e-4)
    loss_criterion = ZhangLoss()

    results_folder = "logs/zhang-{}".format(t_stamp())
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    for epoch in range(exp_config["epochs"]):
        model.train()
        train_losses = []
        with autograd.detect_anomaly():
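            # detect_anomaly makes autograd raise an error as soon as a NaN/Inf
            # appears in the backward pass (handy for debugging, but slower).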
            for i, in_batch in enumerate(train_loader):
                print("T Batch {}/{}".format(i, len(train_loader)), end='\r', flush=True)
                rgb_ins, depth_ins, past_ins, target_ins = in_batch 
                next_pred, aux_pred = model(rgb_ins, depth_ins, past_ins)
                train_loss = loss_criterion(next_pred, aux_pred, target_ins, past_ins[:, 4])
                train_losses.append([t.item() for t in train_loss])
                optimizer.zero_grad()
                train_loss[0].backward()
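                # ZhangLoss apparently returns a tuple of loss terms; only the
                # first (total) term is backpropagated, while every component is
                # recorded in train_losses.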
Example #9
validation_set, validation_loader = load_demos(exp_config["demo_folder"],
                                               im_params["file_glob"],
                                               exp_config["batch_size"],
                                               exp_config["nn_joint_names"],
                                               im_trans,
                                               False,
                                               device,
                                               from_demo=90,
                                               to_demo=100)
"""
for i in range(0, 9):
    show_torched_im(train_set[i][0][0])
"""

# Train the model
log_path = "./logs/{}-{}".format(exp_config["experiment_name"], t_stamp())
full_model = setup_model(device, im_params["resize_height"],
                         im_params["resize_width"],
                         exp_config["nn_joint_names"])

lower_bounds = joints_lower_limits(exp_config["nn_joint_names"], robot_model)
upper_bounds = joints_upper_limits(exp_config["nn_joint_names"], robot_model)

print(lower_bounds)
print(upper_bounds)
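# Joint position limits for exp_config["nn_joint_names"], taken from robot_model
# and printed as a sanity check before training starts.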

# constraint = StayInZone(full_model, mbs, maxbs, nn_joint_names, robot_model)
# constraint = MoveSlowly(full_model, 2.0, nn_joint_names, robot_model)
# constraint = SmoothMotion(full_model, 0.5, nn_joint_names, robot_model)

# constraint = MatchOrientation(full_model, target_orientation, exp_config["nn_joint_names"], robot_model)