Example #1
def chart_current_v_next(demo_path):
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]
    demo_paths = image_demo_paths(exp_config["demo_folder"],
                                  im_params["file_glob"],
                                  from_demo=0,
                                  to_demo=1)

    # Load all of the poses into a list, reuse some load-data functions here
    demo_trajectories = [
        get_pose_hist(d, "l_wrist_roll_link") for d in demo_paths
    ]
    demo_trajectories = [
        np.array([quat_pose_to_rpy(p) for p in dt]).transpose(1, 0)
        for dt in demo_trajectories
    ]

    pose_dim_names = ["x", "y", "z", "r-x", "r-y", "r-z"]
    for i in range(6):
        plt.subplot(2, 3, i + 1)
        plt.xlabel("current {}".format(pose_dim_names[i]))
        plt.ylabel("next {}".format(pose_dim_names[i]))
        for d in demo_trajectories:
            # Plot each pose dimension against its value 5 timesteps later.
            plt.scatter(d[i][0:-5], d[i][5:], alpha=0.05, s=10, color="blue")

    plt.show()
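The helper quat_pose_to_rpy used above is imported from elsewhere in the project and not shown in these examples. A minimal sketch of what it is assumed to do, converting an [x, y, z, qx, qy, qz, qw] pose into [x, y, z, roll, pitch, yaw] with SciPy (the same Rotation class appears in Example #10):

import numpy as np
from scipy.spatial.transform import Rotation as R

def quat_pose_to_rpy(pose):
    # Assumed layout: [x, y, z, qx, qy, qz, qw]; SciPy also expects (x, y, z, w).
    position, quat = np.asarray(pose[:3]), np.asarray(pose[3:])
    rpy = R.from_quat(quat).as_euler("xyz")
    return np.concatenate((position, rpy))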
Example #2
def chart_all_poses_3d(demo_path):
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]
    demo_paths = image_demo_paths(exp_config["demo_folder"],
                                  im_params["file_glob"],
                                  from_demo=0,
                                  to_demo=5)

    # Load all of the poses into a list, reuse some load-data functions here
    demo_trajectories = [
        get_pose_hist(d, "l_wrist_roll_link") for d in demo_paths
    ]
    demo_trajectories = [
        np.array([quat_pose_to_rpy(p) for p in dt]).transpose(1, 0)
        for dt in demo_trajectories
    ]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("t")

    for d in demo_trajectories:
        time = np.linspace(0, 1, len(d[2]))
        ax.scatter(d[0], d[1], time)

    plt.show()
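On matplotlib releases older than 3.2 the '3d' projection used here is only registered once Axes3D has been imported, so the module is assumed to also contain:

from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection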
Example #3
def reset_pose():
    rospy.init_node('pr2_mover', anonymous=True)
    r = rospy.Rate(1)

    moveit_commander.roscpp_initialize(sys.argv)

    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    l_group = moveit_commander.MoveGroupCommander('left_arm')
    l_group.set_pose_reference_frame("base_link")

    r_group = moveit_commander.MoveGroupCommander('right_arm')
    r_group.set_pose_reference_frame("base_link")

    exp_config = byteify(load_json("config/experiment_config.json"))

    left_command = rospy.Publisher('/l_arm_controller/command',
                                   JointTrajectory,
                                   queue_size=10)
    right_command = rospy.Publisher('/r_arm_controller/command',
                                    JointTrajectory,
                                    queue_size=10)

    right_joints = [
        "r_shoulder_pan_joint", "r_shoulder_lift_joint",
        "r_upper_arm_roll_joint", "r_elbow_flex_joint", "r_forearm_roll_joint",
        "r_wrist_flex_joint", "r_wrist_roll_joint"
    ]

    # left_start = [ 0.2242,  0.3300,  1.4105, -0.8090,  0.1163, -0.9732,  0.2731]
    # right_start = [-0.7920,  0.5493, -1.1246, -1.0972, -0.8366, -1.0461, -0.0410]

    # rospy.sleep(3)
    # send_arm_goal(left_start, left_command, exp_config["nn_joint_names"])
    # send_arm_goal(right_start, right_command, right_joints)
    # rospy.sleep(3)

    r_start_pos = np.array([0.6, -0.45, 0.9])
    r_start_rpy = np.array([-1.75, 0.08, 1.17])

    l_start_pos = np.array([0.4, 0.2, 0.8])
    l_start_rpy = r_start_rpy + np.array([np.pi / 2, 0, -np.pi / 2])

    move_to_position_rotation(r_group, r_start_pos, r_start_rpy)
    move_to_position_rotation(l_group, l_start_pos, l_start_rpy)

    print('Robot policy prepared.')
    # model.eval()
    while not rospy.is_shutdown():
        l_start_rpy[0] += 0.1
        move_to_position_rotation(l_group, l_start_pos, l_start_rpy)
        r.sleep()

    print('Done.')
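move_to_position_rotation is another project helper that is not shown here. A hedged sketch, assuming it builds a MoveIt pose target from a position and roll/pitch/yaw and executes the resulting plan:

import geometry_msgs.msg as gm
import tf.transformations

def move_to_position_rotation(group, position, rpy):
    # Assumed behaviour: convert RPY to a quaternion, set the pose target on
    # the given MoveGroupCommander, then plan and execute.
    pose = gm.Pose()
    pose.position.x, pose.position.y, pose.position.z = position
    quat = tf.transformations.quaternion_from_euler(*rpy)
    (pose.orientation.x, pose.orientation.y,
     pose.orientation.z, pose.orientation.w) = quat
    group.set_pose_target(pose)
    success = group.go(wait=True)
    group.stop()
    group.clear_pose_targets()
    return success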
Example #4
def chart_demo_joint_trajectories(demo_path, demo_num):
    exp_config = load_json("config/experiment_config.json")
    # TODO: This is in multiple places, so I think it needs to be config
    im_params = exp_config["image_config"]

    im_trans = Compose([
        Crop(im_params["crop_top"], im_params["crop_left"],
             im_params["crop_height"], im_params["crop_width"]),
        Resize(im_params["resize_height"], im_params["resize_width"])
    ])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    demo_set, demo_loader = load_demos(demo_path,
                                       im_params["file_glob"],
                                       exp_config["batch_size"],
                                       exp_config["nn_joint_names"],
                                       im_trans,
                                       False,
                                       device,
                                       from_demo=demo_num,
                                       to_demo=demo_num + 1)

    joint_poses = []
    next_poses = []
    with torch.no_grad():
        for ins, targets in demo_loader:
            _, in_pos = ins
            joint_pos = [x.cpu().numpy() for x in in_pos]
            next_pos = [x.cpu().numpy() for x in targets]

            joint_poses.extend(joint_pos)
            next_poses.extend(next_pos)

    joint_poses = np.array(joint_poses).transpose()
    next_poses = np.array(next_poses).transpose()

    fig = plt.figure()

    for i, _ in enumerate(joint_poses):
        c_ax = fig.add_subplot(ceil(len(joint_poses) / 3.0), 3, i + 1)
        c_ax.plot(joint_poses[i], label="Joint Poses")
        c_ax.plot(next_poses[i], label="Next Poses")
        c_ax.legend()
        c_ax.title.set_text(exp_config["nn_joint_names"][i])
        c_ax.set_xlabel("t")
        c_ax.set_ylabel("Normed Angle")

    plt.show()
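Crop and Resize here are project-specific transforms rather than the stock torchvision classes (torchvision's Resize takes a single size argument). A sketch of what they are assumed to look like, as plain callables over PIL images:

import torchvision.transforms.functional as TF

class Crop(object):
    def __init__(self, top, left, height, width):
        self.top, self.left, self.height, self.width = top, left, height, width

    def __call__(self, img):
        return TF.crop(img, self.top, self.left, self.height, self.width)

class Resize(object):
    def __init__(self, height, width):
        self.size = (height, width)

    def __call__(self, img):
        return TF.resize(img, self.size)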
Example #5
def chart_all_poses(demo_path):
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]
    demo_paths = image_demo_paths(exp_config["demo_folder"],
                                  im_params["file_glob"],
                                  from_demo=0)

    # Load all of the poses into a list, reuse some load-data functions here
    demo_trajectories = [
        get_pose_hist(d, "l_wrist_roll_link") for d in demo_paths
    ]
    # demo_trajectories = [np.array([quat_pose_to_rpy(p) for p in dt]).transpose(1,0) for dt in demo_trajectories]
    # A quaternion q and -q encode the same rotation, so flip the sign of any
    # sample whose quaternion is closer to the negative of the previous
    # (smoothed) quaternion, keeping the plotted components continuous.
    smoothed_trajectories = []
    for dt in demo_trajectories:
        smoothed_dt = [dt[0]]
        for i in range(1, len(dt)):
            # print(i)
            current_pos, current_quat = dt[i][0:3], dt[i][3:]
            prev_quat = smoothed_dt[i - 1][3:]
            orig_diff = np.abs(current_quat - prev_quat).sum()
            flipped_diff = np.abs(-current_quat - prev_quat).sum()
            if flipped_diff <= orig_diff:
                print("Orig diff {}, Flipped diff{} at {} ".format(
                    orig_diff, flipped_diff, i))
                print("Original quat {}, Flipped {} ".format(
                    current_quat, -current_quat))
                smoothed_dt.append(np.concatenate(
                    (current_pos, -current_quat)))
                print(len(smoothed_dt))
            else:
                smoothed_dt.append(np.concatenate((current_pos, current_quat)))
        smoothed_trajectories.append(np.array(smoothed_dt))

    # smoothed_trajectories = demo_trajectories

    # demo_trajectories = [dt.transpose(1,0) for dt in demo_trajectories]
    demo_trajectories = [dt.transpose(1, 0) for dt in smoothed_trajectories]

    # pose_dim_names = ["x", "y", "z", "r-x", "r-y", "r-z"]
    pose_dim_names = ["x", "y", "z", "q-x", "q-y", "q-z", "q-w"]
    for i in range(len(pose_dim_names)):
        plt.subplot(3, 3, i + 1)
        plt.xlabel("time")
        plt.ylabel((pose_dim_names[i]))
        for d in demo_trajectories:
            plt.plot(d[i], alpha=0.15, color="green")

    plt.show()
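For reference, the sign-continuity fix in the loop above can also be written without the inner Python loop; this sketch uses a dot-product test between consecutive quaternions instead of the L1 comparison, which serves the same purpose:

import numpy as np

def enforce_quat_continuity(traj):
    # traj rows: [x, y, z, qx, qy, qz, qw]
    traj = np.array(traj, dtype=float)
    quats = traj[:, 3:]
    # Flip a sample whenever it points away from its predecessor, accumulating
    # the flips so each decision is made against the already-corrected sign.
    dots = np.sum(quats[1:] * quats[:-1], axis=1)
    flips = np.cumprod(np.where(dots < 0, -1.0, 1.0))
    quats[1:] *= flips[:, None]
    return traj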
Example #6
    # plt.plot(training_df.error, label="Results")
    # # plt.plot(validation_df.error, label="Validation")
    plt.xlabel("Epoch")
    plt.ylabel("Losses")
    plt.legend()
    if save_path is not None:
        plt.savefig(save_path)

    if show_fig:
        plt.show()

    plt.close()


if __name__ == "__main__":
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]
    im_trans = get_trans(im_params, distorted=True)

    train_set, train_loader = load_demos(exp_config["demo_folder"],
                                         im_params["file_glob"],
                                         exp_config["batch_size"],
                                         exp_config["nn_joint_names"],
                                         im_trans,
                                         True,
                                         torch.device("cuda"),
                                         from_demo=0,
                                         to_demo=60,
                                         skip_count=5)

    # for i in range(12):
Example #7
def chart_demo_predictions(model_path, demo_path, demo_num):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    exp_config = load_json("config/experiment_config.json")
    # TODO: This is in multiple places, so I think it needs to be config
    im_params = exp_config["image_config"]

    im_trans = Compose([
        Crop(im_params["crop_top"], im_params["crop_left"],
             im_params["crop_height"], im_params["crop_width"]),
        Resize(im_params["resize_height"], im_params["resize_width"])
    ])
    model = load_model(model_path, device, im_params["resize_height"],
                       im_params["resize_width"], exp_config["nn_joint_names"])

    demo_set, demo_loader = load_demos(demo_path,
                                       im_params["file_glob"],
                                       exp_config["batch_size"],
                                       exp_config["nn_joint_names"],
                                       im_trans,
                                       False,
                                       device,
                                       from_demo=demo_num,
                                       to_demo=demo_num + 1)

    # show_torched_im(demo_set[0][0][0])

    model.eval()
    currents = []
    ests = []
    trues = []

    with torch.no_grad():
        for (ins, targets) in demo_loader:
            _, current_pos = ins
            est_next = model(*ins)
            est_next = [x.cpu().numpy() for x in est_next]
            true_next = [x.cpu().numpy() for x in targets]
            current_pos = [x.cpu().numpy() for x in current_pos]

            ests.extend(est_next)
            trues.extend(true_next)
            currents.extend(current_pos)

    ests = np.array(ests).transpose()
    trues = np.array(trues).transpose()
    currents = np.array(currents).transpose()

    fig = plt.figure()

    for i, _ in enumerate(ests):
        c_ax = fig.add_subplot(ceil(len(ests) / 3.0), 3, i + 1)
        # c_ax.plot(currents[i], label="Current Joint")
        c_ax.plot(ests[i], label="Estimated Next Joints")
        c_ax.plot(trues[i], label="True Next Joints")
        c_ax.legend()
        c_ax.title.set_text(exp_config["nn_joint_names"][i])
        c_ax.set_xlabel("t")
        c_ax.set_ylabel("Normed Radians")

    plt.show()
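load_model is not shown in these examples. A rough sketch of what it presumably does; build_net stands in for whatever constructor the project really uses (the loop above feeds the model both an image and the current joint state):

import torch

def load_model(model_path, device, height, width, joint_names):
    # build_net is a hypothetical placeholder for the project's network class.
    model = build_net(height, width, len(joint_names))
    model.load_state_dict(torch.load(model_path, map_location=device))
    return model.to(device)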
Example #8
def chart_mdn_means(model_path, demo_path, demo_num):
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]

    _, demo_loader = load_pose_state_demos(
        image_demo_paths(exp_config["demo_folder"],
                         im_params["file_glob"],
                         from_demo=demo_num,
                         to_demo=demo_num + 1), exp_config["batch_size"],
        "l_wrist_roll_link", "r_wrist_roll_link", False, torch.device("cuda"))

    model = PosePlusStateNet(100, 2)
    model.load_state_dict(
        torch.load(model_path, map_location=torch.device("cuda")))
    model.to(torch.device("cuda"))
    model.eval()
    mu_all = []
    std_all = []
    pis_all = []
    targets_all = []

    with torch.no_grad():
        for ins in demo_loader:
            current_pose, goal_pose, targets = ins
            pis, stds, mus = model(current_pose, goal_pose)
            print("mu {} std {} pis {}".format(mus.shape, stds.shape,
                                               pis.shape))
            mu_all.append(mus)
            std_all.append(stds)
            pis_all.append(pis)
            targets_all.append(targets)

    mu_all = torch.cat(mu_all)
    std_all = torch.cat(std_all)
    pis_all = torch.cat(pis_all)
    targets_all = torch.cat(targets_all)

    # I'm expecting: mu: N x 5 x 7, std: N x 5, pis: N x 5, targets_all: N x 7
    # print("Shapes: mu: {}, std: {}, pis {}, targets{}".format(mu_all.shape, std_all.shape, pis_all.shape, targets_all.shape))

    # I want: mu: 7 x 5 x N, std: 5 x N, pis: 5 x N, targets_all: 7 x N
    mu_all = mu_all.permute(2, 1, 0).cpu().numpy()
    std_all = std_all.permute(2, 1, 0).cpu().numpy()
    pis_all = pis_all.permute(1, 0).cpu().numpy()
    targets_all = targets_all.permute(1, 0).cpu().numpy()

    print("Shapes: mu: {}, std: {}, pis {}, targets{}".format(
        mu_all.shape, std_all.shape, pis_all.shape, targets_all.shape))

    fig = plt.figure()

    print("STD example", std_all[0][0])

    n_joints, n_components, n_samples = mu_all.shape[0], mu_all.shape[
        1], mu_all.shape[2]
    print("joints {} components {} samples {}".format(n_joints, n_components,
                                                      n_samples))
    for joint_id in range(n_joints):
        plt.subplot(ceil(n_joints / 3.0), 4, joint_id + 1)
        for pi_id in range(n_components):
            plt.plot(mu_all[joint_id, pi_id], label="pi-{}".format(pi_id))
            plt.fill_between(
                range(n_samples),
                mu_all[joint_id, pi_id] - std_all[joint_id, pi_id],
                mu_all[joint_id, pi_id] + std_all[joint_id, pi_id],
                alpha=0.1)

        weighted_mus = mu_all[joint_id] * pis_all
        averaged_mus = weighted_mus.sum(0)
        print("muall: {}, pis_all: {} Weighted: {}, Averaged: {}".format(
            mu_all.shape, pis_all.shape, weighted_mus.shape,
            averaged_mus.shape))
        plt.plot(averaged_mus, label="Averaged")
        plt.plot(targets_all[joint_id], label="Targets")

        plt.legend()
        plt.title(["x", "y", "z", "r-x", "r-y", "r-z"][joint_id])
        plt.xlabel("t")
        plt.ylabel("Normed Radians")

    plt.subplot(ceil(n_joints / 3.0), 4, n_joints + 1)
    for pi_id in range(n_components):
        plt.plot(pis_all[pi_id], label="pi_{}".format(pi_id))
    plt.legend()
    plt.xlabel("t")
    plt.ylabel("Component Weight")
    plt.title("Pis")

    plt.show()
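The chart above collapses the mixture by weighting each component mean with its pi. As a reusable helper working on the raw batched output (pis of shape N x K, mus of shape N x K x D), that reduction is roughly:

import torch

def mixture_mean(pis, mus):
    # Broadcast the component weights over the output dimensions and sum the
    # mixture components out: (N, K, 1) * (N, K, D) -> (N, D).
    return (pis.unsqueeze(-1) * mus).sum(dim=1)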
Example #9
def chart_pred_pose(model_path, demo_path, demo_num):
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]

    train_paths = image_demo_paths(exp_config["demo_folder"],
                                   im_params["file_glob"],
                                   from_demo=demo_num,
                                   to_demo=demo_num + 1)
    train_set = ImagePoseFuturePose(train_paths, "l_wrist_roll_link",
                                    get_trans(im_params, distorted=True), 10)
    demo_loader = DeviceDataLoader(
        DataLoader(train_set, exp_config["batch_size"], shuffle=False),
        torch.device("cuda"))

    model = ImageOnlyNet(im_params["resize_height"], im_params["resize_width"],
                         6)
    model.load_state_dict(
        torch.load(model_path, map_location=torch.device("cuda")))
    model.to(torch.device("cuda"))

    model.eval()
    next_pred_all = []
    # current_pred_all = []
    current_targets_all = []
    next_targets_all = []

    with torch.no_grad():
        for ins in demo_loader:
            rgb_ins, current_ins, target_ins = ins
            next_pred = model(rgb_ins)
            next_pred_all.append(next_pred)
            # current_pred_all.append(current_pred)
            current_targets_all.append(current_ins)
            next_targets_all.append(target_ins)

    next_pred_all = torch.cat(next_pred_all)
    # current_pred_all = torch.cat(current_pred_all)
    current_targets_all = torch.cat(current_targets_all)
    next_targets_all = torch.cat(next_targets_all)

    # Shapes: N X 7

    next_pred_all = next_pred_all.permute(1, 0).cpu().numpy()
    # current_pred_all = current_pred_all.permute(1,0).cpu().numpy()
    current_targets_all = current_targets_all.permute(1, 0).cpu().numpy()
    next_targets_all = next_targets_all.permute(1, 0).cpu().numpy()

    n_pose_dims, n_samples = next_pred_all.shape[0], next_pred_all.shape[1]
    print(n_pose_dims)
    dim_names = ["p-x", "p-y", "p-z", "roll", "pitch", "yaw"]
    for dim_id in range(n_pose_dims):
        plt.subplot(ceil(n_pose_dims / 3.0), 3, dim_id + 1)
        plt.plot(next_pred_all[dim_id], label="Predicted Pose")
        # plt.plot(current_pred_all[dim_id], label="Aux Prediction")
        plt.plot(current_targets_all[dim_id], label="Current Pose")
        plt.plot(next_targets_all[dim_id], label="Target Pose")
        plt.legend()
        plt.xlabel("t")
        plt.ylabel(dim_names[dim_id])

    plt.show()
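DeviceDataLoader is assumed to be the usual thin wrapper that moves each batch onto the target device as it is yielded; a minimal sketch:

def to_device(data, device):
    # Recursively move tensors (or tuples/lists of tensors) onto the device.
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)

class DeviceDataLoader(object):
    def __init__(self, dataloader, device):
        self.dataloader = dataloader
        self.device = device

    def __iter__(self):
        for batch in self.dataloader:
            yield to_device(batch, self.device)

    def __len__(self):
        return len(self.dataloader)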
Example #10
def main(model_path):
    exp_config = load_json("config/experiment_config.json")
    im_params = exp_config["image_config"]
    device = torch.device("cpu")
    rgb_trans = get_trans(im_params, distorted=False)
    # depth_trans = get_grayscale_trans(im_params)

    # train_set, train_loader = load_rgbd_demos(
    #     exp_config["demo_folder"],
    #     im_params["file_glob"],
    #     exp_config["batch_size"],
    #     "l_wrist_roll_link",
    #     rgb_trans,
    #     depth_trans,
    #     True,
    #     device,
    #     from_demo=0,
    #     to_demo=1)

    model = ImagePlusPoseNet(
        (im_params["resize_height"], im_params["resize_width"]), 100)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    model.eval()  # Important if you have dropout / batchnorm layers!

    state_cache = RobotStateCache()
    rgb_sub = rospy.Subscriber('/kinect2/qhd/image_color_rect', Image,
                               state_cache.update_rgb_img)
    # depth_sub = rospy.Subscriber('/kinect2/qhd/image_depth_rect', Image, state_cache.update_depth_img)
    l_pose_sub = rospy.Subscriber('/l_wrist_roll_link_as_posestamped',
                                  gm.PoseStamped, state_cache.update_l_pose)
    r_pose_sub = rospy.Subscriber('/r_wrist_roll_link_as_posestamped',
                                  gm.PoseStamped, state_cache.update_r_pose)
    print("Subscribed")

    rospy.init_node("testMover")

    moveit_commander.roscpp_initialize(sys.argv)

    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    l_group = moveit_commander.MoveGroupCommander('left_arm')
    l_group.set_pose_reference_frame("base_link")

    r_group = moveit_commander.MoveGroupCommander('right_arm')
    r_group.set_pose_reference_frame("base_link")

    # print(l_group.get_end_effector_link())
    # print(l_group.get_pose_reference_frame())

    r = rospy.Rate(1.5)

    # left_start_pos = np.array([0.68368, 0.1201, 0.8733])
    # left_start_quat = np.array([0.49261, -0.5294, -0.4433, 0.529611])

    left_start_pos = np.array([6.88e-01, 1.62e-01,
                               8.38e-01])  # + (np.random.rand(3) - 0.5) * 0.1
    # left_start_quat = np.array([3.746527957264187553e-02, -1.138874327630349376e-02, -5.064174634791493990e-01, 8.613988634984809378e-01])
    left_start_rpy = np.array([0.0, 0.0, -np.pi / 2.0])

    right_start_pos = np.array([
        4.350030651697819328e-01, -3.619320769156886275e-01,
        9.532789909342967993e-01
    ])  #+ (np.random.rand(3) - 0.5) * 0.1
    # right_start_quat = np.array([7.092342993967834519e-02,-1.200804160961578410e-01,5.726577827747867389e-01, 8.078450498599533125e-01])
    # right_start_rpy = tf.transformations.euler_from_quaternion(right_start_quat)
    right_start_rpy = np.array([0.0, 0.0, np.pi / 2.0])

    move_to_pos_rpy(l_group, left_start_pos, left_start_rpy)
    move_to_pos_rpy(r_group, right_start_pos, right_start_rpy)

    rospy.sleep(3)

    # Closed Loop
    while not rospy.is_shutdown():
        print("Step")
        if state_cache.l_pose is not None and state_cache.r_pose is not None and state_cache.rgb_im is not None:
            with torch.no_grad():
                current_pose = torch.tensor(state_cache.l_pose,
                                            dtype=torch.float)
                goal_pose = torch.tensor(state_cache.r_pose, dtype=torch.float)
                print("Current: {}".format(current_pose))

                next_pose = model(
                    rgb_trans(state_cache.rgb_im).unsqueeze(0),
                    current_pose.unsqueeze(0))[0]
                # pis, stds, mus = model( current_pose.unsqueeze(0), goal_pose.unsqueeze(0))
                # next_pose = approx_ml(pis[0], stds[0], mus[0], num_samples=1000)

                print("Next {}".format(next_pose))
                next_position = next_pose[0:3].numpy().astype(np.float64)
                next_rpy = next_pose[3:].numpy().astype(np.float64) * np.pi
                # print("For sanity {}\n{}\n{}".format(next_pose[3:], next_pose[3:].numpy(), next_pose[3:].numpy().astype(np.float64)))
                # print("Next rpy {}".format(next_rpy))
                next_quat = R.from_euler("xyz", next_rpy).as_quat()

                move_to_pos_quat(l_group, next_position, next_quat)
        r.sleep()
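RobotStateCache is also project code that is not shown. A sketch of what it is assumed to provide: the latest camera frame plus the two wrist poses, encoded as position followed by roll/pitch/yaw divided by pi (mirroring how next_pose is decoded above); the exact pose encoding is a guess.

import numpy as np
from cv_bridge import CvBridge
from scipy.spatial.transform import Rotation as R

class RobotStateCache(object):
    def __init__(self):
        self.bridge = CvBridge()
        self.rgb_im = None
        self.l_pose = None
        self.r_pose = None

    def update_rgb_img(self, msg):
        # Convert the sensor_msgs/Image into an RGB numpy array for rgb_trans.
        self.rgb_im = self.bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")

    def _pose_to_array(self, msg):
        p, q = msg.pose.position, msg.pose.orientation
        rpy = R.from_quat([q.x, q.y, q.z, q.w]).as_euler("xyz")
        return np.concatenate(([p.x, p.y, p.z], rpy / np.pi))

    def update_l_pose(self, msg):
        self.l_pose = self._pose_to_array(msg)

    def update_r_pose(self, msg):
        self.r_pose = self._pose_to_array(msg)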