Example #1
    def gripper(gripper, grasp, T_obj_world, color=(0.5, 0.5, 0.5), T_camera_world=None, point=None, point_color=None):
        """ Plots a robotic gripper in a pose specified by a particular grasp object.

        Parameters
        ----------
        gripper : :obj:`dexnet.grasping.RobotGripper`
            the gripper to plot
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot the gripper performing
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        color : 3-tuple
            color of the gripper mesh
        """
        T_gripper_obj = grasp.gripper_pose(gripper)
        T_gripper_world = T_obj_world * T_gripper_obj
        T_mesh_world = T_gripper_world * gripper.T_mesh_gripper.inverse()        
        T_mesh_world = T_mesh_world.as_frames('obj', 'world')
        Visualizer3D.mesh(gripper.mesh.trimesh, T_mesh_world=T_mesh_world, style='surface', color=color)
        Visualizer3D.pose(T_gripper_world)
        if T_camera_world is not None:
            Visualizer3D.pose(T_camera_world)
        if point is not None:
            world_points = np.dot(T_obj_world.matrix, point.T).T
            Visualizer3D.points(world_points[:, :3], scale=0.002, color=point_color)
Example #2
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()
    T_obj_cam = T_cam_obj.inverse()
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
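
The apply_transform helper used here (and in several later examples) is not shown in this listing; below is a minimal sketch of what it presumably does, assuming a 3-vector point and an autolab_core.RigidTransform. This is an assumption, not the original implementation.

import numpy as np

def apply_transform(point, T):
    # Assumed behavior: map a 3-vector through a rigid transform using
    # homogeneous coordinates and return a 3-vector in the target frame.
    p_h = np.append(point, 1.0)       # [x, y, z, 1]
    return T.matrix.dot(p_h)[:3]      # rotate, translate, drop the 1

Under that assumption, apply_transform(np.zeros(3), T) returns the translation of T, which is how the snippets mark frame origins.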
Example #3
def visualize_normals(mesh, vertices, normals):
    scale = 0.01
    normals_scaled = normals * scale

    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)

    for v, n in zip(vertices, normals_scaled):
        vis3d.plot3d([v, v + n], color=(1, 0, 0), tube_radius=0.0005)

    vis3d.show()
Example #4
    def grasp(grasp,
              T_obj_world=RigidTransform(from_frame='obj', to_frame='world'),
              tube_radius=0.0002,
              endpoint_color=(0, 1, 0),
              endpoint_scale=0.0005,
              grasp_axis_color=(0, 1, 0)):
        """ Plots a grasp as an axis and center.

        Parameters
        ----------
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        tube_radius : float
            radius of the plotted grasp axis
        endpoint_color : 3-tuple
            color of the endpoints of the grasp axis
        endpoint_scale : 3-tuple
            scale of the plotted endpoints
        grasp_axis_color : 3-tuple
            color of the grasp axis
        """
        g1, g2 = grasp.endpoints
        center = grasp.center
        g1 = Point(g1, 'obj')
        g2 = Point(g2, 'obj')
        center = Point(center, 'obj')

        g1_tf = T_obj_world.apply(g1)
        g2_tf = T_obj_world.apply(g2)
        center_tf = T_obj_world.apply(center)
        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])

        Visualizer3D.points(g1_tf.data,
                            color=endpoint_color,
                            scale=endpoint_scale)
        Visualizer3D.points(g2_tf.data,
                            color=endpoint_color,
                            scale=endpoint_scale)
        Visualizer3D.plot3d(grasp_axis_tf,
                            color=grasp_axis_color,
                            tube_radius=tube_radius)
        Visualizer3D.pose(grasp.T_grasp_obj,
                          alpha=endpoint_scale * 10,
                          tube_radius=tube_radius,
                          center_scale=endpoint_scale)
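
A minimal, self-contained sketch of the same endpoints-plus-axis plot using raw arrays instead of dexnet.grasping objects. The import path mirrors the "from visualization import Visualizer3D as vis3d" line that appears later in this listing; the endpoint coordinates are made-up values.

import numpy as np
from autolab_core import RigidTransform
from visualization import Visualizer3D as vis3d

# Hand-picked jaw endpoints in the world frame (illustrative only).
g1 = np.array([0.02, 0.0, 0.05])
g2 = np.array([-0.02, 0.0, 0.05])

vis3d.points(g1, color=(0, 1, 0), scale=0.0005)                        # endpoints
vis3d.points(g2, color=(0, 1, 0), scale=0.0005)
vis3d.plot3d(np.array([g1, g2]), color=(0, 1, 0), tube_radius=0.0002)  # grasp axis
vis3d.pose(RigidTransform(), alpha=0.05, tube_radius=0.002, center_scale=0.002)
vis3d.show()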
Example #5
def visualize_gripper(mesh, T_obj_grasp, vertices):
    o = np.array([0., 0., 0.])
    grasp = apply_transform(o, T_obj_grasp)

    # object
    vis3d.mesh(mesh)
    vis3d.points(o, color=(0, 0, 0), scale=0.002)
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.001)

    # gripper
    vis3d.points(grasp, color=(1, 0, 0), scale=0.002)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.002)
    vis3d.pose(T_obj_grasp, alpha=0.01, tube_radius=0.001, center_scale=0.001)

    vis3d.show()
Example #6
def visualize_grasps(mesh, vertices, metrics):
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)

    min_score = float(np.min(metrics))
    max_score = float(np.max(metrics))
    metrics_normalized = (metrics.astype(float) - min_score) / (max_score -
                                                                min_score)

    for v, m in zip(vertices, metrics_normalized):
        vis3d.points(v, color=(1 - m, m, 0), scale=0.001)
        vis3d.plot3d(v, color=(1 - m, m, 0), tube_radius=0.0003)

    vis3d.show()
Example #7
def visualize_test():
    I = RigidTransform()
    g_ab = RigidTransform()
    g_ab.translation = np.array([0.05, 0, 0])
    g_ab.rotation = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])

    q_a = np.array([0., 0., 0.])

    p_b = np.array([0., 0., 0.])
    p_a = apply_transform(p_b, g_ab)

    print('g_ab = \n{}'.format(g_ab.matrix))

    vis3d.pose(I, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(q_a, color=(1, 0, 0), scale=0.005)

    vis3d.pose(g_ab, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(p_a, color=(0, 1, 0), scale=0.005)
    vis3d.show()
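
A quick check of what the demo above should show: rotating the origin does nothing, so the transformed point is just the translation of g_ab and the green point sits 5 cm along x from the red one.

import numpy as np

R_ab = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
t_ab = np.array([0.05, 0.0, 0.0])
p_a = R_ab.dot(np.zeros(3)) + t_ab   # rotation of the origin is the origin
print(p_a)                           # [0.05 0.   0.  ]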
Example #8
def visualize_plan(mesh, T_world_obj, T_world_grasp):
    # visualize the plan in the world frame
    mesh.apply_transform(T_world_obj.matrix)

    o = np.array([0., 0., 0.])
    obj = apply_transform(o, T_world_obj)
    grasp = apply_transform(o, T_world_grasp)

    # base frame
    vis3d.points(o, color=(1, 0, 0), scale=0.005)
    vis3d.pose(RigidTransform(),
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    # object
    vis3d.mesh(mesh)
    vis3d.points(obj, color=(0, 0, 0), scale=0.005)
    vis3d.pose(T_world_obj, alpha=0.03, tube_radius=0.002, center_scale=0.001)

    # grasp
    vis3d.points(grasp, color=(0, 1, 1), scale=0.005)
    vis3d.pose(T_world_grasp,
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    vis3d.show()
Example #9
        T_tool_world, cartesian_impedances=[2000, 2000, 1000, 300, 300, 300])

    logging.info('Finding closest orthogonal grasp')
    T_grasp_world = get_closest_grasp_pose(T_tag_world, T_ready_world)
    T_lift = RigidTransform(translation=[0, 0, 0.2],
                            from_frame=T_ready_world.to_frame,
                            to_frame=T_ready_world.to_frame)
    T_lift_world = T_lift * T_grasp_world

    logging.info('Visualizing poses')
    _, depth_im, _ = sensor.frames()
    points_world = T_camera_world * intr.deproject(depth_im)

    if cfg['vis_detect']:
        vis3d.figure()
        vis3d.pose(RigidTransform())
        vis3d.points(subsample(points_world.data.T, 0.01),
                     color=(0, 1, 0),
                     scale=0.002)
        vis3d.pose(T_ready_world, length=0.05)
        vis3d.pose(T_camera_world, length=0.1)
        vis3d.pose(T_tag_world)
        vis3d.pose(T_grasp_world)
        vis3d.pose(T_lift_world)
        vis3d.show()

    #const_rotation=np.array([[1,0,0],[0,-1,0],[0,0,-1]])
    #test = RigidTransform(rotation=const_rotation,translation=T_tag_world.translation, from_frame='franka_tool', to_frame='world')
    #import pdb; pdb.set_trace()

    rotation = T_tag_world.rotation
Example #10
    output_filename = os.path.join(
        output_path, '{0}_to_world.tf'.format(T_world_obj.from_frame))
    print(T_world_obj)
    T_world_obj.save(output_filename)

    if config['vis'] and VIS_SUPPORTED:

        _, depth_im, _ = sensor.frames()
        pc_cam = ir_intrinsics.deproject(depth_im)
        pc_world = T_world_cam * pc_cam

        mesh_file = ObjFile(
            os.path.join(object_path, '{0}.obj'.format(args.object_name)))
        mesh = mesh_file.read()

        vis.figure(bgcolor=(0.7, 0.7, 0.7))
        vis.mesh(mesh, T_world_obj.as_frames('obj', 'world'), style='surface')
        vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(RigidTransform(from_frame='origin'),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(T_world_cam * T_cb_cam.inverse(),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.points(pc_world, subsample=20)
        vis.show()
    sensor.stop()
Example #11
                points_world = T_camera_world * ir_intrinsics.deproject(depth_im)
                true_robot_points_world = PointCloud(np.array([T.translation for T in robot_poses]).T,
                                                     frame=ir_intrinsics.frame)
                est_robot_points_world = T_camera_world * PointCloud(np.array(robot_points_camera).T,
                                                                     frame=ir_intrinsics.frame)
                mean_est_robot_point = np.mean(est_robot_points_world.data, axis=1).reshape(3,1)
                est_robot_points_world._data = est_robot_points_world._data - mean_est_robot_point + mean_true_robot_point
                fixed_robot_points_world = T_corrected_cb_world * est_robot_points_world
                mean_fixed_robot_point = np.mean(fixed_robot_points_world.data, axis=1).reshape(3,1)
                fixed_robot_points_world._data = fixed_robot_points_world._data - mean_fixed_robot_point + mean_true_robot_point
                vis3d.figure()
                vis3d.points(points_world, color=(0,1,0), subsample=10, random=True, scale=0.001)
                vis3d.points(true_robot_points_world, color=(0,0,1), scale=0.001)
                vis3d.points(fixed_robot_points_world, color=(1,1,0), scale=0.001)
                vis3d.points(est_robot_points_world, color=(1,0,0), scale=0.001)
                vis3d.pose(T_camera_world)
                vis3d.show()

        # save transformation arrays based on setup
        output_dir = os.path.join(config['calib_dir'], sensor_frame)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        pose_filename = os.path.join(output_dir, '%s_to_world.tf' %(sensor_frame))
        T_camera_world.save(pose_filename)
        intr_filename = os.path.join(output_dir, '%s.intr' %(sensor_frame))
        ir_intrinsics.save(intr_filename)
        f = open(os.path.join(output_dir, 'corners_cb_%s.npy' %(sensor_frame)), 'wb')  # binary mode for np.save
        np.save(f, reg_result.cb_points_cam.data)
                
        # move the robot to the chessboard center for verification
        if config['use_robot']:  
Example #12
        from_frame='obj',
        to_frame='table')

    stable_pose = mesh.resting_pose(T_obj_table)
    #print stable_pose.r

    table_dim = 0.3
    T_obj_table_plot = mesh.get_T_surface_obj(T_obj_table)
    T_obj_table_plot.translation[0] += 0.1
    vis.figure()
    vis.mesh(mesh, T_obj_table_plot, color=(1, 0, 0), style='wireframe')
    vis.points(Point(mesh.center_of_mass, 'obj'),
               T_obj_table_plot,
               color=(1, 0, 1),
               scale=0.01)
    vis.pose(T_obj_table_plot, alpha=0.1)
    vis.mesh_stable_pose(mesh,
                         stable_pose,
                         dim=table_dim,
                         color=(0, 1, 0),
                         style='surface')
    vis.pose(stable_pose.T_obj_table, alpha=0.1)
    vis.show()
    exit(0)

    # compute stable poses
    vis.figure()
    vis.mesh(mesh, color=(1, 1, 0), style='surface')
    vis.mesh(mesh.convex_hull(), color=(1, 0, 0))

    stable_poses = mesh.stable_poses()
Example #13
    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.normals

    print('Num vertices:', len(vertices))
    print('Num triangles:', len(triangles))
    print('Num normals:', len(normals))

    # 1. Generate candidate pairs of contact points

    # 2. Check for force closure (a hedged antipodal sketch follows this example)

    # 3. Convert each grasp to a hand pose
    contact1 = vertices[500]
    contact2 = vertices[2000]
    T_obj_gripper = contacts_to_baxter_hand_pose(contact1, contact2)
    print('Translation', T_obj_gripper.translation)
    print('Rotation', T_obj_gripper.quaternion)

    pose_msg = T_obj_gripper.pose_msg

    # 3d visualization to help debug
    vis.figure()
    vis.mesh(mesh)
    vis.points(Point(contact1, frame='test'))
    vis.points(Point(contact2, frame='test'))
    vis.pose(T_obj_gripper, alpha=0.05)
    vis.show()

    # 4. Execute on the actual robot
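
Step 2 above is only a comment in the source; the sketch below shows one way a basic two-contact antipodal (friction cone) check could fill it. This is an assumption, not code from the project: it presumes unit outward surface normals and a Coulomb friction coefficient mu.

import numpy as np

def antipodal_force_closure(c1, c2, n1, n2, mu=0.5):
    # c1, c2: contact points; n1, n2: unit outward surface normals at the contacts.
    # Force closure (antipodal) if the grasp axis lies inside both friction cones.
    d = c2 - c1
    d = d / np.linalg.norm(d)                              # grasp axis, c1 -> c2
    half_angle = np.arctan(mu)                             # friction cone half-angle
    ang1 = np.arccos(np.clip(np.dot(d, -n1), -1.0, 1.0))   # finger at c1 pushes along +d
    ang2 = np.arccos(np.clip(np.dot(-d, -n2), -1.0, 1.0))  # finger at c2 pushes along -d
    return ang1 <= half_angle and ang2 <= half_angle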
Example #14
        vis.imshow(depth_im)
        vis.subplot(1, num_plot, 2)
        vis.imshow(segmask)
        vis.subplot(1, num_plot, 3)
        vis.imshow(obj_segmask)
        vis.show()

        from visualization import Visualizer3D as vis3d
        point_cloud = camera_intr.deproject(depth_im)
        vis3d.figure()
        vis3d.points(point_cloud,
                     subsample=3,
                     random=True,
                     color=(0, 0, 1),
                     scale=0.001)
        vis3d.pose(RigidTransform())
        vis3d.pose(T_camera_world.inverse())
        vis3d.show()

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Init policy.
    policy_type = "cem"
    if "type" in policy_config:
        policy_type = policy_config["type"]
    if policy_type == "ranking":
        policy = RobustGraspingPolicy(policy_config)
    else:
        policy = CrossEntropyRobustGraspingPolicy(policy_config)
Example #15
                vis3d.figure()
                vis3d.points(points_world,
                             color=(0, 1, 0),
                             subsample=10,
                             random=True,
                             scale=0.001)
                vis3d.points(true_robot_points_world,
                             color=(0, 0, 1),
                             scale=0.001)
                vis3d.points(fixed_robot_points_world,
                             color=(1, 1, 0),
                             scale=0.001)
                vis3d.points(est_robot_points_world,
                             color=(1, 0, 0),
                             scale=0.001)
                vis3d.pose(T_camera_world)
                vis3d.show()

        # save transformation arrays based on setup
        output_dir = os.path.join(config['calib_dir'], sensor_frame)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        pose_filename = os.path.join(output_dir,
                                     '%s_to_world.tf' % (sensor_frame))
        T_camera_world.save(pose_filename)
        intr_filename = os.path.join(output_dir, '%s.intr' % (sensor_frame))
        ir_intrinsics.save(intr_filename)
        f = open(
            os.path.join(output_dir, 'corners_cb_%s.npy' % (sensor_frame)),
            'wb')  # binary mode for np.save
        np.save(f, reg_result.cb_points_cam.data)
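
The .tf and .intr files written above can be read back with the matching load calls. A minimal sketch, assuming a recent autolab_core (older setups import CameraIntrinsics from perception) and hypothetical paths and frame names:

from autolab_core import RigidTransform, CameraIntrinsics

sensor_frame = 'kinect2_overhead'          # hypothetical frame name
calib_dir = 'calib/%s' % sensor_frame      # hypothetical calibration directory

T_camera_world = RigidTransform.load('%s/%s_to_world.tf' % (calib_dir, sensor_frame))
ir_intrinsics = CameraIntrinsics.load('%s/%s.intr' % (calib_dir, sensor_frame))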
Example #16
        # camera pose
        cam_dist = 0.3
        T_camera_world = RigidTransform(rotation=np.array([[0, 1,
                                                            0], [1, 0, 0],
                                                           [0, 0, -1]]),
                                        translation=[0, 0, cam_dist],
                                        from_frame=camera_intr.frame,
                                        to_frame='world')

        T_obj_camera = T_camera_world.inverse() * T_obj_world

        # show mesh
        if False:
            vis3d.figure()
            vis3d.mesh(mesh, T_obj_camera)
            vis3d.pose(RigidTransform(), alpha=0.1)
            vis3d.pose(T_obj_camera, alpha=0.1)
            vis3d.show()

        # render depth image
        render_start = time.time()
        IPython.embed()
        renders = virtual_camera.wrapped_images(mesh, [T_obj_camera],
                                                RenderMode.RGBD_SCENE,
                                                mat_props=mat_props,
                                                light_props=light_props,
                                                debug=False)
        render_stop = time.time()
        logging.info('Render took %.3f sec' % (render_stop - render_start))

        vis.subplot(d, d, k + 1)