Example #1
    def gripper(gripper, grasp, T_obj_world, color=(0.5, 0.5, 0.5), T_camera_world=None, point=None, point_color=None):
        """ Plots a robotic gripper in a pose specified by a particular grasp object.

        Parameters
        ----------
        gripper : :obj:`dexnet.grasping.RobotGripper`
            the gripper to plot
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot the gripper performing
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        color : 3-tuple
            color of the gripper mesh
        T_camera_world : :obj:`autolab_core.RigidTransform`, optional
            pose of the camera in the world frame; plotted as an axis if given
        point : :obj:`numpy.ndarray`, optional
            homogeneous points in the object frame to plot in the world frame
        point_color : 3-tuple, optional
            color of the plotted points
        """
        T_gripper_obj = grasp.gripper_pose(gripper)
        T_gripper_world = T_obj_world * T_gripper_obj
        T_mesh_world = T_gripper_world * gripper.T_mesh_gripper.inverse()        
        T_mesh_world = T_mesh_world.as_frames('obj', 'world')
        Visualizer3D.mesh(gripper.mesh.trimesh, T_mesh_world=T_mesh_world, style='surface', color=color)
        Visualizer3D.pose(T_gripper_world)
        if T_camera_world is not None:
            Visualizer3D.pose(T_camera_world)
        if point is not None:
            world_points = np.dot(T_obj_world.matrix, point.T).T
            Visualizer3D.points(world_points[:, :3], scale=0.002, color=point_color)
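A minimal usage sketch for this helper (the gripper name and grasp object are assumptions; any dex-net RobotGripper and Grasp would do):

import numpy as np
from autolab_core import RigidTransform

# Hypothetical setup -- assumes dex-net assets are available locally.
# gripper_model = RobotGripper.load('yumi_metal_spline')  # assumed gripper name
# candidate_grasp = ...                                   # a dexnet.grasping.Grasp
T_obj_world = RigidTransform(rotation=np.eye(3),
                             translation=np.zeros(3),
                             from_frame='obj', to_frame='world')
# gripper(gripper_model, candidate_grasp, T_obj_world, color=(0.4, 0.4, 0.4))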
Example #2
    def showpoint(surface_point,
                  T_obj_world=RigidTransform(from_frame='obj',
                                             to_frame='world'),
                  color=(0.5, 0.5, 0),
                  scale=0.001):
        """ Plots a grasp as an axis and center.

        Parameters
        ----------
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        tube_radius : float
            radius of the plotted grasp axis
        endpoint_color : 3-tuple
            color of the endpoints of the grasp axis
        endpoint_scale : 3-tuple
            scale of the plotted endpoints
        grasp_axis_color : 3-tuple
            color of the grasp axis
        """

        surface_point = Point(surface_point, 'obj')
        surface_point_tf = T_obj_world.apply(surface_point)
        #center_tf = T_obj_world.apply(center)

        Visualizer3D.points(surface_point_tf.data, color=color, scale=scale)
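For example, the same object-frame point can be plotted at the identity pose and again shifted 10 cm along world x (values arbitrary):

import numpy as np
from autolab_core import RigidTransform

p = np.array([0.01, 0.02, 0.0])
T_shift = RigidTransform(translation=[0.1, 0, 0],
                         from_frame='obj', to_frame='world')
showpoint(p)                                        # default identity transform
showpoint(p, T_obj_world=T_shift, color=(0, 0, 1))
Visualizer3D.show()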
Example #3
def main(args):
    # set logging
    logging.getLogger().setLevel(logging.INFO)
    rospy.init_node("ensenso_reader", anonymous=True)

    num_frames = 10
    sensor = RgbdSensorFactory.sensor("ensenso", cfg={"frame": "ensenso"})
    sensor.start()

    total_time = 0
    for i in range(num_frames):
        if i > 0:
            start_time = time.time()

        _, depth_im, _ = sensor.frames()

        if i > 0:
            total_time += time.time() - start_time
            print("Frame %d" % (i))
            print("Avg FPS: %.5f" % (float(i) / total_time))

    depth_im = sensor.median_depth_img(num_img=5)
    point_cloud = sensor.ir_intrinsics.deproject(depth_im)
    point_cloud.remove_zero_points()

    sensor.stop()

    vis2d.figure()
    vis2d.imshow(depth_im)
    vis2d.title("Ensenso - Raw")
    vis2d.show()

    vis3d.figure()
    vis3d.points(point_cloud, random=True, subsample=10, scale=0.0025)
    vis3d.show()
Example #4
def grid_search(pc, indices, model, shadow, img_file):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    scores = np.zeros((int(np.round((maxes[0] - mins[0]) / split_size)),
                       int(np.round((maxes[1] - mins[1]) / split_size))))
    for i in range(int(np.round((maxes[0] - mins[0]) / split_size))):
        x = mins[0] + i * split_size
        for j in range(int(np.round((maxes[1] - mins[1]) / split_size))):
            y = mins[1] + j * split_size
            for sh in rotations(shadow, 8):
                # note: each rotation overwrites scores[i][j], so only the
                # last of the 8 rotations is actually kept
                scores[i][j] = do_stuff(pc, indices, model, sh, img_file)

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " +
          str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0])
                            & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1])
                            & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
Example #5
def show_points(points, color=(0,1,0), scale=0.005, frame_size=0.2, frame_radius=0.02):
    vis.figure(bgcolor=(1,1,1), size=(500,500))
    vis.points(np.array(points), color=color, scale=scale)

    vis.plot3d(np.array(([0, 0, 0], [frame_size, 0, 0])).astype(np.float32), color=(1,0,0), tube_radius=frame_radius)
    vis.plot3d(np.array(([0, 0, 0], [0, frame_size, 0])).astype(np.float32), color=(0,1,0), tube_radius=frame_radius)
    vis.plot3d(np.array(([0, 0, 0], [0, 0, frame_size])).astype(np.float32), color=(0,0,1), tube_radius=frame_radius)

    vis.show()
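Since this helper sets up its own figure and calls show(), it can be exercised directly with a few arbitrary points:

show_points([[0.0, 0.0, 0.1], [0.05, 0.0, 0.1], [0.0, 0.05, 0.1]],
            color=(0, 0, 1), scale=0.01)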
Example #6
def fine_grid_search(pc, indices, model, shadow, splits):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]
    #splits = 3
    step_size = split_size / splits
    
    plane_data = get_plane_data(pc, indices)
    plane_pc = PointCloud(plane_data.T, pc.frame)
    plane_pc = cp.inverse().apply(plane_pc)  # cp: camera pose, a module-level global
    di = ci.project_to_image(plane_pc)       # ci: camera intrinsics, a module-level global
    bi = di.to_binary()
    bi = bi.inverse()

    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    numx = (int(np.round((maxes[0]-mins[0])/split_size)) - 1) * splits + 1
    numy = (int(np.round((maxes[1]-mins[1])/split_size)) - 1) * splits + 1
    scores = np.zeros((numx, numy))
    for i in range(numx):
        x = mins[0] + i*step_size
        for j in range(numy):
            y = mins[1] + j*step_size

            for tow in transforms(pc, pc_data, shadow, x, y, x+split_size, y+split_size, 8, orig_tow):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0]*step_size
    y = mins[1] + best[1]*step_size
    cell_indices = np.where((x < pc_data[:, 0])
                            & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1])
                            & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0,1,1))
    vis3d.points(rest, color=(1,0,1))
    vis3d.show()
    #--------
    return best, scene
Example #7
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()
    T_obj_cam = T_cam_obj.inverse()
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
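Several of these examples call an apply_transform helper that the snippets never define. A plausible minimal version, assuming a 3-vector point and an autolab_core RigidTransform:

import numpy as np

def apply_transform(point, T):
    # Homogenize the point, multiply by the 4x4 matrix, drop the trailing 1.
    p_h = np.append(point, 1.0)
    return T.matrix.dot(p_h)[:3]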
Example #8
def visualize_normals(mesh, vertices, normals):
    scale = 0.01
    normals_scaled = normals * scale

    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)

    for v, n in zip(vertices, normals_scaled):
        vis3d.plot3d([v, v + n], color=(1, 0, 0), tube_radius=0.0005)

    vis3d.show()
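A hedged usage sketch, assuming a mesh file on disk and trimesh's per-vertex normals:

import numpy as np
import trimesh

mesh = trimesh.load('part.obj')               # hypothetical mesh file
idx = np.arange(0, len(mesh.vertices), 50)    # subsample for readability
visualize_normals(mesh, mesh.vertices[idx], mesh.vertex_normals[idx])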
Example #9
def main():
    logging.getLogger().setLevel(logging.INFO)

    # parse args
    parser = argparse.ArgumentParser(description='Register a webcam to the Photoneo PhoXi')
    parser.add_argument('--config_filename', type=str, default='cfg/tools/colorize_phoxi.yaml', help='filename of a YAML configuration for registration')
    args = parser.parse_args()
    config_filename = args.config_filename
    config = YamlConfig(config_filename)

    sensor_data = config['sensors']
    phoxi_config = sensor_data['phoxi']
    phoxi_config['frame'] = 'phoxi'

    # Initialize ROS node
    rospy.init_node('colorize_phoxi', anonymous=True)
    logging.getLogger().addHandler(rl.RosStreamHandler())

    # Get PhoXi sensor set up
    phoxi = RgbdSensorFactory.sensor(phoxi_config['type'], phoxi_config)
    phoxi.start()

    # Capture PhoXi and webcam images
    phoxi_color_im, phoxi_depth_im, _ = phoxi.frames()

    # vis2d.figure()
    # vis2d.subplot(121)
    # vis2d.imshow(phoxi_color_im)
    # vis2d.subplot(122)
    # vis2d.imshow(phoxi_depth_im)
    # vis2d.show()

    phoxi_pc = phoxi.ir_intrinsics.deproject(phoxi_depth_im)
    colors = phoxi_color_im.data.reshape((phoxi_color_im.shape[0] * phoxi_color_im.shape[1], phoxi_color_im.shape[2])) / 255.0
    vis3d.figure()
    vis3d.points(phoxi_pc.data.T[::3], color=colors[::3], scale=0.001)
    vis3d.show()

    # Export to PLY file
    vertices = phoxi.ir_intrinsics.deproject(phoxi_depth_im).data.T
    colors = phoxi_color_im.data.reshape(phoxi_color_im.data.shape[0] * phoxi_color_im.data.shape[1], phoxi_color_im.data.shape[2])
    with open('pcloud.ply', 'w') as f:
        f.write('ply\nformat ascii 1.0\nelement vertex {}\n'.format(len(vertices)) +
                'property float x\nproperty float y\nproperty float z\n' +
                'property uchar red\nproperty uchar green\nproperty uchar blue\n' +
                'end_header\n')
        for v, c in zip(vertices, colors):
            f.write('{} {} {} {} {} {}\n'.format(v[0], v[1], v[2], c[0], c[1], c[2]))
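The hand-rolled PLY writer above works, but the same export can be cross-checked with trimesh (assuming it is installed):

import trimesh

cloud = trimesh.points.PointCloud(vertices, colors=colors)
cloud.export('pcloud_trimesh.ply')  # hypothetical alternative output path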
Example #10
    def grasp(grasp,
              T_obj_world=RigidTransform(from_frame='obj', to_frame='world'),
              tube_radius=0.0002,
              endpoint_color=(0, 1, 0),
              endpoint_scale=0.0005,
              grasp_axis_color=(0, 1, 0)):
        """ Plots a grasp as an axis and center.

        Parameters
        ----------
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        tube_radius : float
            radius of the plotted grasp axis
        endpoint_color : 3-tuple
            color of the endpoints of the grasp axis
        endpoint_scale : 3-tuple
            scale of the plotted endpoints
        grasp_axis_color : 3-tuple
            color of the grasp axis
        """
        g1, g2 = grasp.endpoints
        center = grasp.center
        g1 = Point(g1, 'obj')
        g2 = Point(g2, 'obj')
        center = Point(center, 'obj')

        g1_tf = T_obj_world.apply(g1)
        g2_tf = T_obj_world.apply(g2)
        center_tf = T_obj_world.apply(center)
        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])

        Visualizer3D.points(g1_tf.data,
                            color=endpoint_color,
                            scale=endpoint_scale)
        Visualizer3D.points(g2_tf.data,
                            color=endpoint_color,
                            scale=endpoint_scale)
        Visualizer3D.plot3d(grasp_axis_tf,
                            color=grasp_axis_color,
                            tube_radius=tube_radius)
        Visualizer3D.pose(grasp.T_grasp_obj,
                          alpha=endpoint_scale * 10,
                          tube_radius=tube_radius,
                          center_scale=endpoint_scale)
Example #11
def grid_search(pc, indices, model, shadow, img_file):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    scores = np.zeros((int(np.round((maxes[0] - mins[0]) / split_size)),
                       int(np.round((maxes[1] - mins[1]) / split_size))))
    for i in range(int(np.round((maxes[0] - mins[0]) / split_size))):
        x = mins[0] + i * split_size
        for j in range(int(np.round((maxes[1] - mins[1]) / split_size))):
            y = mins[1] + j * split_size

            #binarized_overlap_image(pc, x, y, x+split_size, y+split_size, shadow, plane_normal, indices, model)

            for sh in rotations(shadow, 8):
                #overlap_size = binarized_overlap_image(pc, x, y, x+split_size, y+split_size, sh, plane_normal, indices, model)
                #scores[i][j] = -1*overlap_size
                scene = Scene()
                camera = VirtualCamera(ci, cp)
                scene.camera = camera
                scores[i][j] = under_shadow(pc, pc_data, indices, model, sh, x,
                                            x + split_size, y, y + split_size,
                                            scene)

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " +
          str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0])
                            & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1])
                            & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
    vis3d.points(rest, color=(1, 0, 1))
    vis3d.show()
Example #12
    def vis(self, mesh, grasp_vertices, grasp_qualities, grasp_normals):
        """
        Pass in any grasp and its associated grasp quality.  this function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp containts two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx' :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        vis3d.mesh(mesh)

        middle_of_part = np.mean(np.mean(grasp_vertices, axis=1), axis=0)
        print(middle_of_part)
        vis3d.points(middle_of_part, scale=0.003)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)

        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        n0 = np.zeros(grasp_endpoints.shape)
        n1 = np.zeros(grasp_endpoints.shape)

        normal_scale = 0.01
        n0[:, 0] = grasp_vertices[:, 0]
        n0[:, 1] = grasp_vertices[:, 0] + normal_scale * grasp_normals[:, 0]
        n1[:, 0] = grasp_vertices[:, 1]
        n1[:, 1] = grasp_vertices[:, 1] + normal_scale * grasp_normals[:, 1]

        for grasp, quality, normal0, normal1 in zip(grasp_endpoints,
                                                    grasp_qualities, n0, n1):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)
            vis3d.plot3d(normal0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(normal1, color=(0, 0, 0), tube_radius=.002)
        vis3d.show()
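The color expression used in the loop is a piecewise-linear ramp that plots quality 0 as red, 0.5 as yellow, and 1 as green; as a standalone check:

def quality_to_color(q):
    # Red -> yellow -> green ramp for q in [0, 1] (RGBA, alpha fixed at 1).
    return [min(1, 2 * (1 - q)), min(1, 2 * q), 0, 1]

assert quality_to_color(0.0)[:3] == [1, 0, 0]   # worst grasp: red
assert quality_to_color(0.5)[:3] == [1, 1, 0]   # middling: yellow
assert quality_to_color(1.0)[:3] == [0, 1, 0]   # best grasp: green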
Example #13
def visualize_grasps(mesh, vertices, metrics):
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)

    min_score = float(np.min(metrics))
    max_score = float(np.max(metrics))
    metrics_normalized = (metrics.astype(float) - min_score) / (max_score -
                                                                min_score)

    for v, m in zip(vertices, metrics_normalized):
        vis3d.points(v, color=(1 - m, m, 0), scale=0.001)
        vis3d.plot3d(v, color=(1 - m, m, 0), tube_radius=0.0003)

    vis3d.show()
Example #14
def visualize_test():
    I = RigidTransform()
    g_ab = RigidTransform()
    g_ab.translation = np.array([0.05, 0, 0])
    g_ab.rotation = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])

    q_a = np.array([0., 0., 0.])

    p_b = np.array([0., 0., 0.])
    p_a = apply_transform(p_b, g_ab)

    print('g_ab = \n{}'.format(g_ab.matrix))

    vis3d.pose(I, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(q_a, color=(1, 0, 0), scale=0.005)

    vis3d.pose(g_ab, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(p_a, color=(0, 1, 0), scale=0.005)
    vis3d.show()
Example #15
def main():
    start_time = time.time()
    img_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/depth_ims_numpy/image_000001.npy'
    ci_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/camera_intrinsics.intr'
    mesh_file = 'demon_helmet.obj'

    indices, model, image, pc = largest_planar_surface(img_file, ci_file)
    mesh, best_pose, rt = find_stable_poses(mesh_file)
    shadow = find_shadow(mesh, best_pose, model[0:3])

    vis3d.figure()
    vis3d.points(pc, color=(1, 0, 0))
    vis3d.mesh(shadow, rt)
    vis3d.show()

    scores, split_size = score_cells(pc, indices, model, shadow, ci_file)
    ind = best_cell(scores)
    # print("Scores: \n" + str(scores))
    # print("\nBest cell = " + str(ind))

    print("--- %s seconds ---" % (time.time() - start_time))
Example #16
def visualize_plan(mesh, T_world_obj, T_world_grasp):
    # visualize the plan in the world frame
    mesh.apply_transform(T_world_obj.matrix)

    o = np.array([0., 0., 0.])
    obj = apply_transform(o, T_world_obj)
    grasp = apply_transform(o, T_world_grasp)

    # base frame
    vis3d.points(o, color=(1, 0, 0), scale=0.005)
    vis3d.pose(RigidTransform(),
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    # object
    vis3d.mesh(mesh)
    vis3d.points(obj, color=(0, 0, 0), scale=0.005)
    vis3d.pose(T_world_obj, alpha=0.03, tube_radius=0.002, center_scale=0.001)

    # grasp
    vis3d.points(grasp, color=(0, 1, 1), scale=0.005)
    vis3d.pose(T_world_grasp,
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    vis3d.show()
Example #17
def main(args):
    # set logging
    logging.getLogger().setLevel(logging.INFO)
    rospy.init_node('ensenso_reader', anonymous=True)

    num_frames = 10
    #sensor = PhoXiSensor(frame='phoxi',
    #                     size='small')
    sensor = EnsensoSensor(frame='ensenso')
    sensor.start()

    total_time = 0
    for i in range(num_frames):
        if i > 0:
            start_time = time.time()

        _, depth_im, _ = sensor.frames()

        if i > 0:
            total_time += time.time() - start_time
            print('Frame %d' % (i))
            print('Avg FPS: %.5f' % (float(i) / total_time))

    depth_im = sensor.median_depth_img(num_img=5)
    point_cloud = sensor.ir_intrinsics.deproject(depth_im)
    point_cloud.remove_zero_points()

    sensor.stop()

    vis2d.figure()
    vis2d.imshow(depth_im)
    vis2d.title('Ensenso - Raw')
    vis2d.show()

    vis3d.figure()
    vis3d.points(point_cloud, random=True, subsample=10, scale=0.0025)
    vis3d.show()
Example #18
def visualize_gripper(mesh, T_obj_grasp, vertices):
    o = np.array([0., 0., 0.])
    grasp = apply_transform(o, T_obj_grasp)

    # object
    vis3d.mesh(mesh)
    vis3d.points(o, color=(0, 0, 0), scale=0.002)
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.001)

    # gripper
    vis3d.points(grasp, color=(1, 0, 0), scale=0.002)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.002)
    vis3d.pose(T_obj_grasp, alpha=0.01, tube_radius=0.001, center_scale=0.001)

    vis3d.show()
Example #19
    def vis(self, mesh, grasp_vertices, grasp_qualities, top_n_grasp_vertices,
            approach_directions, rs, TG):
        """
        Pass in any grasp and its associated grasp quality.  this function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp containts two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx' :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        vis3d.mesh(mesh)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)
        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        for i, (grasp,
                quality) in enumerate(zip(grasp_endpoints, grasp_qualities)):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)

        # normalized RGB floats in [0, 1], as the visualizer expects
        blue = (0.0, 0.0, 1.0)
        light_blue = (0.2, 0.2, 0.78)

        for i, (grasp, approach_direction) in enumerate(
                zip(top_n_grasp_vertices, approach_directions)):
            midpoint = np.mean(grasp, axis=0)
            approach_direction = np.asarray(
                [midpoint, midpoint - 0.1 * approach_direction])
            if (i == 0):
                vis3d.plot3d(approach_direction, color=blue, tube_radius=.005)
                vis3d.points(grasp, color=blue, scale=.005)
            else:
                vis3d.plot3d(approach_direction,
                             color=light_blue,
                             tube_radius=.001)
                vis3d.points(grasp, color=light_blue, scale=.001)

        midpoint = np.mean(top_n_grasp_vertices[0], axis=0)
        x_purp = (0.8, 0.0, 0.8)
        x_axis = np.asarray([midpoint, midpoint + 0.1 * rs[:, 0]])
        y_cyan = (0.0, 0.8, 0.8)
        y_axis = np.asarray([midpoint, midpoint + 0.1 * rs[:, 1]])
        z_black = (0.0, 0.0, 0.0)
        z_axis = np.asarray([midpoint, midpoint + 0.3 * rs[:, 2]])

        vis3d.plot3d(x_axis, color=x_purp, tube_radius=.005)
        vis3d.plot3d(y_axis, color=y_cyan, tube_radius=.005)
        vis3d.plot3d(z_axis, color=z_black, tube_radius=.005)

        origin = np.asarray([0, 0, 0])
        x_axis = np.asarray([origin, 0.2 * np.array([1, 0, 0])])
        y_axis = np.asarray([origin, 0.2 * np.array([0, 1, 0])])
        z_axis = np.asarray([origin, 0.2 * np.array([0, 0, 1])])

        vis3d.plot3d(x_axis, color=x_purp, tube_radius=.005)
        vis3d.plot3d(y_axis, color=y_cyan, tube_radius=.005)
        vis3d.plot3d(z_axis, color=z_black, tube_radius=.005)

        # eucl_orien = np.asarray(tfs.euler_from_quaternion(TG.quaternion))
        intermediate_pos = TG.position - np.reshape(
            np.matmul(TG.rotation, np.array([[0], [0], [0.2]])), (1, 3))
        print(intermediate_pos, np.shape(intermediate_pos))
        red = (1.0, 0.0, 0.0)
        vis3d.points(intermediate_pos, color=red, scale=.01)
        vis3d.points(TG.position, color=red, scale=.01)

        vis3d.show()
Example #20
    output_filename = os.path.join(
        output_path, '{0}_to_world.tf'.format(T_world_obj.from_frame))
    print(T_world_obj)
    T_world_obj.save(output_filename)

    if config['vis'] and VIS_SUPPORTED:

        _, depth_im, _ = sensor.frames()
        pc_cam = ir_intrinsics.deproject(depth_im)
        pc_world = T_world_cam * pc_cam

        mesh_file = ObjFile(
            os.path.join(object_path, '{0}.obj'.format(args.object_name)))
        mesh = mesh_file.read()

        vis.figure(bgcolor=(0.7, 0.7, 0.7))
        vis.mesh(mesh, T_world_obj.as_frames('obj', 'world'), style='surface')
        vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(RigidTransform(from_frame='origin'),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(T_world_cam * T_cb_cam.inverse(),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.points(pc_world, subsample=20)
        vis.show()
    sensor.stop()
Example #21
    logging.info('Finding closest orthogonal grasp')
    T_grasp_world = get_closest_grasp_pose(T_tag_world, T_ready_world)
    T_lift = RigidTransform(translation=[0, 0, 0.2],
                            from_frame=T_ready_world.to_frame,
                            to_frame=T_ready_world.to_frame)
    T_lift_world = T_lift * T_grasp_world

    logging.info('Visualizing poses')
    _, depth_im, _ = sensor.frames()
    points_world = T_camera_world * intr.deproject(depth_im)

    if cfg['vis_detect']:
        vis3d.figure()
        vis3d.pose(RigidTransform())
        vis3d.points(subsample(points_world.data.T, 0.01),
                     color=(0, 1, 0),
                     scale=0.002)
        vis3d.pose(T_ready_world, length=0.05)
        vis3d.pose(T_camera_world, length=0.1)
        vis3d.pose(T_tag_world)
        vis3d.pose(T_grasp_world)
        vis3d.pose(T_lift_world)
        vis3d.show()

    #const_rotation=np.array([[1,0,0],[0,-1,0],[0,0,-1]])
    #test = RigidTransform(rotation=const_rotation,translation=T_tag_world.translation, from_frame='franka_tool', to_frame='world')
    #import pdb; pdb.set_trace()

    rotation = T_tag_world.rotation
    rotation[0:2, :] = -1 * rotation[0:2, :]
Example #22
    def compute_approach_direction(self, mesh, grasp_vertices, grasp_quality,
                                   grasp_normals):

        ## initializing stuff ##
        visualize = True
        nb_directions_to_test = 6
        normal_scale = 0.01
        plane_normal = normalize(grasp_vertices[0] - grasp_vertices[1])

        midpoint = (grasp_vertices[0] + grasp_vertices[1]) / 2

        ## generating a certain number of approach directions ##
        theta = np.pi / nb_directions_to_test
        rot_mat = rotation_3d(-plane_normal, theta)

        horizontal_direction = normalize(
            np.cross(plane_normal, np.array([0, 0, 1])))
        directions_to_test = [horizontal_direction]  #these are vectors
        approach_directions = [
            np.array(
                [midpoint, midpoint + horizontal_direction * normal_scale])
        ]  #these are two points for visualization

        for i in range(nb_directions_to_test - 1):
            directions_to_test.append(
                normalize(np.matmul(rot_mat, directions_to_test[-1])))
            approach_directions.append(
                np.array([
                    midpoint, midpoint + directions_to_test[-1] * normal_scale
                ]))

        ## computing the palm position for each approach direction ##
        palm_positions = []
        for i in range(nb_directions_to_test):
            palm_positions.append(midpoint +
                                  finger_length * directions_to_test[i])

        if visualize:
            ## plotting the whole mesh ##
            vis3d.mesh(mesh, style='wireframe')

            ## computing and plotting midpoint and gripper position ##
            dirs = (grasp_vertices[0] - grasp_vertices[1]
                    ) / np.linalg.norm(grasp_vertices[0] - grasp_vertices[1])
            grasp_endpoints = np.zeros(grasp_vertices.shape)
            grasp_endpoints[0] = midpoint + dirs * MAX_HAND_DISTANCE / 2
            grasp_endpoints[1] = midpoint - dirs * MAX_HAND_DISTANCE / 2

            color = [
                min(1, 2 * (1 - grasp_quality)),
                min(1, 2 * grasp_quality), 0, 1
            ]
            vis3d.plot3d(grasp_endpoints, color=color, tube_radius=.001)
            vis3d.points(midpoint, scale=0.003)

            ## computing and plotting normals at contact points ##
            n0 = np.zeros(grasp_endpoints.shape)
            n1 = np.zeros(grasp_endpoints.shape)
            n0[0] = grasp_vertices[0]
            n0[1] = grasp_vertices[0] + normal_scale * grasp_normals[0]
            n1[0] = grasp_vertices[1]
            n1[1] = grasp_vertices[1] + normal_scale * grasp_normals[1]
            vis3d.plot3d(n0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(n1, color=(0, 0, 0), tube_radius=.002)

            ## plotting the palm positions for each potential approach direction ##
            for i in range(nb_directions_to_test):
                vis3d.points(palm_positions[i], scale=0.003)

            vis3d.show()

        directions_to_test = [
            directions_to_test[3], directions_to_test[2],
            directions_to_test[4], directions_to_test[1],
            directions_to_test[5], directions_to_test[0]
        ]
        palm_positions = [
            palm_positions[3], palm_positions[2], palm_positions[4],
            palm_positions[1], palm_positions[5], palm_positions[0]
        ]

        ## checking if some approach direction is valid ##
        for i in range(nb_directions_to_test):
            if len(
                    trimesh.intersections.mesh_plane(mesh,
                                                     directions_to_test[i],
                                                     palm_positions[i])) == 0:
                # it means the palm won't bump with part
                return directions_to_test[i]

        # it means all approach directions will bump with part
        return -1
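Note the mixed return type (a direction vector on success, the integer -1 when every direction collides); a caller has to test it along these lines (sketch, names assumed):

direction = planner.compute_approach_direction(mesh, grasp_vertices,
                                               grasp_quality, grasp_normals)
if isinstance(direction, int):
    print('all approach directions would bump the palm into the part')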
Example #23
KSIZE = 9

if __name__ == '__main__':
    depth_im_filename = sys.argv[1]
    camera_intr_filename = sys.argv[2]

    camera_intr = CameraIntrinsics.load(camera_intr_filename)
    depth_im = DepthImage.open(depth_im_filename, frame=camera_intr.frame)

    depth_im = depth_im.inpaint()

    point_cloud_im = camera_intr.deproject_to_image(depth_im)
    normal_cloud_im = point_cloud_im.normal_cloud_im(ksize=KSIZE)
    # assumed: a second, smoother normal estimate for comparison; the original
    # snippet uses normal_cloud_im_s below without defining it
    normal_cloud_im_s = point_cloud_im.normal_cloud_im(ksize=2 * KSIZE + 1)

    vis3d.figure()
    vis3d.points(point_cloud_im.to_point_cloud(), scale=0.0025)

    alpha = 0.025
    subsample = 20
    for i in range(0, point_cloud_im.height, subsample):
        for j in range(0, point_cloud_im.width, subsample):
            p = point_cloud_im[i, j]
            n = normal_cloud_im[i, j]
            n2 = normal_cloud_im_s[i, j]
            if np.linalg.norm(n) > 0:
                points = np.array([p, p + alpha * n])
                vis3d.plot3d(points, tube_radius=0.001, color=(1, 0, 0))

                points = np.array([p, p + alpha * n2])
                vis3d.plot3d(points, tube_radius=0.001, color=(1, 0, 1))
Example #24
            # vis
            if config['vis_points']:
                _, depth_im, _ = sensor.frames()
                points_world = T_camera_world * ir_intrinsics.deproject(depth_im)
                true_robot_points_world = PointCloud(np.array([T.translation for T in robot_poses]).T,
                                                     frame=ir_intrinsics.frame)
                est_robot_points_world = T_camera_world * PointCloud(np.array(robot_points_camera).T,
                                                                     frame=ir_intrinsics.frame)
                mean_est_robot_point = np.mean(est_robot_points_world.data, axis=1).reshape(3,1)
                est_robot_points_world._data = est_robot_points_world._data - mean_est_robot_point + mean_true_robot_point
                fixed_robot_points_world = T_corrected_cb_world * est_robot_points_world
                mean_fixed_robot_point = np.mean(fixed_robot_points_world.data, axis=1).reshape(3,1)
                fixed_robot_points_world._data = fixed_robot_points_world._data - mean_fixed_robot_point + mean_true_robot_point
                vis3d.figure()
                vis3d.points(points_world, color=(0,1,0), subsample=10, random=True, scale=0.001)
                vis3d.points(true_robot_points_world, color=(0,0,1), scale=0.001)
                vis3d.points(fixed_robot_points_world, color=(1,1,0), scale=0.001)
                vis3d.points(est_robot_points_world, color=(1,0,0), scale=0.001)
                vis3d.pose(T_camera_world)
                vis3d.show()

        # save transformation arrays based on setup
        output_dir = os.path.join(config['calib_dir'], sensor_frame)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        pose_filename = os.path.join(output_dir, '%s_to_world.tf' %(sensor_frame))
        T_camera_world.save(pose_filename)
        intr_filename = os.path.join(output_dir, '%s.intr' %(sensor_frame))
        ir_intrinsics.save(intr_filename)
        f = open(os.path.join(output_dir, 'corners_cb_%s.npy' % (sensor_frame)), 'wb')  # binary mode, since .npy data is binary
Example #25
    # filter high
    high_indices = np.where(point_cloud_world.data[2, :] > max_height)[0]
    point_cloud_filtered.data[2, high_indices] = max_height

    # re-project and update depth im
    #depth_im_filtered = camera_intr.project_to_image(T_camera_world.inverse() * point_cloud_filtered)
    logging.info('Clipping took %.3f sec' % (time.time() - clip_start))

    # vis
    focal_point = np.mean(point_cloud_filtered.data, axis=1)
    if vis_clipping:
        vis3d.figure(camera_pose=T_camera_world.as_frames('camera', 'world'),
                     focal_point=focal_point)
        vis3d.points(point_cloud_world,
                     scale=0.001,
                     color=(1, 0, 0),
                     subsample=10)
        vis3d.points(point_cloud_filtered,
                     scale=0.001,
                     color=(0, 0, 1),
                     subsample=10)
        vis3d.show()

    pcl_start = time.time()

    # subsample point cloud
    #rate = int(1.0 / rescale_factor)**2
    #point_cloud_filtered = point_cloud_filtered.subsample(rate, random=False)
    box = Box(np.array([0.2, -0.24, min_height]),
              np.array([0.56, 0.21, max_height]),
              frame='world')
Example #26
di = DepthImage(image, frame=ci.frame)
pc = ci.deproject(di)

## Visualize the depth image
#vis2d.figure()
#vis2d.imshow(di)
#vis2d.show()

# Make and display a PCL type point cloud from the image
p = pcl.PointCloud(pc.data.T.astype(np.float32))

# Make a segmenter and segment the point cloud.
seg = p.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PARALLEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_distance_threshold(0.005)
indices, model = seg.segment()
print(model)

#pdb.set_trace()
vis3d.figure()
pc_plane = pc.data.T[indices]
pc_plane = pc_plane[np.where(pc_plane[:, 1] < 0.16)]

maxes = np.max(pc_plane, axis=0)
mins = np.min(pc_plane, axis=0)
print('maxes are :', maxes, '\nmins are : ', mins)

vis3d.points(pc_plane, color=(1, 0, 0))
vis3d.show()
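The fitted model holds the plane coefficients (a, b, c, d) of ax + by + cz + d = 0, so the inlier fit can be sanity-checked directly:

import numpy as np

a, b, c, d = model
dists = np.abs(pc_plane.dot([a, b, c]) + d) / np.linalg.norm([a, b, c])
print('max inlier distance to plane:', dists.max())  # roughly <= the 0.005 threshold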
Example #27
    #                             from_frame='obj', to_frame='table')
    T_obj_table = RigidTransform(
        rotation=[-0.1335021, 0.87671711, 0.41438141, 0.20452958],
        from_frame='obj',
        to_frame='table')

    stable_pose = mesh.resting_pose(T_obj_table)
    #print stable_pose.r

    table_dim = 0.3
    T_obj_table_plot = mesh.get_T_surface_obj(T_obj_table)
    T_obj_table_plot.translation[0] += 0.1
    vis.figure()
    vis.mesh(mesh, T_obj_table_plot, color=(1, 0, 0), style='wireframe')
    vis.points(Point(mesh.center_of_mass, 'obj'),
               T_obj_table_plot,
               color=(1, 0, 1),
               scale=0.01)
    vis.pose(T_obj_table_plot, alpha=0.1)
    vis.mesh_stable_pose(mesh,
                         stable_pose,
                         dim=table_dim,
                         color=(0, 1, 0),
                         style='surface')
    vis.pose(stable_pose.T_obj_table, alpha=0.1)
    vis.show()
    exit(0)

    # compute stable poses
    vis.figure()
    vis.mesh(mesh, color=(1, 1, 0), style='surface')
    vis.mesh(mesh.convex_hull(), color=(1, 0, 0))
Example #28
        vis.figure(size=(10, 10))
        num_plot = 3
        vis.subplot(1, num_plot, 1)
        vis.imshow(depth_im)
        vis.subplot(1, num_plot, 2)
        vis.imshow(segmask)
        vis.subplot(1, num_plot, 3)
        vis.imshow(obj_segmask)
        vis.show()

        from visualization import Visualizer3D as vis3d
        point_cloud = camera_intr.deproject(depth_im)
        vis3d.figure()
        vis3d.points(point_cloud,
                     subsample=3,
                     random=True,
                     color=(0, 0, 1),
                     scale=0.001)
        vis3d.pose(RigidTransform())
        vis3d.pose(T_camera_world.inverse())
        vis3d.show()

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Init policy.
    policy_type = "cem"
    if "type" in policy_config:
        policy_type = policy_config["type"]
    if policy_type == "ranking":
Example #29
def visualize_vertices(mesh, vertices):
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)
    vis3d.show()
Example #30
    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.normals

    print('Num vertices:', len(vertices))
    print('Num triangles:', len(triangles))
    print('Num normals:', len(normals))

    # 1. Generate candidate pairs of contact points

    # 2. Check for force closure

    # 3. Convert each grasp to a hand pose
    contact1 = vertices[500]
    contact2 = vertices[2000]
    T_obj_gripper = contacts_to_baxter_hand_pose(contact1, contact2)
    print('Translation', T_obj_gripper.translation)
    print('Rotation', T_obj_gripper.quaternion)

    pose_msg = T_obj_gripper.pose_msg

    # 3d visualization to help debug
    vis.figure()
    vis.mesh(mesh)
    vis.points(Point(contact1, frame='test'))
    vis.points(Point(contact2, frame='test'))
    vis.pose(T_obj_gripper, alpha=0.05)
    vis.show()

    # 4. Execute on the actual robot
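The contacts_to_baxter_hand_pose helper used above is not shown; a plausible sketch, assuming the gripper closes along its y-axis and ignoring wrist-roll preferences:

import numpy as np
from autolab_core import RigidTransform

def contacts_to_baxter_hand_pose(c1, c2):
    # Gripper origin at the midpoint between the two contacts.
    center = (c1 + c2) / 2.0
    # y-axis along the grasp axis; build an orthonormal right-handed frame.
    y = (c2 - c1) / np.linalg.norm(c2 - c1)
    x = np.cross(y, np.array([0.0, 0.0, 1.0]))
    if np.linalg.norm(x) < 1e-8:              # grasp axis parallel to world z
        x = np.array([1.0, 0.0, 0.0])
    x = x / np.linalg.norm(x)
    z = np.cross(x, y)
    R = np.column_stack((x, y, z))
    return RigidTransform(rotation=R, translation=center,
                          from_frame='gripper', to_frame='obj')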