Example #1
    def show(self, color=(0.5, 0.5, 0.5)):
        """Render the 3D mesh for the segment using mayavi."""
        Visualizer3D.mesh(self.mesh, style='surface', color=color, opacity=1.0)
        Visualizer3D.show()

# Imports assumed for the script below (Berkeley AUTOLAB perception and
# visualization packages, following the conventions of the other examples).
import sys

import numpy as np

from perception import CameraIntrinsics, DepthImage
from visualization import Visualizer3D as vis3d

KSIZE = 9

if __name__ == '__main__':
    depth_im_filename = sys.argv[1]
    camera_intr_filename = sys.argv[2]

    camera_intr = CameraIntrinsics.load(camera_intr_filename)
    depth_im = DepthImage.open(depth_im_filename, frame=camera_intr.frame)

    depth_im = depth_im.inpaint()

    point_cloud_im = camera_intr.deproject_to_image(depth_im)
    normal_cloud_im = point_cloud_im.normal_cloud_im(ksize=KSIZE)
    # The original snippet references normal_cloud_im_s without defining it; here
    # we assume it is a second normal estimate with a larger smoothing window.
    normal_cloud_im_s = point_cloud_im.normal_cloud_im(ksize=2 * KSIZE + 1)

    vis3d.figure()
    vis3d.points(point_cloud_im.to_point_cloud(), scale=0.0025)

    alpha = 0.025
    subsample = 20
    for i in range(0, point_cloud_im.height, subsample):
        for j in range(0, point_cloud_im.width, subsample):
            p = point_cloud_im[i, j]
            n = normal_cloud_im[i, j]
            n2 = normal_cloud_im_s[i, j]
            if np.linalg.norm(n) > 0:
                points = np.array([p, p + alpha * n])
                vis3d.plot3d(points, tube_radius=0.001, color=(1, 0, 0))

                points = np.array([p, p + alpha * n2])
                vis3d.plot3d(points, tube_radius=0.001, color=(1, 0, 1))
Example #3
    fa.goto_pose_with_cartesian_control(
        T_tool_world, cartesian_impedances=[2000, 2000, 1000, 300, 300, 300])

    logging.info('Finding closest orthogonal grasp')
    T_grasp_world = get_closest_grasp_pose(T_tag_world, T_ready_world)
    T_lift = RigidTransform(translation=[0, 0, 0.2],
                            from_frame=T_ready_world.to_frame,
                            to_frame=T_ready_world.to_frame)
    T_lift_world = T_lift * T_grasp_world

    logging.info('Visualizing poses')
    _, depth_im, _ = sensor.frames()
    points_world = T_camera_world * intr.deproject(depth_im)

    if cfg['vis_detect']:
        vis3d.figure()
        vis3d.pose(RigidTransform())
        vis3d.points(subsample(points_world.data.T, 0.01),
                     color=(0, 1, 0),
                     scale=0.002)
        vis3d.pose(T_ready_world, length=0.05)
        vis3d.pose(T_camera_world, length=0.1)
        vis3d.pose(T_tag_world)
        vis3d.pose(T_grasp_world)
        vis3d.pose(T_lift_world)
        vis3d.show()

    #const_rotation=np.array([[1,0,0],[0,-1,0],[0,0,-1]])
    #test = RigidTransform(rotation=const_rotation,translation=T_tag_world.translation, from_frame='franka_tool', to_frame='world')
    #import pdb; pdb.set_trace()
    mesh.center_of_mass = np.load('../dex-net/data/meshes/lego_com.npy')

    #T_obj_table = RigidTransform(rotation=[0.92275663,  0.13768089,  0.35600924, -0.05311874],
    #                             from_frame='obj', to_frame='table')
    T_obj_table = RigidTransform(
        rotation=[-0.1335021, 0.87671711, 0.41438141, 0.20452958],
        from_frame='obj',
        to_frame='table')

    stable_pose = mesh.resting_pose(T_obj_table)
    #print stable_pose.r

    table_dim = 0.3
    T_obj_table_plot = mesh.get_T_surface_obj(T_obj_table)
    T_obj_table_plot.translation[0] += 0.1
    vis.figure()
    vis.mesh(mesh, T_obj_table_plot, color=(1, 0, 0), style='wireframe')
    vis.points(Point(mesh.center_of_mass, 'obj'),
               T_obj_table_plot,
               color=(1, 0, 1),
               scale=0.01)
    vis.pose(T_obj_table_plot, alpha=0.1)
    vis.mesh_stable_pose(mesh,
                         stable_pose,
                         dim=table_dim,
                         color=(0, 1, 0),
                         style='surface')
    vis.pose(stable_pose.T_obj_table, alpha=0.1)
    vis.show()
    exit(0)
Example #5
import numpy as np
import trimesh
import operator

from autolab_core import RigidTransform
from visualization import Visualizer2D as vis2d, Visualizer3D as vis3d

# Load the mesh and compute stable poses
bc = trimesh.load('demon_helmet.obj')
stable_poses, probs = bc.compute_stable_poses()
assert len(stable_poses) == len(probs)

# Find the most probable stable pose
i, value = max(enumerate(probs), key=operator.itemgetter(1))
best_pose = stable_poses[i]
print("Most probable pose:")
print(best_pose)
print("Probability = " + str(probs[i]))

# Visualize the mesh in the most probable stable state
rotation = np.asarray(best_pose[:3, :3])
translation = np.asarray(best_pose[:3, 3])
rt = RigidTransform(rotation, translation, from_frame='obj', to_frame='world')
vis3d.mesh(bc, rt)
vis3d.show()

# TODO: Project the mesh onto the plane defined by the largest flat surface in the bin
Example #6
def visualize_plan(mesh, T_world_obj, T_world_grasp):
    # visualize the plan in the world frame
    mesh.apply_transform(T_world_obj.matrix)

    o = np.array([0., 0., 0.])
    obj = apply_transform(o, T_world_obj)
    grasp = apply_transform(o, T_world_grasp)

    # base frame
    vis3d.points(o, color=(1, 0, 0), scale=0.005)
    vis3d.pose(RigidTransform(),
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    # object
    vis3d.mesh(mesh)
    vis3d.points(obj, color=(0, 0, 0), scale=0.005)
    vis3d.pose(T_world_obj, alpha=0.03, tube_radius=0.002, center_scale=0.001)

    # grasp
    vis3d.points(grasp, color=(0, 1, 1), scale=0.005)
    vis3d.pose(T_world_grasp,
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    vis3d.show()
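
# The helper apply_transform() used in several of these excerpts is not shown.
# A minimal stand-in consistent with how it is called (a 3-vector plus an
# autolab_core RigidTransform, returning the transformed 3-vector) might be:
import numpy as np
from autolab_core import Point

def apply_transform(point, T):
    # Wrap the point in T's source frame, transform it, and return the raw data.
    p = Point(np.asarray(point, dtype=float), frame=T.from_frame)
    return T.apply(p).data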
Example #7
def visualize_vertices(mesh, vertices):
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)
    vis3d.show()
    args = parser.parse_args()
    image_filename = args.input_image
    extrusion = args.extrusion
    scale_factor = args.scale_factor
    output_filename = args.output_filename

    # read the image
    binary_im = BinaryImage.open(image_filename)
    sdf = binary_im.to_sdf()
    #plt.figure()
    #plt.imshow(sdf)
    #plt.show()

    # convert to a mesh
    mesh = ImageToMeshConverter.binary_image_to_mesh(binary_im,
                                                     extrusion=extrusion,
                                                     scale_factor=scale_factor)
    vis.figure()
    vis.mesh(mesh)
    vis.show()

    # optionally save
    if output_filename is not None:
        file_root, file_ext = os.path.splitext(output_filename)
        binary_im.save(file_root + '.jpg')
        ObjFile(file_root + '.obj').write(mesh)
        np.savetxt(file_root + '.csv',
                   sdf,
                   delimiter=',',
                   header='%d %d' % (sdf.shape[0], sdf.shape[1]))
Example #9
    def graspwithapproachvectorusingapproach_point(
        grasp,
        T_obj_world=RigidTransform(from_frame='obj', to_frame='world'),
        tube_radius=0.001,
        endpoint_color=(0, 1, 0),
        endpoint_scale=0.004,
        grasp_axis_color=(0, 1, 0),
        sdfcenter=np.array([50., 50., 50.]),
        sdforigin=np.array([0, 0, 0])):
        """ Plots a grasp as an axis and center.

        Parameters
        ----------
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        tube_radius : float
            radius of the plotted grasp axis
        endpoint_color : 3-tuple
            color of the endpoints of the grasp axis
        endpoint_scale : 3-tuple
            scale of the plotted endpoints
        grasp_axis_color : 3-tuple
            color of the grasp axis
        """
        g1, g2 = grasp.endpoints
        center = grasp.center  # this center is not necessarily the true center, so it must not be relied on later
        approach_point = np.array(grasp.approach_point)
        approach_axis = grasp.rotated_full_axis[:, 0]
        approach_pointfar = approach_point + approach_axis * 0.1
        # print 'approach_angle:',grasp.approach_angle
        #  print 'approach_point:',grasp.approach_point

        g1 = Point(g1, 'obj')
        g2 = Point(g2, 'obj')
        center = Point(center, 'obj')
        approach_pointfar2 = Point(approach_pointfar, 'obj')
        approach_point2 = Point(approach_point, 'obj')

        g1_tf = T_obj_world.apply(g1)
        g2_tf = T_obj_world.apply(g2)
        #center_tf = T_obj_world.apply(center)
        approach_pointfar_tf = T_obj_world.apply(approach_pointfar2)
        approach_point2_tf = T_obj_world.apply(approach_point2)
        Visualizer3D.points(approach_point2_tf.data,
                            color=(1, 0, 0),
                            scale=0.001)

        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])
        approach_axis_tf = np.array(
            [approach_point2_tf.data, approach_pointfar_tf.data])
        #approach_point2_tf_again = np.array([approach_point2_tf.data, approach_point2_tf.data])

        # print [(x[0], x[1], x[2]) for x in grasp_axis_tf]
        #print [(x[0], x[1], x[2]) for x in grasp_axis_tf]
        #points = [(x[0]+sdfcenter[0]+sdforigin[0] , x[1]+sdfcenter[1]+sdforigin[1] , x[2] +sdfcenter[2]+sdforigin[2]) for x in grasp_axis_tf]
        points = [(x[0], x[1], x[2]) for x in grasp_axis_tf]
        approaching = [(y[0], y[1], y[2]) for y in approach_axis_tf]
        # approachingpoint=[(z[0]  , z[1]  , z[2]  ) for z in approach_point2_tf_again]

        #points = [(x[0]  , x[1]  , x[2] ) for x in grasp_axis_tf]
        Visualizer3D.plot3d(points,
                            color=grasp_axis_color,
                            tube_radius=tube_radius)
        Visualizer3D.plot3d(approaching,
                            color=(0, 0.5, 0.5),
                            tube_radius=tube_radius)
Example #10
def fast_grid_search(pc, indices, model, shadow, img_file):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    di_temp = ci.project_to_image(pc)
    vis2d.figure()
    vis2d.imshow(di_temp)
    vis2d.show()

    plane_data = pc.data.T[indices]
    #all_indices = np.where([(plane_data[::,2] > 0.795) & (plane_data[::,2] < 0.862)])
    #all_indices = np.where((plane_data[::,1] < 0.16) & (plane_data[::,1] > -0.24) & (plane_data[::,0] > -0.3) & (plane_data[::,0] < 0.24))[0]
    #plane_data = plane_data[all_indices]

    plane_pc = PointCloud(plane_data.T, pc.frame)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()

    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera
    # Get shadow depth img.
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)

    orig_tow = shadow_obj.T_obj_world

    scores = np.zeros((int(np.round((maxes[0] - mins[0]) / split_size)),
                       int(np.round((maxes[1] - mins[1]) / split_size))))
    for i in range(int(np.round((maxes[0] - mins[0]) / split_size))):
        x = mins[0] + i * split_size
        for j in range(int(np.round((maxes[1] - mins[1]) / split_size))):
            y = mins[1] + j * split_size

            for tow in transforms(pc, pc_data, shadow, x, y, x + split_size,
                                  y + split_size, 8):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(pc, pc_data, indices, model,
                                            shadow, x, x + split_size, y,
                                            y + split_size, scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " +
          str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0])
                            & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1])
                            & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
    vis3d.points(rest, color=(1, 0, 1))
    vis3d.show()
Example #11
    def quality(self, state, actions, params=None):
        """Given a suction point, compute a score based on a best-fit 3D plane of the neighboring points.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            An RgbdImageState instance that encapsulates rgbd_im, camera_intr, segmask, fully_observed.
        actions : list of :obj:`SuctionPoint2D`
            Suction grasps in image space, each encapsulating center, approach direction, depth, camera_intr.
        params: dict
            Stores params used in computing suction quality.

        Returns
        -------
        :obj:`numpy.ndarray`
            Array of the quality for each grasp
        """
        qualities = []

        # deproject points
        point_cloud_image = state.camera_intr.deproject_to_image(
            state.rgbd_im.depth)

        # compute negative SSE from the best fit plane for each grasp
        for i, action in enumerate(actions):
            if not isinstance(action, SuctionPoint2D):
                raise ValueError(
                    'This function can only be used to evaluate suction quality'
                )

            points = self._points_in_window(
                point_cloud_image, action,
                segmask=state.segmask)  # x, y coordinates form matrix A; z values form vector b.
            A, b = self._points_to_matrices(points)
            w = self._action_to_plane(
                point_cloud_image,
                action)  # vector w w/ a bias term represents a best-fit plane.
            sse = self._sum_of_squared_residuals(w, A, b)

            if params is not None and params['vis']['plane']:
                from visualization import Visualizer2D as vis2d
                from visualization import Visualizer3D as vis3d
                mid_i = A.shape[0] // 2  # integer row index (float division in the original breaks indexing)
                pred_z = A.dot(w)
                p0 = np.array([A[mid_i, 0], A[mid_i, 1], pred_z[mid_i]])
                n = np.array([w[0], w[1], -1])
                n = n / np.linalg.norm(n)
                tx = np.array([n[1], -n[0], 0])
                tx = tx / np.linalg.norm(tx)
                ty = np.cross(n, tx)
                R = np.array([tx, ty, n]).T

                c = state.camera_intr.deproject_pixel(action.depth,
                                                      action.center)
                d = Point(c.data - 0.01 * action.axis, frame=c.frame)

                T_table_world = RigidTransform(rotation=R,
                                               translation=p0,
                                               from_frame='patch',
                                               to_frame='world')

                vis3d.figure()
                vis3d.points(point_cloud_image.to_point_cloud(),
                             scale=0.0025,
                             subsample=10,
                             random=True,
                             color=(0, 0, 1))
                vis3d.points(PointCloud(points.T),
                             scale=0.0025,
                             color=(1, 0, 0))
                vis3d.points(c, scale=0.005, color=(1, 1, 0))
                vis3d.points(d, scale=0.005, color=(1, 1, 0))
                vis3d.table(T_table_world, dim=0.01)
                vis3d.show()

                vis2d.figure()
                vis2d.imshow(state.rgbd_im.depth)
                vis2d.scatter(action.center.x, action.center.y, s=50, c='b')
                vis2d.show()

            quality = np.exp(-sse)  # evaluate how well the best-fit plane describes all points in the window
            qualities.append(quality)

        return np.array(qualities)
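
# For reference, a numpy-only sketch (an assumption, not the library's actual
# helpers) of the scoring idea above: fit the plane z = w0*x + w1*y + w2 to the
# window by least squares and score it with exp(-SSE).  Note the real method
# takes w from _action_to_plane() rather than refitting the window.
import numpy as np

def plane_fit_quality(points):
    """points: Nx3 array of 3D points in a window; returns exp(-SSE)."""
    A = np.c_[points[:, :2], np.ones(len(points))]  # rows [x, y, 1]
    b = points[:, 2]                                 # z values
    w = np.linalg.lstsq(A, b, rcond=None)[0]         # plane coefficients
    sse = np.sum((A.dot(w) - b) ** 2)                # sum of squared residuals
    return np.exp(-sse)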
    sensor.start()
    camera_intr = sensor.ir_intrinsics

    n = 15
    frame_rates = []
    for i in range(n):
        logging.info("Reading frame %d of %d" % (i + 1, n))
        read_start = time.time()
        color_im, depth_im, _ = sensor.frames()
        read_stop = time.time()
        frame_rates.append(1.0 / (read_stop - read_start))

    logging.info("Avg fps: %.3f" % (np.mean(frame_rates)))

    color_im = color_im.inpaint(rescale_factor=0.5)
    depth_im = depth_im.inpaint(rescale_factor=0.5)
    point_cloud = camera_intr.deproject(depth_im)

    vis3d.figure()
    vis3d.points(point_cloud, subsample=15)
    vis3d.show()

    vis.figure()
    vis.subplot(1, 2, 1)
    vis.imshow(color_im)
    vis.subplot(1, 2, 2)
    vis.imshow(depth_im)
    vis.show()

    sensor.stop()
Example #13
def show_points(points, color=(0,1,0), scale=0.005, frame_size=0.2, frame_radius=0.02):
    vis.figure(bgcolor=(1,1,1), size=(500,500))
    vis.points(np.array(points), color=color, scale=scale)

    vis.plot3d(np.array(([0, 0, 0], [frame_size, 0, 0])).astype(np.float32), color=(1,0,0), tube_radius=frame_radius)
    vis.plot3d(np.array(([0, 0, 0], [0, frame_size, 0])).astype(np.float32), color=(0,1,0), tube_radius=frame_radius)
    vis.plot3d(np.array(([0, 0, 0], [0, 0, frame_size])).astype(np.float32), color=(0,0,1), tube_radius=frame_radius)

    vis.show()
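
# A minimal usage sketch (assumed: vis is visualization.Visualizer3D, as in the
# other examples): plot a random point cloud together with the world axes.
import numpy as np
from visualization import Visualizer3D as vis

points = np.random.uniform(-0.1, 0.1, size=(200, 3))
show_points(points, color=(0, 1, 0), scale=0.002)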
Example #14
    args = parser.parse_args()

    vis_normals = False

    # read data
    mesh_filename = args.mesh_filename
    _, mesh_ext = os.path.splitext(mesh_filename)
    if mesh_ext != '.obj':
        raise ValueError('Must provide mesh in Wavefront .OBJ format!')
    orig_mesh = ObjFile(mesh_filename).read()
    mesh = orig_mesh.subdivide(min_tri_length=0.01)
    mesh.compute_vertex_normals()
    stable_poses = mesh.stable_poses()

    if vis_normals:
        vis3d.figure()
        vis3d.mesh(mesh)
        vis3d.normals(NormalCloud(mesh.normals.T),
                      PointCloud(mesh.vertices.T),
                      subsample=10)
        vis3d.show()

    d = utils.sqrt_ceil(len(stable_poses))
    vis.figure(size=(16, 16))

    table_mesh = ObjFile('data/meshes/table.obj').read()
    table_mesh = table_mesh.subdivide()
    table_mesh.compute_vertex_normals()
    table_mat_props = MaterialProperties(color=(0, 255, 0),
                                         ambient=0.5,
                                         diffuse=1.0,
Example #15
def visualize_grasps(mesh, vertices, metrics):
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)

    min_score = float(np.min(metrics))
    max_score = float(np.max(metrics))
    metrics_normalized = (metrics.astype(float) - min_score) / (max_score -
                                                                min_score)

    for v, m in zip(vertices, metrics_normalized):
        vis3d.points(v, color=(1 - m, m, 0), scale=0.001)
        vis3d.plot3d(v, color=(1 - m, m, 0), tube_radius=0.0003)

    vis3d.show()
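
# A minimal usage sketch (assumed inputs): vertices is an m x 2 x 3 array of
# contact-point pairs and metrics an m-vector of grasp scores.
import numpy as np
import trimesh

mesh = trimesh.creation.icosphere(subdivisions=3, radius=0.05)
pairs = np.random.choice(len(mesh.vertices), size=(20, 2), replace=False)
vertices = mesh.vertices[pairs]   # 20 grasps, two contact points each
metrics = np.random.rand(20)      # placeholder grasp-quality scores
visualize_grasps(mesh, vertices, metrics)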
Example #16
    def vis(self, mesh, grasp_vertices, grasp_qualities, grasp_normals):
        """
        Pass in any grasp and its associated grasp quality.  this function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp containts two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx' :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        vis3d.mesh(mesh)

        middle_of_part = np.mean(np.mean(grasp_vertices, axis=1), axis=0)
        print(middle_of_part)
        vis3d.points(middle_of_part, scale=0.003)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)

        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        n0 = np.zeros(grasp_endpoints.shape)
        n1 = np.zeros(grasp_endpoints.shape)

        normal_scale = 0.01
        n0[:, 0] = grasp_vertices[:, 0]
        n0[:, 1] = grasp_vertices[:, 0] + normal_scale * grasp_normals[:, 0]
        n1[:, 0] = grasp_vertices[:, 1]
        n1[:, 1] = grasp_vertices[:, 1] + normal_scale * grasp_normals[:, 1]

        for grasp, quality, normal0, normal1 in zip(grasp_endpoints,
                                                    grasp_qualities, n0, n1):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)
            vis3d.plot3d(normal0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(normal1, color=(0, 0, 0), tube_radius=.002)
        vis3d.show()
Example #17
def visualize_gripper(mesh, T_obj_grasp, vertices):
    o = np.array([0., 0., 0.])
    grasp = apply_transform(o, T_obj_grasp)

    # object
    vis3d.mesh(mesh)
    vis3d.points(o, color=(0, 0, 0), scale=0.002)
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.001)

    # gripper
    vis3d.points(grasp, color=(1, 0, 0), scale=0.002)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.002)
    vis3d.pose(T_obj_grasp, alpha=0.01, tube_radius=0.001, center_scale=0.001)

    vis3d.show()
Example #18
    def vis_transform(self, mesh, G_transform, vertices):
        """
        Pass in any grasp and its associated grasp quality.  this function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp containts two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx' :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        L = MAX_HAND_DISTANCE / 2  # gripper half width

        # transform from gripper to contact 1
        G_gc1 = np.array([[1, 0, 0, 0], [0, 0, 1, -1 * L], [0, -1, 0, 0],
                          [0, 0, 0, 1]])

        # transform from gripper to contact 2
        G_gc2 = np.array([[1, 0, 0, 0], [0, 0, -1, L], [0, 1, 0, 0],
                          [0, 0, 0, 1]])

        G = G_transform.matrix

        print('G')
        print(G)

        G_oc1 = np.matmul(G, G_gc1)
        G_oc2 = np.matmul(G, G_gc2)

        scale = 0.01
        o = np.array([0, 0, 0, 1])
        x = np.array([scale, 0, 0, 1])
        y = np.array([0, scale, 0, 1])
        z = np.array([0, 0, scale, 1])

        ot = np.matmul(G, o)
        xt = np.matmul(G, x)
        yt = np.matmul(G, y)
        zt = np.matmul(G, z)

        o1 = np.matmul(G_oc1, o)
        x1 = np.matmul(G_oc1, x)
        y1 = np.matmul(G_oc1, y)
        z1 = np.matmul(G_oc1, z)

        o2 = np.matmul(G_oc2, o)
        x2 = np.matmul(G_oc2, x)
        y2 = np.matmul(G_oc2, y)
        z2 = np.matmul(G_oc2, z)

        vis3d.mesh(mesh, style='wireframe')

        #Plot origin axes
        x_axis = np.array([o, x])[:, :3]
        y_axis = np.array([o, y])[:, :3]
        z_axis = np.array([o, z])[:, :3]

        x_axis_t = np.array([ot, xt])[:, :3]
        y_axis_t = np.array([ot, yt])[:, :3]
        z_axis_t = np.array([ot, zt])[:, :3]

        x_axis_1 = np.array([o1, x1])[:, :3]
        y_axis_1 = np.array([o1, y1])[:, :3]
        z_axis_1 = np.array([o1, z1])[:, :3]

        x_axis_2 = np.array([o2, x2])[:, :3]
        y_axis_2 = np.array([o2, y2])[:, :3]
        z_axis_2 = np.array([o2, z2])[:, :3]

        vis3d.plot3d(x_axis, color=(0.5, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis, color=(0, 0.5, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis, color=(0, 0, 0.5), tube_radius=0.001)

        vis3d.plot3d(x_axis_t, color=(255, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_t, color=(0, 255, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_t, color=(0, 0, 255), tube_radius=0.001)

        vis3d.plot3d(x_axis_1, color=(255, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_1, color=(0, 255, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_1, color=(0, 0, 255), tube_radius=0.001)

        vis3d.plot3d(x_axis_2, color=(255, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_2, color=(0, 255, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_2, color=(0, 0, 255), tube_radius=0.001)

        vis3d.points(vertices[0], scale=0.003)
        vis3d.points(vertices[1], scale=0.003)

        vis3d.show()
Example #19
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()
    T_obj_cam = T_cam_obj.inverse()
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
Example #20
    def compute_approach_direction(self, mesh, grasp_vertices, grasp_quality,
                                   grasp_normals):

        ## initializing stuff ##
        visualize = True
        nb_directions_to_test = 6
        normal_scale = 0.01
        plane_normal = normalize(grasp_vertices[0] - grasp_vertices[1])

        midpoint = (grasp_vertices[0] + grasp_vertices[1]) / 2

        ## generating a certain number of approach directions ##
        theta = np.pi / nb_directions_to_test
        rot_mat = rotation_3d(-plane_normal, theta)

        horizontal_direction = normalize(
            np.cross(plane_normal, np.array([0, 0, 1])))
        directions_to_test = [horizontal_direction]  #these are vectors
        approach_directions = [
            np.array(
                [midpoint, midpoint + horizontal_direction * normal_scale])
        ]  #these are two points for visualization

        for i in range(nb_directions_to_test - 1):
            directions_to_test.append(
                normalize(np.matmul(rot_mat, directions_to_test[-1])))
            approach_directions.append(
                np.array([
                    midpoint, midpoint + directions_to_test[-1] * normal_scale
                ]))

        ## computing the palm position for each approach direction ##
        palm_positions = []
        for i in range(nb_directions_to_test):
            palm_positions.append(midpoint +
                                  finger_length * directions_to_test[i])

        if visualize:
            ## plotting the whole mesh ##
            vis3d.mesh(mesh, style='wireframe')

            ## computing and plotting midpoint and gripper position ##
            dirs = (grasp_vertices[0] - grasp_vertices[1]
                    ) / np.linalg.norm(grasp_vertices[0] - grasp_vertices[1])
            grasp_endpoints = np.zeros(grasp_vertices.shape)
            grasp_endpoints[0] = midpoint + dirs * MAX_HAND_DISTANCE / 2
            grasp_endpoints[1] = midpoint - dirs * MAX_HAND_DISTANCE / 2

            color = [
                min(1, 2 * (1 - grasp_quality)),
                min(1, 2 * grasp_quality), 0, 1
            ]
            vis3d.plot3d(grasp_endpoints, color=color, tube_radius=.001)
            vis3d.points(midpoint, scale=0.003)

            ## computing and plotting normals at contact points ##
            n0 = np.zeros(grasp_endpoints.shape)
            n1 = np.zeros(grasp_endpoints.shape)
            n0[0] = grasp_vertices[0]
            n0[1] = grasp_vertices[0] + normal_scale * grasp_normals[0]
            n1[0] = grasp_vertices[1]
            n1[1] = grasp_vertices[1] + normal_scale * grasp_normals[1]
            vis3d.plot3d(n0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(n1, color=(0, 0, 0), tube_radius=.002)

            ## plotting normals the palm positions for each potential approach direction ##
            for i in range(nb_directions_to_test):
                vis3d.points(palm_positions[i], scale=0.003)

            vis3d.show()

        directions_to_test = [
            directions_to_test[3], directions_to_test[2],
            directions_to_test[4], directions_to_test[1],
            directions_to_test[5], directions_to_test[0]
        ]
        palm_positions = [
            palm_positions[3], palm_positions[2], palm_positions[4],
            palm_positions[1], palm_positions[5], palm_positions[0]
        ]

        ## checking if some approach direction is valid ##
        for i in range(nb_directions_to_test):
            if len(
                    trimesh.intersections.mesh_plane(mesh,
                                                     directions_to_test[i],
                                                     palm_positions[i])) == 0:
                # the palm will not collide with the part
                return directions_to_test[i]

        # every approach direction collides with the part
        return -1
Example #21
def visualize_normals(mesh, vertices, normals):
    scale = 0.01
    normals_scaled = normals * scale

    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)

    for v, n in zip(vertices, normals_scaled):
        vis3d.plot3d([v, v + n], color=(1, 0, 0), tube_radius=0.0005)

    vis3d.show()
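
# A minimal usage sketch (assumed inputs): use a primitive mesh from trimesh and
# its per-vertex normals as the candidate normals.
import numpy as np
import trimesh

mesh = trimesh.creation.box(extents=[0.05, 0.07, 0.1])
visualize_normals(mesh, np.asarray(mesh.vertices), np.asarray(mesh.vertex_normals))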
Example #22
    clip_start = time.time()
    low_indices = np.where(point_cloud_world.data[2, :] < min_height)[0]
    point_cloud_filtered.data[2, low_indices] = min_height

    # filter high
    high_indices = np.where(point_cloud_world.data[2, :] > max_height)[0]
    point_cloud_filtered.data[2, high_indices] = max_height

    # re-project and update depth im
    #depth_im_filtered = camera_intr.project_to_image(T_camera_world.inverse() * point_cloud_filtered)
    logging.info('Clipping took %.3f sec' % (time.time() - clip_start))

    # vis
    focal_point = np.mean(point_cloud_filtered.data, axis=1)
    if vis_clipping:
        vis3d.figure(camera_pose=T_camera_world.as_frames('camera', 'world'),
                     focal_point=focal_point)
        vis3d.points(point_cloud_world,
                     scale=0.001,
                     color=(1, 0, 0),
                     subsample=10)
        vis3d.points(point_cloud_filtered,
                     scale=0.001,
                     color=(0, 0, 1),
                     subsample=10)
        vis3d.show()

    pcl_start = time.time()

    # subsample point cloud
    #rate = int(1.0 / rescale_factor)**2
    #point_cloud_filtered = point_cloud_filtered.subsample(rate, random=False)
Example #23
		

		contact1 = vertices[indeces[i][0]]
		contact2 = vertices[indeces[i][1]]
		
		i=i+1
		# #####For center of mass test
		# contact1 = np.array([0, .015, 0])
		# contact2 = np.array([0, -.015, 0])
		# normal1 = normals[0]
		# normal2 = normals[1]
		


		# visualize the mesh and contacts
		vis.figure()
		vis.mesh(mesh)
		vis.normals(NormalCloud(np.hstack((normal1.reshape(-1, 1), normal2.reshape(-1, 1))), frame='test'),
			PointCloud(np.hstack((contact1.reshape(-1, 1), contact2.reshape(-1, 1))), frame='test'))
		# vis.pose(T_obj_gripper, alpha=0.05)
		vis.show()
		done_looking = True if input("Execute grasp?") == "y" else False

		if done_looking and BAXTER_CONNECTED:
			repeat_grasp = True
			while repeat_grasp:
				open_gripper()
				
				T = contacts_to_baxter_hand_pose(contact1, contact2, pawn = False)
				# T = np.dot(tfs.euler_matrix(math.pi, 0, 0), T)
				T2 = np.dot(T_ar_object, T)
Example #24
di = DepthImage(image, frame=ci.frame)
pc = ci.deproject(di)

## Visualize the depth image
#vis2d.figure()
#vis2d.imshow(di)
#vis2d.show()

# Make and display a PCL type point cloud from the image
p = pcl.PointCloud(pc.data.T.astype(np.float32))

# Make a segmenter and segment the point cloud.
seg = p.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PARALLEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_distance_threshold(0.005)
indices, model = seg.segment()
print(model)

#pdb.set_trace()
vis3d.figure()
pc_plane = pc.data.T[indices]
pc_plane = pc_plane[np.where(pc_plane[::, 1] < 0.16)]

maxes = np.max(pc_plane, axis=0)
mins = np.min(pc_plane, axis=0)
print('maxes are :', maxes, '\nmins are : ', mins)

vis3d.points(pc_plane, color=(1, 0, 0))
vis3d.show()
Example #25
            T_cb_world.save(config['chessboard_tf'])
            
            # vis
            if config['vis_points']:
                _, depth_im, _ = sensor.frames()
                points_world = T_camera_world * ir_intrinsics.deproject(depth_im)
                true_robot_points_world = PointCloud(np.array([T.translation for T in robot_poses]).T,
                                                     frame=ir_intrinsics.frame)
                est_robot_points_world = T_camera_world * PointCloud(np.array(robot_points_camera).T,
                                                                     frame=ir_intrinsics.frame)
                mean_est_robot_point = np.mean(est_robot_points_world.data, axis=1).reshape(3,1)
                est_robot_points_world._data = est_robot_points_world._data - mean_est_robot_point + mean_true_robot_point
                fixed_robot_points_world = T_corrected_cb_world * est_robot_points_world
                mean_fixed_robot_point = np.mean(fixed_robot_points_world.data, axis=1).reshape(3,1)
                fixed_robot_points_world._data = fixed_robot_points_world._data - mean_fixed_robot_point + mean_true_robot_point
                vis3d.figure()
                vis3d.points(points_world, color=(0,1,0), subsample=10, random=True, scale=0.001)
                vis3d.points(true_robot_points_world, color=(0,0,1), scale=0.001)
                vis3d.points(fixed_robot_points_world, color=(1,1,0), scale=0.001)
                vis3d.points(est_robot_points_world, color=(1,0,0), scale=0.001)
                vis3d.pose(T_camera_world)
                vis3d.show()

        # save transformation arrays based on setup
        output_dir = os.path.join(config['calib_dir'], sensor_frame)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        pose_filename = os.path.join(output_dir, '%s_to_world.tf' %(sensor_frame))
        T_camera_world.save(pose_filename)
        intr_filename = os.path.join(output_dir, '%s.intr' %(sensor_frame))
        ir_intrinsics.save(intr_filename)
    if "input_images" in policy_config["vis"] and policy_config["vis"][
            "input_images"]:
        vis.figure(size=(10, 10))
        num_plot = 3
        vis.subplot(1, num_plot, 1)
        vis.imshow(depth_im)
        vis.subplot(1, num_plot, 2)
        vis.imshow(segmask)
        vis.subplot(1, num_plot, 3)
        vis.imshow(obj_segmask)
        vis.show()

        from visualization import Visualizer3D as vis3d
        point_cloud = camera_intr.deproject(depth_im)
        vis3d.figure()
        vis3d.points(point_cloud,
                     subsample=3,
                     random=True,
                     color=(0, 0, 1),
                     scale=0.001)
        vis3d.pose(RigidTransform())
        vis3d.pose(T_camera_world.inverse())
        vis3d.show()

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Init policy.
    policy_type = "cem"
Example #27
    output_filename = os.path.join(
        output_path, '{0}_to_world.tf'.format(T_world_obj.from_frame))
    print(T_world_obj)
    T_world_obj.save(output_filename)

    if config['vis'] and VIS_SUPPORTED:

        _, depth_im, _ = sensor.frames()
        pc_cam = ir_intrinsics.deproject(depth_im)
        pc_world = T_world_cam * pc_cam

        mesh_file = ObjFile(
            os.path.join(object_path, '{0}.obj'.format(args.object_name)))
        mesh = mesh_file.read()

        vis.figure(bgcolor=(0.7, 0.7, 0.7))
        vis.mesh(mesh, T_world_obj.as_frames('obj', 'world'), style='surface')
        vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(RigidTransform(from_frame='origin'),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(T_world_cam * T_cb_cam.inverse(),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.points(pc_world, subsample=20)
        vis.show()
    sensor.stop()
Example #28
    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.normals

    print('Num vertices:', len(vertices))
    print('Num triangles:', len(triangles))
    print('Num normals:', len(normals))

    # 1. Generate candidate pairs of contact points

    # 2. Check for force closure

    # 3. Convert each grasp to a hand pose
    contact1 = vertices[500]
    contact2 = vertices[2000]
    T_obj_gripper = contacts_to_baxter_hand_pose(contact1, contact2)
    print('Translation', T_obj_gripper.translation)
    print('Rotation', T_obj_gripper.quaternion)

    pose_msg = T_obj_gripper.pose_msg

    # 3d visualization to help debug
    vis.figure()
    vis.mesh(mesh)
    vis.points(Point(contact1, frame='test'))
    vis.points(Point(contact2, frame='test'))
    vis.pose(T_obj_gripper, alpha=0.05)
    vis.show()

    # 4. Execute on the actual robot
Example #29
def rescale_callback(viewer, key, rot, stf, adder):
    if stf.scale + adder <= 0:
        return
    stf.scale += adder
    vis.get_object(key).T_obj_world = rot.dot(stf)
Example #30
    def read_grasps(self,
                    object_name,
                    stable_pose_id,
                    max_grasps=10,
                    visualize=False):
        # load gripper
        gripper = RobotGripper.load(
            GRIPPER_NAME, gripper_dir='/home/silvia/dex-net/data/grippers')

        # open Dex-Net API
        dexnet_handle = DexNet()
        dexnet_handle.open_database(self.database_path)
        dexnet_handle.open_dataset(self.dataset_name)

        # read the most robust grasp
        sorted_grasps, metrics = dexnet_handle.dataset.sorted_grasps(
            object_name,
            stable_pose_id=('pose_' + str(stable_pose_id)),
            metric='force_closure',
            gripper=gripper.name)

        if (len(sorted_grasps)) == 0:
            print('no grasps for this stable pose')
            if visualize:
                stable_pose = dexnet_handle.dataset.stable_pose(
                    object_name,
                    stable_pose_id=('pose_' + str(stable_pose_id)))
                obj = dexnet_handle.dataset.graspable(object_name)
                vis.figure()
                T_table_world = RigidTransform(from_frame='table',
                                               to_frame='world')
                T_obj_world = Visualizer3D.mesh_stable_pose(
                    obj.mesh.trimesh,
                    stable_pose.T_obj_world,
                    T_table_world=T_table_world,
                    color=(0.5, 0.5, 0.5),
                    style='surface',
                    plot_table=True,
                    dim=0.15)
                vis.show(False)
            return None, None

        contact_points = list(map(get_contact_points, sorted_grasps))  # list() so the result is reusable under Python 3

        # ------------------------ VISUALIZATION CODE ------------------------
        if visualize:
            low = np.min(metrics)
            high = np.max(metrics)
            if low == high:
                q_to_c = lambda quality: CONFIG['quality_scale']
            else:
                q_to_c = lambda quality: CONFIG['quality_scale'] * (
                    quality - low) / (high - low)

            stable_pose = dexnet_handle.dataset.stable_pose(
                object_name, stable_pose_id=('pose_' + str(stable_pose_id)))
            obj = dexnet_handle.dataset.graspable(object_name)
            vis.figure()

            T_table_world = RigidTransform(from_frame='table',
                                           to_frame='world')
            T_obj_world = Visualizer3D.mesh_stable_pose(
                obj.mesh.trimesh,
                stable_pose.T_obj_world,
                T_table_world=T_table_world,
                color=(0.5, 0.5, 0.5),
                style='surface',
                plot_table=True,
                dim=0.15)
            for grasp, metric in zip(sorted_grasps, metrics):
                color = plt.get_cmap('hsv')((q_to_c(metric)))[:-1]
                vis.grasp(grasp,
                          T_obj_world=stable_pose.T_obj_world,
                          grasp_axis_color=color,
                          endpoint_color=color)
            vis.show(False)
        # ------------------------ END VISUALIZATION CODE ---------------------------

        # ------------------------ START COLLISION CHECKING ---------------------------
        #stable_pose = dexnet_handle.dataset.stable_pose(object_name, stable_pose_id=('pose_'+str(stable_pose_id)))
        #graspable = dexnet_handle.dataset.graspable(object_name)
        #cc = GraspCollisionChecker(gripper).set_graspable_object(graspable, stable_pose.T_obj_world)
        stable_pose_matrix = self.get_stable_pose(object_name, stable_pose_id)
        # CLOSE DATABASE
        dexnet_handle.close_database()
        gripper_poses = []

        for grasp in sorted_grasps[:max_grasps]:
            gripper_pose_matrix = np.eye(4, 4)
            center_world = np.matmul(
                stable_pose_matrix,
                [grasp.center[0], grasp.center[1], grasp.center[2], 1])
            axis_world = np.matmul(
                stable_pose_matrix,
                [grasp.axis_[0], grasp.axis_[1], grasp.axis_[2], 1])
            gripper_angle = math.atan2(axis_world[1], axis_world[0])
            gripper_pose_matrix[:3, 3] = center_world[:3]
            gripper_pose_matrix[0, 0] = math.cos(gripper_angle)
            gripper_pose_matrix[0, 1] = -math.sin(gripper_angle)
            gripper_pose_matrix[1, 0] = math.sin(gripper_angle)
            gripper_pose_matrix[1, 1] = math.cos(gripper_angle)
            #if visualize:
            #    vis.figure()
            #    vis.gripper_on_object(gripper, grasp, obj, stable_pose=stable_pose.T_obj_world)
            #    vis.show(False)
            gripper_poses.append(gripper_pose_matrix)

        return contact_points, gripper_poses