Example #1
 def gripper_on_object(gripper,
                       grasp,
                       obj,
                       stable_pose=None,
                       T_table_world=RigidTransform(from_frame='table',
                                                    to_frame='world'),
                       gripper_color=(0.5, 0.5, 0.5),
                       object_color=(0.5, 0.5, 0.5),
                       style='surface',
                       plot_table=True,
                       table_dim=0.15):
     """ Visualize a gripper on an object.
     
     Parameters
     ----------
     gripper : :obj:`dexnet.grasping.RobotGripper`
         gripper to plot
     grasp : :obj:`dexnet.grasping.Grasp`
         grasp to plot the gripper in
     obj : :obj:`dexnet.grasping.GraspableObject3D`
         3D object to plot the gripper on
     stable_pose : :obj:`meshpy.StablePose` or :obj:`autolab_core.RigidTransform`
         stable pose of the object on a planar worksurface
     T_table_world : :obj:`autolab_core.RigidTransform`
         pose of table, specified as a transformation from mesh frame to world frame
     gripper_color : 3-tuple
         color of the gripper mesh
     object_color : 3-tuple
         color of the object mesh
     style : :obj:`str`
         rendering style for the object mesh ('surface' or 'wireframe')
     plot_table : bool
         whether or not to plot the table
     table_dim : float
         dimension of the table
     """
     if stable_pose is None:
         Visualizer3D.mesh(obj.mesh, color=object_color, style=style)
         T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
     elif isinstance(stable_pose, StablePose):
         T_obj_world = Visualizer3D.mesh_stable_pose(
             obj.mesh,
             stable_pose,
             T_table_world=T_table_world,
             color=object_color,
             style=style,
             plot_table=plot_table,
             dim=table_dim)
     else:
         T_obj_world = Visualizer3D.mesh_table(obj.mesh,
                                               stable_pose,
                                               T_table_world=T_table_world,
                                               color=object_color,
                                               style=style,
                                               plot_table=plot_table,
                                               dim=table_dim)
     DexNetVisualizer3D.gripper(gripper,
                                grasp,
                                T_obj_world,
                                color=gripper_color)
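
A minimal call sketch for the function above, assuming `gripper`, `grasp` and `obj` were already constructed with dex-net (the variable names, colors and style are illustrative only):

    # Sketch only: `gripper`, `grasp` and `obj` are assumed to come from dex-net.
    Visualizer3D.figure()
    gripper_on_object(gripper, grasp, obj,
                      gripper_color=(0.8, 0.8, 0.8),
                      object_color=(0.4, 0.6, 0.9),
                      style='wireframe')
    Visualizer3D.show()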
Example #2
    def gripper(gripper, grasp, T_obj_world, color=(0.5, 0.5, 0.5)):
        """ Plots a robotic gripper in a pose specified by a particular grasp object.

        Parameters
        ----------
        gripper : :obj:`dexnet.grasping.RobotGripper`
            the gripper to plot
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot the gripper performing
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        color : 3-tuple
            color of the gripper mesh
        """
        T_gripper_obj = grasp.gripper_pose(gripper)
        T_gripper_world = T_obj_world * T_gripper_obj
        T_mesh_world = T_gripper_world * gripper.T_mesh_gripper.inverse()
        T_mesh_world = T_mesh_world.as_frames('obj', 'world')
        Visualizer3D.mesh(gripper.mesh.trimesh,
                          T_mesh_world,
                          style='surface',
                          color=color)

        g1, g2 = grasp.endpoints
        center = grasp.center
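
        # The hard-coded points below trace the gripper mesh frame's axes
        # (transformed into the world frame): x in green, z in red, y in blue.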
        g1 = Point(np.array([0.1, 0.0, 0.0]), 'obj')
        g2 = Point(np.array([0.0, 0.0, 0.0]), 'obj')
        center = Point(np.array([0.0, 0.0, 0.0]), 'obj')

        g1_tf = T_mesh_world.apply(g1)
        g2_tf = T_mesh_world.apply(g2)
        center_tf = T_mesh_world.apply(center)
        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])

        points = [(x[0], x[1], x[2]) for x in grasp_axis_tf]
        Visualizer3D.plot3d(points, color=(0, 1, 0), tube_radius=0.004)

        g1 = Point(np.array([0.0, 0.0, 0.0]), 'obj')
        g2 = Point(np.array([0.0, 0.0, 0.1]), 'obj')
        center = Point(np.array([0.0, 0.0, 0.0]), 'obj')

        g1_tf = T_mesh_world.apply(g1)
        g2_tf = T_mesh_world.apply(g2)
        center_tf = T_mesh_world.apply(center)
        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])

        points = [(x[0], x[1], x[2]) for x in grasp_axis_tf]
        Visualizer3D.plot3d(points, color=(1, 0, 0), tube_radius=0.004)

        g1 = Point(np.array([0.0, 0.0, 0.0]), 'obj')
        g2 = Point(np.array([0.0, 0.1, 0.0]), 'obj')
        center = Point(np.array([0.0, 0.0, 0.0]), 'obj')

        g1_tf = T_mesh_world.apply(g1)
        g2_tf = T_mesh_world.apply(g2)
        center_tf = T_mesh_world.apply(center)
        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])

        points = [(x[0], x[1], x[2]) for x in grasp_axis_tf]
        Visualizer3D.plot3d(points, color=(0, 0, 1), tube_radius=0.004)
Example #3
    def _round_and_show(self):
        for shape in self.shapes:
            si = self._shape_info[shape]
            xi = si.xi
            segset = list(
                zip(shape.segments, range(shape.n_segments), xi))
            while len(segset) > 0:
                max_segtup = max(segset, key=lambda x: x[2])
                seg = max_segtup[0]
                index = max_segtup[1]
                xi[index] = 1

                new_segset = []
                for segtup in segset:
                    other_seg = segtup[0]
                    other_index = segtup[1]
                    if other_seg == seg:
                        continue
                    elif shape.segs_intersect(other_seg, seg):
                        xi[other_index] = 0.0
                    else:
                        new_segset.append(segtup)
                segset = new_segset

            Visualizer3D.figure(size=(400, 400))
            for i, segment in enumerate(shape.segments):
                if xi[i] == 1.0:
                    Visualizer3D.mesh(segment.mesh,
                                      style='surface',
                                      color=indexed_color(i))
            Visualizer3D.show()
Example #4
    def vis(self, mesh, grasp_vertices, grasp_qualities):
        """
        Pass in any grasp and its associated grasp quality.  This function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp contains two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx1 :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        vis3d.mesh(mesh)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)
        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        good_grasp = 0
        for grasp, quality in zip(grasp_endpoints, grasp_qualities):
            # green for high quality, red for low quality
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            if color != [1, 0, 0, 1]:
                good_grasp += 1
            vis3d.plot3d(grasp, color=color, tube_radius=.0005)
        vis3d.show()
Example #5
    def gripper_with_T_gripper_obj(gripper,
                                   grasp,
                                   T_gripper_obj,
                                   color=(0.5, 0.5, 0.5)):
        """ Plots a robotic gripper in a pose specified by a particular grasp object.

        Parameters
        ----------
        gripper : :obj:`dexnet.grasping.RobotGripper`
            the gripper to plot
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot the gripper performing
        T_gripper_obj : :obj:`autolab_core.RigidTransform`
            the pose of the gripper in the object frame
        color : 3-tuple
            color of the gripper mesh
        """
        T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
        T_gripper_world = T_obj_world * T_gripper_obj
        T_mesh_world = T_gripper_world * gripper.T_mesh_gripper.inverse()
        T_mesh_world = T_mesh_world.as_frames('obj', 'world')
        #   print 'T_mesh_world',T_mesh_world
        Visualizer3D.mesh(gripper.mesh.trimesh,
                          T_mesh_world,
                          style='surface',
                          color=color)
Example #6
def view_segmentation(mesh, n_segs):
    seg_graph = SegmentGraph(FaceGraph(mesh))
    seg_graph.cut_to_k_segments(n_segs)
    seg_graph.reindex_segment_nodes()

    seg_graph.refine_all_edges(0.4)

    Visualizer3D.figure(size=(100, 100))
    for i, seg_node in enumerate(seg_graph.segment_nodes):
        segment = Segment(seg_node, mesh)
        Visualizer3D.mesh(segment.mesh,
                          style='surface',
                          color=indexed_color(i))
    Visualizer3D.show()

    seg_map = {}
    n_segs -= 1
    while n_segs > 2:
        n_segs -= 1
        seg_graph.cut_to_k_segments(n_segs)
        for seg_node in seg_graph.segment_nodes:
            if (frozenset(seg_node.segment_indices) not in seg_map):
                segment = Segment(seg_node, mesh)
                seg_map[frozenset(seg_node.segment_indices)] = segment

    for i, segment in enumerate(
            sorted(seg_map.values(), key=lambda x: -x.cut_cost / x.perimeter)):
        print(segment.cut_cost / segment.perimeter)
        Visualizer3D.mesh(
            segment.mesh,
            style='surface',
            color=indexed_color(i),
            T_mesh_world=RigidTransform(translation=[0.15, 0, 0]))
        Visualizer3D.show()
Example #7
    def gripper(gripper,
                grasp,
                T_obj_world,
                color=(0.5, 0.5, 0.5),
                T_camera_world=None,
                point=None,
                point_color=None):
        """ Plots a robotic gripper in a pose specified by a particular grasp object.

        Parameters
        ----------
        gripper : :obj:`dexnet.grasping.RobotGripper`
            the gripper to plot
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot the gripper performing
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        color : 3-tuple
            color of the gripper mesh
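        T_camera_world : :obj:`autolab_core.RigidTransform`
            optional camera pose; if given, it is rendered as a pose axis
        point : Nx4 :obj:`numpy.ndarray`
            optional homogeneous points in the object frame; if given, they are
            transformed into the world frame and rendered
        point_color : 3-tuple
            color of the rendered points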
        """
        T_gripper_obj = grasp.gripper_pose(gripper)
        T_gripper_world = T_obj_world * T_gripper_obj
        T_mesh_world = T_gripper_world * gripper.T_mesh_gripper.inverse()        
        T_mesh_world = T_mesh_world.as_frames('obj', 'world')
        Visualizer3D.mesh(gripper.mesh.trimesh, T_mesh_world=T_mesh_world, style='surface', color=color)
        Visualizer3D.pose(T_gripper_world)
        if T_camera_world is not None:
            Visualizer3D.pose(T_camera_world)
        if point is not None:
            world_points = np.dot(T_obj_world.matrix, point.T).T
            Visualizer3D.points(world_points[:, :3], scale=0.002, color=point_color)
Example #8
def visualize_plan(mesh, T_world_obj, T_world_grasp):
    # visualize the plan in the world frame
    mesh.apply_transform(T_world_obj.matrix)

    o = np.array([0., 0., 0.])
    obj = apply_transform(o, T_world_obj)
    grasp = apply_transform(o, T_world_grasp)

    # base frame
    vis3d.points(o, color=(1, 0, 0), scale=0.005)
    vis3d.pose(RigidTransform(),
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    # object
    vis3d.mesh(mesh)
    vis3d.points(obj, color=(0, 0, 0), scale=0.005)
    vis3d.pose(T_world_obj, alpha=0.03, tube_radius=0.002, center_scale=0.001)

    # grasp
    vis3d.points(grasp, color=(0, 1, 1), scale=0.005)
    vis3d.pose(T_world_grasp,
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    vis3d.show()
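
Several of these examples call an `apply_transform` helper that is not shown. A minimal sketch consistent with how it is used here (mapping a single 3D point through a RigidTransform and returning a plain 3-vector) could look like this:

    import numpy as np

    def apply_transform(point, T):
        # Sketch only -- the original helper is not included in these examples.
        # point: length-3 array, T: autolab_core.RigidTransform.
        p_h = np.append(point, 1.0)  # homogeneous coordinates
        return T.matrix.dot(p_h)[:3]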
Example #9
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()
    T_obj_cam = T_cam_obj.inverse()
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
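
The matrix composition above can also be written with RigidTransform multiplication, which additionally checks that the frames line up (a sketch; it assumes the inputs were built with matching `from_frame`/`to_frame` names):

    # Sketch: equivalent composition with frame checking.
    T_world_cam = T_world_ar * T_ar_cam
    T_cam_world = T_world_cam.inverse()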
Example #10
    def visualize_alignment(self, T_obj_camera_est):
        mesh = self.salient_edge_set.mesh
        m_true = mesh.copy().apply_transform(self.T_obj_camera.matrix)
        m_est = mesh.copy().apply_transform(T_obj_camera_est.matrix)

        vis3d.figure()
        vis3d.mesh(m_true, color=(0.0, 1.0, 0.0))
        vis3d.mesh(m_est, color=(0.0, 0.0, 1.0))
        vis3d.show()
Example #11
 def show(self):
     """Display the current segmentation with mayavi.
     """
     for i, segment in enumerate(self.segments):
         color = indexed_color(i)
         Visualizer3D.mesh(segment.mesh,
                           style='surface',
                           opacity=1.0,
                           color=color)
     Visualizer3D.show()
Example #12
 def visualize(self):
     """Visualize the salient edges of the mesh.
     """
     vis3d.figure()
     for edge in self.salient_edges:
         vis3d.plot3d(self.mesh.vertices[edge],
                      color=(0.0, 1.0, 0.0),
                      tube_radius=0.0005)
     vis3d.mesh(self.mesh)
     vis3d.show()
Example #13
    def vis(self, key):
        """Show all the models for a given Thing.

        Parameters
        ----------
        key : str
            The key for the target Thing.
        """
        for model in self[key].models:
            vis.figure()
            vis.mesh(model.mesh, style='surface')
            vis.show()
Example #14
    def pairwise_joint_segmentation(self, shape1, shape2):
        A, B = self._create_AB(shape1, shape2)
        G, H = self._create_GH(shape1, shape2)
        C = self._create_C(shape1, shape2)

        pjs = PairwiseJointSeg([shape1, shape2], A, B, G, H, C)

        segmentations = pjs.segmentations
        for segmentation in segmentations:
            for i, segment in enumerate(segmentation):
                Visualizer3D.mesh(segment.mesh,
                                  style='surface',
                                  color=indexed_color(i))
            Visualizer3D.show()
Example #15
def main():
    # initialize logging
    logging.getLogger().setLevel(31)

    parser = argparse.ArgumentParser(
        description='Annotate Thingiverse Dataset Models',
        epilog='Written by Matthew Matl (mmatl)')
    parser.add_argument('--config',
                        help='config filename',
                        default='cfg/tools/annotater.yaml')
    args = parser.parse_args()

    config_filename = args.config
    config = YamlConfig(config_filename)

    target_key = config['target_key']
    default_value = config['default_value']
    set_value = config['set_value']
    override = config['override']

    ds = ThingiverseDataset(config['dataset_dir'])

    for i, thing_id in enumerate(ds.keys):
        thing = None
        thing_metadata = ds.metadata(thing_id)

        for model_id in thing_metadata['models']:
            model_data = thing_metadata['models'][model_id]
            if override or target_key not in model_data['metadata']:
                if thing is None:
                    thing = ds[thing_id]
                model = thing[model_id]
                logging.log(
                    31, u"{} ({}): {} ({})".format(thing.name, thing.id,
                                                   model.name,
                                                   model.id).encode('utf-8'))
                model.metadata[target_key] = default_value
                vis.figure()
                vis.mesh(model.mesh, style='surface')
                vis.show(animate=True,
                         registered_keys={
                             'g': (good_label_callback,
                                   [model, target_key, set_value])
                         })

        if thing:
            ds.save(thing, only_metadata=True)
        logging.log(31, '{}/{} things...'.format(i, len(ds.keys)))
Example #16
def visualize_normals(mesh, vertices, normals):
    scale = 0.01
    normals_scaled = normals * scale

    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)

    for v, n in zip(vertices, normals_scaled):
        vis3d.plot3d([v, v + n], color=(1, 0, 0), tube_radius=0.0005)

    vis3d.show()
Example #17
    def vis(self, mesh, grasp_vertices, grasp_qualities, grasp_normals):
        """
        Pass in any grasp and its associated grasp quality.  This function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp contains two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx1 :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
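        grasp_normals : mx2x3 :obj:`numpy.ndarray`
            surface normals at the two contact points of each grasp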
        """
        vis3d.mesh(mesh)

        middle_of_part = np.mean(np.mean(grasp_vertices, axis=1), axis=0)
        print(middle_of_part)
        vis3d.points(middle_of_part, scale=0.003)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)

        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        n0 = np.zeros(grasp_endpoints.shape)
        n1 = np.zeros(grasp_endpoints.shape)

        normal_scale = 0.01
        n0[:, 0] = grasp_vertices[:, 0]
        n0[:, 1] = grasp_vertices[:, 0] + normal_scale * grasp_normals[:, 0]
        n1[:, 0] = grasp_vertices[:, 1]
        n1[:, 1] = grasp_vertices[:, 1] + normal_scale * grasp_normals[:, 1]

        for grasp, quality, normal0, normal1 in zip(grasp_endpoints,
                                                    grasp_qualities, n0, n1):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)
            vis3d.plot3d(normal0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(normal1, color=(0, 0, 0), tube_radius=.002)
        vis3d.show()
Example #18
def visualize_grasps(mesh, vertices, metrics):
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)

    min_score = float(np.min(metrics))
    max_score = float(np.max(metrics))
    metrics_normalized = (metrics.astype(float) - min_score) / (max_score -
                                                                min_score)

    for v, m in zip(vertices, metrics_normalized):
        vis3d.points(v, color=(1 - m, m, 0), scale=0.001)
        vis3d.plot3d(v, color=(1 - m, m, 0), tube_radius=0.0003)

    vis3d.show()
Example #19
def visualize_gripper(mesh, T_obj_grasp, vertices):
    o = np.array([0., 0., 0.])
    grasp = apply_transform(o, T_obj_grasp)

    # object
    vis3d.mesh(mesh)
    vis3d.points(o, color=(0, 0, 0), scale=0.002)
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.001)

    # gripper
    vis3d.points(grasp, color=(1, 0, 0), scale=0.002)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.002)
    vis3d.pose(T_obj_grasp, alpha=0.01, tube_radius=0.001, center_scale=0.001)

    vis3d.show()
Example #20
def main():
    start_time = time.time()
    img_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/depth_ims_numpy/image_000001.npy'
    ci_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/camera_intrinsics.intr'
    mesh_file = 'demon_helmet.obj'

    indices, model, image, pc = largest_planar_surface(img_file, ci_file)
    mesh, best_pose, rt = find_stable_poses(mesh_file)
    shadow = find_shadow(mesh, best_pose, model[0:3])

    vis3d.figure()
    vis3d.points(pc, color=(1, 0, 0))
    vis3d.mesh(shadow, rt)
    vis3d.show()

    scores, split_size = score_cells(pc, indices, model, shadow, ci_file)
    ind = best_cell(scores)
    # print("Scores: \n" + str(scores))
    # print("\nBest cell = " + str(ind))

    print("--- %s seconds ---" % (time.time() - start_time))
Example #21
import numpy as np
import trimesh
import operator

from autolab_core import RigidTransform
from visualization import Visualizer2D as vis2d, Visualizer3D as vis3d

# Load the mesh and compute stable poses
bc = trimesh.load('demon_helmet.obj')
stable_poses, probs = bc.compute_stable_poses()
assert len(stable_poses) == len(probs)

# Find the most probable stable pose
i, value = max(enumerate(probs), key=operator.itemgetter(1))
best_pose = stable_poses[i]
print("Most probable pose:")
print(best_pose)
print("Probability = " + str(probs[i]))

# Visualize the mesh in the most probable stable state
rotation = np.asarray(best_pose[:3, :3])
translation = np.asarray(best_pose[:3, 3])
rt = RigidTransform(rotation, translation, from_frame='obj', to_frame='world')
vis3d.mesh(bc, rt)
vis3d.show()

# TODO: Project the mesh onto the plane defined by the largest flat surface in the bin
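
As a follow-up sketch using only the objects defined above, the stable poses can also be rendered one by one in order of decreasing probability instead of showing just the most probable one:

    # Sketch: visualize every stable pose, most probable first.
    order = np.argsort(probs)[::-1]
    for rank, idx in enumerate(order):
        pose = RigidTransform(rotation=np.asarray(stable_poses[idx][:3, :3]),
                              translation=np.asarray(stable_poses[idx][:3, 3]),
                              from_frame='obj', to_frame='world')
        print("Pose %d, probability %.3f" % (rank, probs[idx]))
        vis3d.mesh(bc, pose)
        vis3d.show()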
Example #22
    def vis_transform(self, mesh, G_transform, vertices):
        """
        Pass in any grasp and its associated grasp quality.  this function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp containts two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx' :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        L = MAX_HAND_DISTANCE / 2  # gripper half width

        # transform from gripper to contact 1
        G_gc1 = np.array([[1, 0, 0, 0], [0, 0, 1, -1 * L], [0, -1, 0, 0],
                          [0, 0, 0, 1]])

        # transform from gripper to contact 2
        G_gc2 = np.array([[1, 0, 0, 0], [0, 0, -1, L], [0, 1, 0, 0],
                          [0, 0, 0, 1]])

        G = G_transform.matrix

        print('G')
        print(G)

        G_oc1 = np.matmul(G, G_gc1)
        G_oc2 = np.matmul(G, G_gc2)

        scale = 0.01
        o = np.array([0, 0, 0, 1])
        x = np.array([scale, 0, 0, 1])
        y = np.array([0, scale, 0, 1])
        z = np.array([0, 0, scale, 1])

        ot = np.matmul(G, o)
        xt = np.matmul(G, x)
        yt = np.matmul(G, y)
        zt = np.matmul(G, z)

        o1 = np.matmul(G_oc1, o)
        x1 = np.matmul(G_oc1, x)
        y1 = np.matmul(G_oc1, y)
        z1 = np.matmul(G_oc1, z)

        o2 = np.matmul(G_oc2, o)
        x2 = np.matmul(G_oc2, x)
        y2 = np.matmul(G_oc2, y)
        z2 = np.matmul(G_oc2, z)

        vis3d.mesh(mesh, style='wireframe')

        #Plot origin axes
        x_axis = np.array([o, x])[:, :3]
        y_axis = np.array([o, y])[:, :3]
        z_axis = np.array([o, z])[:, :3]

        x_axis_t = np.array([ot, xt])[:, :3]
        y_axis_t = np.array([ot, yt])[:, :3]
        z_axis_t = np.array([ot, zt])[:, :3]

        x_axis_1 = np.array([o1, x1])[:, :3]
        y_axis_1 = np.array([o1, y1])[:, :3]
        z_axis_1 = np.array([o1, z1])[:, :3]

        x_axis_2 = np.array([o2, x2])[:, :3]
        y_axis_2 = np.array([o2, y2])[:, :3]
        z_axis_2 = np.array([o2, z2])[:, :3]

        vis3d.plot3d(x_axis, color=(0.5, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis, color=(0, 0.5, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis, color=(0, 0, 0.5), tube_radius=0.001)

        vis3d.plot3d(x_axis_t, color=(1, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_t, color=(0, 1, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_t, color=(0, 0, 1), tube_radius=0.001)

        vis3d.plot3d(x_axis_1, color=(1, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_1, color=(0, 1, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_1, color=(0, 0, 1), tube_radius=0.001)

        vis3d.plot3d(x_axis_2, color=(1, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_2, color=(0, 1, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_2, color=(0, 0, 1), tube_radius=0.001)

        vis3d.points(vertices[0], scale=0.003)
        vis3d.points(vertices[1], scale=0.003)

        vis3d.show()
Example #23
    def compute_approach_direction(self, mesh, grasp_vertices, grasp_quality,
                                   grasp_normals):

        ## initializing stuff ##
        visualize = True
        nb_directions_to_test = 6
        normal_scale = 0.01
        plane_normal = normalize(grasp_vertices[0] - grasp_vertices[1])

        midpoint = (grasp_vertices[0] + grasp_vertices[1]) / 2

        ## generating a certain number of approach directions ##
        theta = np.pi / nb_directions_to_test
        rot_mat = rotation_3d(-plane_normal, theta)

        horizontal_direction = normalize(
            np.cross(plane_normal, np.array([0, 0, 1])))
        directions_to_test = [horizontal_direction]  #these are vectors
        approach_directions = [
            np.array(
                [midpoint, midpoint + horizontal_direction * normal_scale])
        ]  #these are two points for visualization

        for i in range(nb_directions_to_test - 1):
            directions_to_test.append(
                normalize(np.matmul(rot_mat, directions_to_test[-1])))
            approach_directions.append(
                np.array([
                    midpoint, midpoint + directions_to_test[-1] * normal_scale
                ]))

        ## computing the palm position for each approach direction ##
        palm_positions = []
        for i in range(nb_directions_to_test):
            palm_positions.append(midpoint +
                                  finger_length * directions_to_test[i])

        if visualize:
            ## plotting the whole mesh ##
            vis3d.mesh(mesh, style='wireframe')

            ## computing and plotting midpoint and gripper position ##
            dirs = (grasp_vertices[0] - grasp_vertices[1]
                    ) / np.linalg.norm(grasp_vertices[0] - grasp_vertices[1])
            grasp_endpoints = np.zeros(grasp_vertices.shape)
            grasp_endpoints[0] = midpoint + dirs * MAX_HAND_DISTANCE / 2
            grasp_endpoints[1] = midpoint - dirs * MAX_HAND_DISTANCE / 2

            color = [
                min(1, 2 * (1 - grasp_quality)),
                min(1, 2 * grasp_quality), 0, 1
            ]
            vis3d.plot3d(grasp_endpoints, color=color, tube_radius=.001)
            vis3d.points(midpoint, scale=0.003)

            ## computing and plotting normals at contact points ##
            n0 = np.zeros(grasp_endpoints.shape)
            n1 = np.zeros(grasp_endpoints.shape)
            n0[0] = grasp_vertices[0]
            n0[1] = grasp_vertices[0] + normal_scale * grasp_normals[0]
            n1[0] = grasp_vertices[1]
            n1[1] = grasp_vertices[1] + normal_scale * grasp_normals[1]
            vis3d.plot3d(n0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(n1, color=(0, 0, 0), tube_radius=.002)

            ## plotting the palm positions for each potential approach direction ##
            for i in range(nb_directions_to_test):
                vis3d.points(palm_positions[i], scale=0.003)

            vis3d.show()

        directions_to_test = [
            directions_to_test[3], directions_to_test[2],
            directions_to_test[4], directions_to_test[1],
            directions_to_test[5], directions_to_test[0]
        ]
        palm_positions = [
            palm_positions[3], palm_positions[2], palm_positions[4],
            palm_positions[1], palm_positions[5], palm_positions[0]
        ]

        ## checking if some approach direction is valid ##
        for i in range(nb_directions_to_test):
            if len(
                    trimesh.intersections.mesh_plane(mesh,
                                                     directions_to_test[i],
                                                     palm_positions[i])) == 0:
                # it means the palm won't bump with part
                return directions_to_test[i]

        # it means all approach directions will bump with part
        return -1
Example #24
    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.normals

    print('Num vertices:', len(vertices))
    print('Num triangles:', len(triangles))
    print('Num normals:', len(normals))

    # 1. Generate candidate pairs of contact points

    # 2. Check for force closure

    # 3. Convert each grasp to a hand pose
    contact1 = vertices[500]
    contact2 = vertices[2000]
    T_obj_gripper = contacts_to_baxter_hand_pose(contact1, contact2)
    print('Translation', T_obj_gripper.translation)
    print('Rotation', T_obj_gripper.quaternion)

    pose_msg = T_obj_gripper.pose_msg

    # 3d visualization to help debug
    vis.figure()
    vis.mesh(mesh)
    vis.points(Point(contact1, frame='test'))
    vis.points(Point(contact2, frame='test'))
    vis.pose(T_obj_gripper, alpha=0.05)
    vis.show()

    # 4. Execute on the actual robot
    #T_obj_table = RigidTransform(rotation=[0.92275663,  0.13768089,  0.35600924, -0.05311874],
    #                             from_frame='obj', to_frame='table')
    T_obj_table = RigidTransform(
        rotation=[-0.1335021, 0.87671711, 0.41438141, 0.20452958],
        from_frame='obj',
        to_frame='table')

    stable_pose = mesh.resting_pose(T_obj_table)
    #print stable_pose.r

    table_dim = 0.3
    T_obj_table_plot = mesh.get_T_surface_obj(T_obj_table)
    T_obj_table_plot.translation[0] += 0.1
    vis.figure()
    vis.mesh(mesh, T_obj_table_plot, color=(1, 0, 0), style='wireframe')
    vis.points(Point(mesh.center_of_mass, 'obj'),
               T_obj_table_plot,
               color=(1, 0, 1),
               scale=0.01)
    vis.pose(T_obj_table_plot, alpha=0.1)
    vis.mesh_stable_pose(mesh,
                         stable_pose,
                         dim=table_dim,
                         color=(0, 1, 0),
                         style='surface')
    vis.pose(stable_pose.T_obj_table, alpha=0.1)
    vis.show()
    exit(0)

    # compute stable poses
Example #26
    def vis(self, mesh, grasp_vertices, grasp_qualities, top_n_grasp_vertices,
            approach_directions, rs, TG):
        """
        Pass in any grasp and its associated grasp quality.  This function will plot
        each grasp on the object and plot the grasps as a bar between the points, with
        colored dots on the line endpoints representing the grasp quality associated
        with each grasp.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps.  Each grasp contains two contact points.  Each contact point
            is a 3 dimensional vector, hence the shape mx2x3
        grasp_qualities : mx1 :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
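        top_n_grasp_vertices : nx2x3 :obj:`numpy.ndarray`
            contact points of the top-ranked grasps
        approach_directions : nx3 :obj:`numpy.ndarray`
            approach direction associated with each top-ranked grasp
        rs : 3x3 :obj:`numpy.ndarray`
            rotation matrix whose columns are drawn as the gripper frame axes
        TG : gripper pose
            final gripper pose; its position and an intermediate pre-grasp
            position are drawn as red points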
        """
        vis3d.mesh(mesh)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)
        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        for grasp, quality in zip(grasp_endpoints, grasp_qualities):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)

        blue = [0, 0, 1]
        light_blue = [0.2, 0.2, 0.8]

        for i, (grasp, approach_direction) in enumerate(
                zip(top_n_grasp_vertices, approach_directions)):
            midpoint = np.mean(grasp, axis=0)
            approach_direction = np.asarray(
                [midpoint, midpoint - 0.1 * approach_direction])
            if (i == 0):
                vis3d.plot3d(approach_direction, color=blue, tube_radius=.005)
                vis3d.points(grasp, color=blue, scale=.005)
            else:
                vis3d.plot3d(approach_direction,
                             color=light_blue,
                             tube_radius=.001)
                vis3d.points(grasp, color=light_blue, scale=.001)

        midpoint = np.mean(top_n_grasp_vertices[0], axis=0)
        x_purp = [0.8, 0, 0.8]
        x_axis = np.asarray([midpoint, midpoint + 0.1 * rs[:, 0]])
        y_cyan = [0, 0.8, 0.8]
        y_axis = np.asarray([midpoint, midpoint + 0.1 * rs[:, 1]])
        z_black = [0, 0, 0]
        z_axis = np.asarray([midpoint, midpoint + 0.3 * rs[:, 2]])

        vis3d.plot3d(x_axis, color=x_purp, tube_radius=.005)
        vis3d.plot3d(y_axis, color=y_cyan, tube_radius=.005)
        vis3d.plot3d(z_axis, color=z_black, tube_radius=.005)

        origin = np.asarray([0, 0, 0])
        x_axis = np.asarray([origin, 0.2 * np.array([1, 0, 0])])
        y_axis = np.asarray([origin, 0.2 * np.array([0, 1, 0])])
        z_axis = np.asarray([origin, 0.2 * np.array([0, 0, 1])])

        vis3d.plot3d(x_axis, color=x_purp, tube_radius=.005)
        vis3d.plot3d(y_axis, color=y_cyan, tube_radius=.005)
        vis3d.plot3d(z_axis, color=z_black, tube_radius=.005)

        # eucl_orien = np.asarray(tfs.euler_from_quaternion(TG.quaternion))
        intermediate_pos = TG.position - np.reshape(
            np.matmul(TG.rotation, np.array([[0], [0], [0.2]])), (1, 3))
        print(intermediate_pos, np.shape(intermediate_pos))
        red = [1, 0, 0]
        vis3d.points(intermediate_pos, color=red, scale=.01)
        vis3d.points(TG.position, color=red, scale=.01)

        vis3d.show()
Example #27
    output_filename = os.path.join(
        output_path, '{0}_to_world.tf'.format(T_world_obj.from_frame))
    print(T_world_obj)
    T_world_obj.save(output_filename)

    if config['vis'] and VIS_SUPPORTED:

        _, depth_im, _ = sensor.frames()
        pc_cam = ir_intrinsics.deproject(depth_im)
        pc_world = T_world_cam * pc_cam

        mesh_file = ObjFile(
            os.path.join(object_path, '{0}.obj'.format(args.object_name)))
        mesh = mesh_file.read()

        vis.figure(bgcolor=(0.7, 0.7, 0.7))
        vis.mesh(mesh, T_world_obj.as_frames('obj', 'world'), style='surface')
        vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(RigidTransform(from_frame='origin'),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(T_world_cam * T_cb_cam.inverse(),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.points(pc_world, subsample=20)
        vis.show()
    sensor.stop()
Example #28
def main():
    # initialize logging
    logging.getLogger().setLevel(31)

    # parse args
    parser = argparse.ArgumentParser(
        description='Annotate Thingiverse Dataset Models',
        epilog='Written by Matthew Matl (mmatl)')
    parser.add_argument('--config',
                        help='config filename',
                        default='cfg/tools/rescaler.yaml')
    args = parser.parse_args()

    # read config
    config_filename = args.config
    config = YamlConfig(config_filename)

    # get gripper mesh
    gripper_filename = config['gripper_filename']
    gripper_mesh = trimesh.load_mesh(gripper_filename)

    # get metadata information
    identifier_key = config['identifier_key']
    identifier_value = config['identifier_value']
    scale_key = config['scale_key']
    default_scale = config['default_scale']
    override = config['override']

    ds = ThingiverseDataset(config['dataset_dir'])

    for i, thing_id in enumerate(ds.keys):
        thing = None
        thing_metadata = ds.metadata(thing_id)

        changed_model_keys = []

        for model_id in thing_metadata['models']:
            model_data = thing_metadata['models'][model_id]

            # If the identifier isn't in the model's metadata, skip it
            if identifier_key not in model_data['metadata'] or model_data[
                    'metadata'][identifier_key] != identifier_value:
                continue

            # If we're overriding or the scale key hasn't been set, modify the model
            if override or scale_key not in model_data['metadata']:

                # Load the model
                if thing is None:
                    thing = ds[thing_id]
                model = thing[model_id]
                logging.log(
                    31, u"{} ({}): {} ({})".format(thing.name, thing.id,
                                                   model.name,
                                                   model.id).encode('utf-8'))
                changed_model_keys.append(model.id)

                # Rescale back to original dimensions if overriding
                if scale_key in model.metadata:
                    model.mesh.apply_scale(1.0 / model.metadata[scale_key])

                model.metadata[scale_key] = default_scale

                # Visualize the model, registering the grow/shrink callbacks
                stf = SimilarityTransform(from_frame='world', to_frame='world')
                rot = RigidTransform(from_frame='world', to_frame='world')

                registered_keys = {
                    'j': (rescale_callback, ['model', rot, stf, 0.1]),
                    'k': (rescale_callback, ['model', rot, stf, -0.1]),
                    'u': (rescale_callback, ['model', rot, stf, 1.0]),
                    'i': (rescale_callback, ['model', rot, stf, -1.0]),
                    'h': (rotate_callback, ['model', rot, stf])
                }
                vis.figure()
                vis.mesh(gripper_mesh,
                         T_mesh_world=RigidTransform(translation=(0, 0, -0.08),
                                                     from_frame='obj',
                                                     to_frame='world'),
                         style='surface',
                         color=(0.3, 0.3, 0.3),
                         name='gripper')
                vis.mesh(model.mesh, style='surface', name='model')
                vis.show(animate=True, registered_keys=registered_keys)
                # Transform the model and update its metadata
                model.mesh.apply_transform(stf.matrix)
                model.metadata[scale_key] = stf.scale

        if thing:
            ds.save(thing, only_metadata=False, model_keys=changed_model_keys)
        logging.log(31, '{}/{} things...'.format(i, len(ds.keys)))
Example #29
    output_filename = os.path.join(
        output_path, "{0}_to_world.tf".format(T_world_obj.from_frame))
    logging.info(T_world_obj)
    T_world_obj.save(output_filename)

    if config["vis"] and VIS_SUPPORTED:

        _, depth_im, _ = sensor.frames()
        pc_cam = ir_intrinsics.deproject(depth_im)
        pc_world = T_world_cam * pc_cam

        mesh = trimesh.load(
            os.path.join(object_path, "{0}.obj".format(args.object_name)))

        vis.figure(bgcolor=(0.7, 0.7, 0.7))
        vis.mesh(mesh, T_world_obj.as_frames("obj", "world"), style="surface")
        vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(
            RigidTransform(from_frame="origin"),
            alpha=0.04,
            tube_radius=0.002,
            center_scale=0.01,
        )
        vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(
            T_world_cam * T_cb_cam.inverse(),
            alpha=0.04,
            tube_radius=0.002,
            center_scale=0.01,
        )
        vis.points(pc_world, subsample=20)
Example #30
    of = ObjFile(MESH_FILENAME)
    mesh = of.read()
    ar_tag = lookup_tag(TAG)
    # find the transformation from the object coordinates to world coordinates... somehow

    grasp_indices, best_metric_indices = sorted_contacts(
        vertices, normals, T_ar_object)

    for indices in best_metric_indices[0:5]:
        a = grasp_indices[indices][0]
        b = grasp_indices[indices][1]
        normal1, normal2 = normals[a], normals[b]
        contact1, contact2 = vertices[a], vertices[b]
        # visualize the mesh and contacts
        vis.figure()
        vis.mesh(mesh)
        vis.normals(
            NormalCloud(np.hstack(
                (normal1.reshape(-1, 1), normal2.reshape(-1, 1))),
                        frame='test'),
            PointCloud(np.hstack(
                (contact1.reshape(-1, 1), contact2.reshape(-1, 1))),
                       frame='test'))
        # vis.pose(T_obj_gripper, alpha=0.05)
        vis.show()
        if BAXTER_CONNECTED:
            repeat = True
            while repeat:
                # come in from the top...
                T_obj_gripper = contacts_to_baxter_hand_pose(
                    contact1, contact2, np.array([0, 0, -1]))
Example #31
def visualize_vertices(mesh, vertices):
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)
    vis3d.show()
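
A small call sketch for the helper above; the file name is taken from the other examples, and any trimesh mesh will do:

    import trimesh

    # Load a mesh and show a subsample of its vertices.
    mesh = trimesh.load('demon_helmet.obj')
    visualize_vertices(mesh, mesh.vertices[::100])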