Example #1
    def _round_and_show(self):
        for shape in self.shapes:
            si = self._shape_info[shape]
            xi = si.xi
            # Materialize the zip so it can be length-checked and rebuilt each
            # pass (zip returns a one-shot iterator in Python 3).
            segset = list(zip(shape.segments, range(shape.n_segments), xi))
            while len(segset) > 0:
                max_segtup = max(segset, key=lambda x: x[2])
                seg = max_segtup[0]
                index = max_segtup[1]
                xi[index] = 1

                new_segset = []
                for segtup in segset:
                    other_seg = segtup[0]
                    other_index = segtup[1]
                    if other_seg == seg:
                        continue
                    elif shape.segs_intersect(other_seg, seg):
                        xi[other_index] = 0.0
                    else:
                        new_segset.append(segtup)
                segset = new_segset

            Visualizer3D.figure(size=(400, 400))
            for i, segment in enumerate(shape.segments):
                if xi[i] == 1.0:
                    Visualizer3D.mesh(segment.mesh,
                                      style='surface',
                                      color=indexed_color(i))
            Visualizer3D.show()
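These segmentation examples color each segment with an indexed_color helper that is not shown. A minimal sketch of what such a helper could look like (the name comes from the calls above; the implementation is an assumption):

import colorsys

def indexed_color(i, n=20):
    # Hypothetical stand-in for the indexed_color helper used above: spread
    # hues evenly around the color wheel so nearby segment indices get
    # visually distinct colors. Returns an (r, g, b) tuple in [0, 1].
    return colorsys.hsv_to_rgb((i % n) / float(n), 0.9, 0.9)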
Example #2
def main(args):
    # set logging
    logging.getLogger().setLevel(logging.INFO)
    rospy.init_node("ensenso_reader", anonymous=True)

    num_frames = 10
    # Use the factory's sensor() constructor, as in Example #16.
    sensor = RgbdSensorFactory.sensor("ensenso", cfg={"frame": "ensenso"})
    sensor.start()

    total_time = 0
    for i in range(num_frames):
        if i > 0:
            start_time = time.time()

        _, depth_im, _ = sensor.frames()

        if i > 0:
            total_time += time.time() - start_time
            print("Frame %d" % (i))
            print("Avg FPS: %.5f" % (float(i) / total_time))

    depth_im = sensor.median_depth_img(num_img=5)
    point_cloud = sensor.ir_intrinsics.deproject(depth_im)
    point_cloud.remove_zero_points()

    sensor.stop()

    vis2d.figure()
    vis2d.imshow(depth_im)
    vis2d.title("Ensenso - Raw")
    vis2d.show()

    vis3d.figure()
    vis3d.points(point_cloud, random=True, subsample=10, scale=0.0025)
    vis3d.show()
Example #3
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()
    T_obj_cam = T_cam_obj.inverse()
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
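Several examples here call an apply_transform helper that is not shown. A minimal sketch, assuming it pushes a single 3D point through a RigidTransform (signature and behavior inferred from the call sites):

import numpy as np

def apply_transform(point, T):
    # Assumed helper: apply the 4x4 homogeneous matrix of a RigidTransform
    # to one 3D point and return the transformed 3D point.
    p_h = np.append(point, 1.0)
    return T.matrix.dot(p_h)[:3]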
Example #4
def visualize_plan(mesh, T_world_obj, T_world_grasp):
    # visualize the plan in the world frame
    mesh.apply_transform(T_world_obj.matrix)

    o = np.array([0., 0., 0.])
    obj = apply_transform(o, T_world_obj)
    grasp = apply_transform(o, T_world_grasp)

    # base frame
    vis3d.points(o, color=(1, 0, 0), scale=0.005)
    vis3d.pose(RigidTransform(),
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    # object
    vis3d.mesh(mesh)
    vis3d.points(obj, color=(0, 0, 0), scale=0.005)
    vis3d.pose(T_world_obj, alpha=0.03, tube_radius=0.002, center_scale=0.001)

    # grasp
    vis3d.points(grasp, color=(0, 1, 1), scale=0.005)
    vis3d.pose(T_world_grasp,
               alpha=0.03,
               tube_radius=0.002,
               center_scale=0.001)

    vis3d.show()
Example #5
    def vis(self, mesh, grasp_vertices, grasp_qualities):
        """
        Pass in any grasp and its associated grasp quality. This function will
        plot each grasp on the object and plot the grasps as a bar between the
        points, with colored dots on the line endpoints representing the grasp
        quality associated with each grasp.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps. Each grasp contains two contact points. Each contact
            point is a 3-dimensional vector, hence the shape mx2x3.
        grasp_qualities : m :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        """
        vis3d.mesh(mesh)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)
        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        # Center each grasp bar on the contact midpoint at the full hand width
        # (the original assigned into grasp_vertices, leaving grasp_endpoints
        # unused; compare the correct version in Example #18).
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        good_grasp = 0
        for grasp, quality in zip(grasp_endpoints, grasp_qualities):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            if color != [1, 0, 0, 1]:
                good_grasp += 1
            vis3d.plot3d(grasp, color=color, tube_radius=.0005)
        vis3d.show()
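The color expression above is a small red-to-green quality colormap: quality 0 maps to red, 0.5 to yellow, and 1 to green. Factored out for clarity (this helper is not in the original source):

def quality_color(quality):
    # RGBA in [0, 1]: quality 0 -> red, 0.5 -> yellow, 1 -> green.
    return [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]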
Example #6
def view_segmentation(mesh, n_segs):
    seg_graph = SegmentGraph(FaceGraph(mesh))
    seg_graph.cut_to_k_segments(n_segs)
    seg_graph.reindex_segment_nodes()

    seg_graph.refine_all_edges(0.4)

    Visualizer3D.figure(size=(100, 100))
    for i, seg_node in enumerate(seg_graph.segment_nodes):
        segment = Segment(seg_node, mesh)
        Visualizer3D.mesh(segment.mesh,
                          style='surface',
                          color=indexed_color(i))
    Visualizer3D.show()

    seg_map = {}
    n_segs -= 1
    while n_segs > 2:
        n_segs -= 1
        seg_graph.cut_to_k_segments(n_segs)
        for seg_node in seg_graph.segment_nodes:
            if (frozenset(seg_node.segment_indices) not in seg_map):
                segment = Segment(seg_node, mesh)
                seg_map[frozenset(seg_node.segment_indices)] = segment

    for i, segment in enumerate(
            sorted(seg_map.values(), key=lambda x: -x.cut_cost / x.perimeter)):
        print(segment.cut_cost / segment.perimeter)
        Visualizer3D.mesh(
            segment.mesh,
            style='surface',
            color=indexed_color(i),
            T_mesh_world=RigidTransform(translation=[0.15, 0, 0]))
        Visualizer3D.show()
Example #7
def show_points(points, color=(0,1,0), scale=0.005, frame_size=0.2, frame_radius=0.02):
    vis.figure(bgcolor=(1,1,1), size=(500,500))
    vis.points(np.array(points), color=color, scale=scale)

    vis.plot3d(np.array(([0, 0, 0], [frame_size, 0, 0])).astype(np.float32), color=(1,0,0), tube_radius=frame_radius)
    vis.plot3d(np.array(([0, 0, 0], [0, frame_size, 0])).astype(np.float32), color=(0,1,0), tube_radius=frame_radius)
    vis.plot3d(np.array(([0, 0, 0], [0, 0, frame_size])).astype(np.float32), color=(0,0,1), tube_radius=frame_radius)

    vis.show()
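The three plot3d calls above draw the world frame as red/green/blue axis tubes. The same effect, sketched as a hypothetical helper (assuming numpy as np and the same vis module):

def draw_frame_axes(origin=np.zeros(3), size=0.2, radius=0.02):
    # Draw x/y/z axes as red/green/blue tubes starting at `origin`.
    for axis, color in zip(np.eye(3), [(1, 0, 0), (0, 1, 0), (0, 0, 1)]):
        vis.plot3d(np.array([origin, origin + size * axis]).astype(np.float32),
                   color=color, tube_radius=radius)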
Example #8
    def visualize_alignment(self, T_obj_camera_est):
        mesh = self.salient_edge_set.mesh
        m_true = mesh.copy().apply_transform(self.T_obj_camera.matrix)
        m_est = mesh.copy().apply_transform(T_obj_camera_est.matrix)

        vis3d.figure()
        vis3d.mesh(m_true, color=(0.0, 1.0, 0.0))
        vis3d.mesh(m_est, color=(0.0, 0.0, 1.0))
        vis3d.show()
Example #9
    def show(self):
        """Display the current segmentation with mayavi.
        """
        for i, segment in enumerate(self.segments):
            color = indexed_color(i)
            Visualizer3D.mesh(segment.mesh,
                              style='surface',
                              opacity=1.0,
                              color=color)
        Visualizer3D.show()
Example #10
    def visualize(self):
        """Visualize the salient edges of the mesh.
        """
        vis3d.figure()
        for edge in self.salient_edges:
            vis3d.plot3d(self.mesh.vertices[edge],
                         color=(0.0, 1.0, 0.0),
                         tube_radius=0.0005)
        vis3d.mesh(self.mesh)
        vis3d.show()
Example #11
def fine_grid_search(pc, indices, model, shadow, splits):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]
    #splits = 3
    step_size = split_size / splits
    
    plane_data = get_plane_data(pc, indices)
    plane_pc = PointCloud(plane_data.T, pc.frame)
    # ci (camera intrinsics) and cp (camera pose) are module-level globals
    # defined outside this snippet.
    plane_pc = cp.inverse().apply(plane_pc)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()
    bi = bi.inverse()

    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    numx = (int(np.round((maxes[0]-mins[0])/split_size)) - 1) * splits + 1
    numy = (int(np.round((maxes[1]-mins[1])/split_size)) - 1) * splits + 1
    scores = np.zeros((numx, numy))
    for i in range(numx):
        x = mins[0] + i*step_size
        for j in range(numy):
            y = mins[1] + j*step_size

            for tow in transforms(pc, pc_data, shadow, x, y, x+split_size, y+split_size, 8, orig_tow):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0]*step_size
    y = mins[1] + best[1]*step_size
    cell_indices = np.where((x < pc_data[:,0]) & (pc_data[:,0] < x+split_size) & (y < pc_data[:,1]) & (pc_data[:,1] < y+split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0,1,1))
    vis3d.points(rest, color=(1,0,1))
    vis3d.show()
    #--------
    return best, scene
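fine_grid_search depends on a best_cell helper that is not shown. Assuming it simply returns the index of the highest-scoring grid cell, a one-line sketch:

def best_cell(scores):
    # Assumed behavior: (i, j) index of the maximum entry of a 2D array.
    return np.unravel_index(np.argmax(scores), scores.shape)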
Example #12
    def vis(self, key):
        """Show all the models for a given Thing.

        Parameters
        ----------
        key : str
            The key for the target Thing.
        """
        for model in self[key].models:
            vis.figure()
            vis.mesh(model.mesh, style='surface')
            vis.show()
Example #13
    def pairwise_joint_segmentation(self, shape1, shape2):
        A, B = self._create_AB(shape1, shape2)
        G, H = self._create_GH(shape1, shape2)
        C = self._create_C(shape1, shape2)

        pjs = PairwiseJointSeg([shape1, shape2], A, B, G, H, C)

        segmentations = pjs.segmentations
        for segmentation in segmentations:
            for i, segment in enumerate(segmentation):
                Visualizer3D.mesh(segment.mesh,
                                  style='surface',
                                  color=indexed_color(i))
            Visualizer3D.show()
Example #14
def main():
    # initialize logging
    logging.getLogger().setLevel(31)

    parser = argparse.ArgumentParser(
        description='Annotate Thingiverse Dataset Models',
        epilog='Written by Matthew Matl (mmatl)')
    parser.add_argument('--config',
                        help='config filename',
                        default='cfg/tools/annotater.yaml')
    args = parser.parse_args()

    config_filename = args.config
    config = YamlConfig(config_filename)

    target_key = config['target_key']
    default_value = config['default_value']
    set_value = config['set_value']
    override = config['override']

    ds = ThingiverseDataset(config['dataset_dir'])

    for i, thing_id in enumerate(ds.keys):
        thing = None
        thing_metadata = ds.metadata(thing_id)

        for model_id in thing_metadata['models']:
            model_data = thing_metadata['models'][model_id]
            if override or target_key not in model_data['metadata']:
                if thing is None:
                    thing = ds[thing_id]
                model = thing[model_id]
                logging.log(
                    31, u"{} ({}): {} ({})".format(thing.name, thing.id,
                                                   model.name,
                                                   model.id).encode('utf-8'))
                model.metadata[target_key] = default_value
                vis.figure()
                vis.mesh(model.mesh, style='surface')
                vis.show(animate=True,
                         registered_keys={
                             'g': (good_label_callback,
                                   [model, target_key, set_value])
                         })

        if thing:
            ds.save(thing, only_metadata=True)
        logging.log(31, '{}/{} things...'.format(i, len(ds.keys)))
Example #15
def visualize_normals(mesh, vertices, normals):
    scale = 0.01
    normals_scaled = normals * scale

    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)

    for v, n in zip(vertices, normals_scaled):
        vis3d.plot3d([v, v + n], color=(1, 0, 0), tube_radius=0.0005)

    vis3d.show()
Example #16
def main():
    logging.getLogger().setLevel(logging.INFO)

    # parse args
    parser = argparse.ArgumentParser(description='Register a webcam to the Photoneo PhoXi')
    parser.add_argument('--config_filename', type=str, default='cfg/tools/colorize_phoxi.yaml', help='filename of a YAML configuration for registration')
    args = parser.parse_args()
    config_filename = args.config_filename
    config = YamlConfig(config_filename)

    sensor_data = config['sensors']
    phoxi_config = sensor_data['phoxi']
    phoxi_config['frame'] = 'phoxi'

    # Initialize ROS node
    rospy.init_node('colorize_phoxi', anonymous=True)
    logging.getLogger().addHandler(rl.RosStreamHandler())

    # Get PhoXi sensor set up
    phoxi = RgbdSensorFactory.sensor(phoxi_config['type'], phoxi_config)
    phoxi.start()

    # Capture PhoXi and webcam images
    phoxi_color_im, phoxi_depth_im, _ = phoxi.frames()

    # vis2d.figure()
    # vis2d.subplot(121)
    # vis2d.imshow(phoxi_color_im)
    # vis2d.subplot(122)
    # vis2d.imshow(phoxi_depth_im)
    # vis2d.show()

    phoxi_pc = phoxi.ir_intrinsics.deproject(phoxi_depth_im)
    colors = phoxi_color_im.data.reshape((phoxi_color_im.shape[0] * phoxi_color_im.shape[1], phoxi_color_im.shape[2])) / 255.0
    vis3d.figure()
    vis3d.points(phoxi_pc.data.T[::3], color=colors[::3], scale=0.001)
    vis3d.show()

    # Export to PLY file
    vertices = phoxi.ir_intrinsics.deproject(phoxi_depth_im).data.T
    colors = phoxi_color_im.data.reshape(phoxi_color_im.data.shape[0] * phoxi_color_im.data.shape[1], phoxi_color_im.data.shape[2])
    with open('pcloud.ply', 'w') as f:
        f.write('ply\nformat ascii 1.0\nelement vertex {}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\n'.format(len(vertices)) +
                'property uchar green\nproperty uchar blue\nend_header\n')
        for v, c in zip(vertices, colors):
            f.write('{} {} {} {} {} {}\n'.format(v[0], v[1], v[2], c[0], c[1], c[2]))
Example #17
def grid_search(pc, indices, model, shadow, img_file):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    scores = np.zeros((int(np.round((maxes[0] - mins[0]) / split_size)),
                       int(np.round((maxes[1] - mins[1]) / split_size))))
    for i in range(int(np.round((maxes[0] - mins[0]) / split_size))):
        x = mins[0] + i * split_size
        for j in range(int(np.round((maxes[1] - mins[1]) / split_size))):
            y = mins[1] + j * split_size

            #binarized_overlap_image(pc, x, y, x+split_size, y+split_size, shadow, plane_normal, indices, model)

            for sh in rotations(shadow, 8):
                #overlap_size = binarized_overlap_image(pc, x, y, x+split_size, y+split_size, sh, plane_normal, indices, model)
                #scores[i][j] = -1*overlap_size
                scene = Scene()
                # ci (camera intrinsics) and cp (camera pose) are module-level
                # globals defined outside this snippet.
                camera = VirtualCamera(ci, cp)
                scene.camera = camera
                scores[i][j] = under_shadow(pc, pc_data, indices, model, sh, x,
                                            x + split_size, y, y + split_size,
                                            scene)

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " +
          str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0])
                            & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1])
                            & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
    vis3d.points(rest, color=(1, 0, 1))
    vis3d.show()
Example #18
    def vis(self, mesh, grasp_vertices, grasp_qualities, grasp_normals):
        """
        Pass in any grasp and its associated grasp quality. This function will
        plot each grasp on the object and plot the grasps as a bar between the
        points, with colored dots on the line endpoints representing the grasp
        quality associated with each grasp.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps. Each grasp contains two contact points. Each contact
            point is a 3-dimensional vector, hence the shape mx2x3.
        grasp_qualities : m :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        grasp_normals : mx2x3 :obj:`numpy.ndarray`
            surface normals at the two contact points of each grasp
        """
        vis3d.mesh(mesh)

        middle_of_part = np.mean(np.mean(grasp_vertices, axis=1), axis=0)
        print(middle_of_part)
        vis3d.points(middle_of_part, scale=0.003)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)

        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        n0 = np.zeros(grasp_endpoints.shape)
        n1 = np.zeros(grasp_endpoints.shape)

        normal_scale = 0.01
        n0[:, 0] = grasp_vertices[:, 0]
        n0[:, 1] = grasp_vertices[:, 0] + normal_scale * grasp_normals[:, 0]
        n1[:, 0] = grasp_vertices[:, 1]
        n1[:, 1] = grasp_vertices[:, 1] + normal_scale * grasp_normals[:, 1]

        for grasp, quality, normal0, normal1 in zip(grasp_endpoints,
                                                    grasp_qualities, n0, n1):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)
            vis3d.plot3d(normal0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(normal1, color=(0, 0, 0), tube_radius=.002)
        vis3d.show()
Example #19
def visualize_gripper(mesh, T_obj_grasp, vertices):
    o = np.array([0., 0., 0.])
    grasp = apply_transform(o, T_obj_grasp)

    # object
    vis3d.mesh(mesh)
    vis3d.points(o, color=(0, 0, 0), scale=0.002)
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.001)

    # gripper
    vis3d.points(grasp, color=(1, 0, 0), scale=0.002)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.002)
    vis3d.pose(T_obj_grasp, alpha=0.01, tube_radius=0.001, center_scale=0.001)

    vis3d.show()
Example #20
def visualize_grasps(mesh, vertices, metrics):
    vis3d.pose(RigidTransform(),
               alpha=0.01,
               tube_radius=0.001,
               center_scale=0.002)
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)

    min_score = float(np.min(metrics))
    max_score = float(np.max(metrics))
    metrics_normalized = (metrics.astype(float) - min_score) / (max_score -
                                                                min_score)

    for v, m in zip(vertices, metrics_normalized):
        vis3d.points(v, color=(1 - m, m, 0), scale=0.001)
        vis3d.plot3d(v, color=(1 - m, m, 0), tube_radius=0.0003)

    vis3d.show()
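One caveat with the normalization above: it divides by zero when all metrics are equal. A defensive variant (an addition, not in the original):

span = max_score - min_score
if span > 0:
    metrics_normalized = (metrics.astype(float) - min_score) / span
else:
    metrics_normalized = np.zeros_like(metrics, dtype=float)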
Example #21
def visualize_test():
    I = RigidTransform()
    g_ab = RigidTransform()
    g_ab.translation = np.array([0.05, 0, 0])
    g_ab.rotation = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])

    q_a = np.array([0., 0., 0.])

    p_b = np.array([0., 0., 0.])
    p_a = apply_transform(p_b, g_ab)

    print('g_ab = \n{}'.format(g_ab.matrix))

    vis3d.pose(I, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(q_a, color=(1, 0, 0), scale=0.005)

    vis3d.pose(g_ab, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(p_a, color=(0, 1, 0), scale=0.005)
    vis3d.show()
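Since p_b is the origin of frame b, applying g_ab to it should land exactly on the translation of g_ab, i.e. p_a = [0.05, 0, 0]. A quick sanity check (assuming apply_transform behaves as sketched earlier):

assert np.allclose(p_a, g_ab.translation)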
Example #22
def main():
    start_time = time.time()
    img_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/depth_ims_numpy/image_000001.npy'
    ci_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/camera_intrinsics.intr'
    mesh_file = 'demon_helmet.obj'

    indices, model, image, pc = largest_planar_surface(img_file, ci_file)
    mesh, best_pose, rt = find_stable_poses(mesh_file)
    shadow = find_shadow(mesh, best_pose, model[0:3])

    vis3d.figure()
    vis3d.points(pc, color=(1, 0, 0))
    vis3d.mesh(shadow, rt)
    vis3d.show()

    scores, split_size = score_cells(pc, indices, model, shadow, ci_file)
    ind = best_cell(scores)
    # print("Scores: \n" + str(scores))
    # print("\nBest cell = " + str(ind))

    print("--- %s seconds ---" % (time.time() - start_time))
Example #23
def main(args):
    # set logging
    logging.getLogger().setLevel(logging.INFO)
    rospy.init_node('ensenso_reader', anonymous=True)

    num_frames = 10
    #sensor = PhoXiSensor(frame='phoxi',
    #                     size='small')
    sensor = EnsensoSensor(frame='ensenso')
    sensor.start()

    total_time = 0
    for i in range(num_frames):
        if i > 0:
            start_time = time.time()

        _, depth_im, _ = sensor.frames()

        if i > 0:
            total_time += time.time() - start_time
            print('Frame %d' % (i))
            print('Avg FPS: %.5f' % (float(i) / total_time))

    depth_im = sensor.median_depth_img(num_img=5)
    point_cloud = sensor.ir_intrinsics.deproject(depth_im)
    point_cloud.remove_zero_points()

    sensor.stop()

    vis2d.figure()
    vis2d.imshow(depth_im)
    vis2d.title('Ensenso - Raw')
    vis2d.show()

    vis3d.figure()
    vis3d.points(point_cloud, random=True, subsample=10, scale=0.0025)
    vis3d.show()
Example #24
    grasp_indices, best_metric_indices = sorted_contacts(
        vertices, normals, T_ar_object)

    for indices in best_metric_indices[0:5]:
        a = grasp_indices[indices][0]
        b = grasp_indices[indices][1]
        normal1, normal2 = normals[a], normals[b]
        contact1, contact2 = vertices[a], vertices[b]
        # visualize the mesh and contacts
        vis.figure()
        vis.mesh(mesh)
        vis.normals(
            NormalCloud(np.hstack(
                (normal1.reshape(-1, 1), normal2.reshape(-1, 1))),
                        frame='test'),
            PointCloud(np.hstack(
                (contact1.reshape(-1, 1), contact2.reshape(-1, 1))),
                       frame='test'))
        # vis.pose(T_obj_gripper, alpha=0.05)
        vis.show()
        if BAXTER_CONNECTED:
            repeat = True
            while repeat:
                # come in from the top...
                T_obj_gripper = contacts_to_baxter_hand_pose(
                    contact1, contact2, np.array([0, 0, -1]))
                execute_grasp(T_obj_gripper, T_ar_object, ar_tag)
                repeat = bool(input("repeat? "))  # empty input stops the loop

    exit()
Example #25
    def vis(self, mesh, grasp_vertices, grasp_qualities, top_n_grasp_vertices,
            approach_directions, rs, TG):
        """
        Pass in any grasp and its associated grasp quality. This function will
        plot each grasp on the object and plot the grasps as a bar between the
        points, with colored dots on the line endpoints representing the grasp
        quality associated with each grasp.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        grasp_vertices : mx2x3 :obj:`numpy.ndarray`
            m grasps. Each grasp contains two contact points. Each contact
            point is a 3-dimensional vector, hence the shape mx2x3.
        grasp_qualities : m :obj:`numpy.ndarray`
            vector of grasp qualities for each grasp
        top_n_grasp_vertices : nx2x3 :obj:`numpy.ndarray`
            contact points of the n best grasps
        approach_directions : nx3 :obj:`numpy.ndarray`
            approach direction for each of the top grasps
        rs : 3x3 :obj:`numpy.ndarray`
            rotation matrix of the chosen grasp pose
        TG : :obj:`RigidTransform`
            gripper pose of the chosen grasp
        """
        vis3d.mesh(mesh)

        dirs = normalize(grasp_vertices[:, 0] - grasp_vertices[:, 1], axis=1)
        midpoints = (grasp_vertices[:, 0] + grasp_vertices[:, 1]) / 2
        grasp_endpoints = np.zeros(grasp_vertices.shape)
        # Assign into grasp_endpoints (the original wrote into grasp_vertices,
        # leaving grasp_endpoints unused).
        grasp_endpoints[:, 0] = midpoints + dirs * MAX_HAND_DISTANCE / 2
        grasp_endpoints[:, 1] = midpoints - dirs * MAX_HAND_DISTANCE / 2

        for i, (grasp,
                quality) in enumerate(zip(grasp_endpoints, grasp_qualities)):
            color = [min(1, 2 * (1 - quality)), min(1, 2 * quality), 0, 1]
            vis3d.plot3d(grasp, color=color, tube_radius=.001)

        # vis3d expects float RGB in [0, 1], not 0-255 values.
        blue = [0, 0, 1.0]
        light_blue = [0.2, 0.2, 0.78]

        for i, (grasp, approach_direction) in enumerate(
                zip(top_n_grasp_vertices, approach_directions)):
            midpoint = np.mean(grasp, axis=0)
            approach_direction = np.asarray(
                [midpoint, midpoint - 0.1 * approach_direction])
            if (i == 0):
                vis3d.plot3d(approach_direction, color=blue, tube_radius=.005)
                vis3d.points(grasp, color=blue, scale=.005)
            else:
                vis3d.plot3d(approach_direction,
                             color=light_blue,
                             tube_radius=.001)
                vis3d.points(grasp, color=light_blue, scale=.001)

        midpoint = np.mean(top_n_grasp_vertices[0], axis=0)
        x_purp = [0.8, 0, 0.8]
        x_axis = np.asarray([midpoint, midpoint + 0.1 * rs[:, 0]])
        y_cyan = [0, 0.8, 0.8]
        y_axis = np.asarray([midpoint, midpoint + 0.1 * rs[:, 1]])
        z_black = [0, 0, 0]
        z_axis = np.asarray([midpoint, midpoint + 0.3 * rs[:, 2]])

        vis3d.plot3d(x_axis, color=x_purp, tube_radius=.005)
        vis3d.plot3d(y_axis, color=y_cyan, tube_radius=.005)
        vis3d.plot3d(z_axis, color=z_black, tube_radius=.005)

        origin = np.asarray([0, 0, 0])
        x_axis = np.asarray([origin, 0.2 * np.array([1, 0, 0])])
        y_axis = np.asarray([origin, 0.2 * np.array([0, 1, 0])])
        z_axis = np.asarray([origin, 0.2 * np.array([0, 0, 1])])

        vis3d.plot3d(x_axis, color=x_purp, tube_radius=.005)
        vis3d.plot3d(y_axis, color=y_cyan, tube_radius=.005)
        vis3d.plot3d(z_axis, color=z_black, tube_radius=.005)

        # eucl_orien = np.asarray(tfs.euler_from_quaternion(TG.quaternion))
        intermediate_pos = TG.position - np.reshape(
            np.matmul(TG.rotation, np.array([[0], [0], [0.2]])), (1, 3))
        print(intermediate_pos, np.shape(intermediate_pos))
        red = [1.0, 0, 0]
        vis3d.points(intermediate_pos, color=red, scale=.01)
        vis3d.points(TG.position, color=red, scale=.01)

        vis3d.show()
Example #26
def main():
    # initialize logging
    logging.getLogger().setLevel(31)

    # parse args
    parser = argparse.ArgumentParser(
        description='Annotate Thingiverse Dataset Models',
        epilog='Written by Matthew Matl (mmatl)')
    parser.add_argument('--config',
                        help='config filename',
                        default='cfg/tools/rescaler.yaml')
    args = parser.parse_args()

    # read config
    config_filename = args.config
    config = YamlConfig(config_filename)

    # get gripper mesh
    gripper_filename = config['gripper_filename']
    gripper_mesh = trimesh.load_mesh(gripper_filename)

    # get metadata information
    identifier_key = config['identifier_key']
    identifier_value = config['identifier_value']
    scale_key = config['scale_key']
    default_scale = config['default_scale']
    override = config['override']

    ds = ThingiverseDataset(config['dataset_dir'])

    for i, thing_id in enumerate(ds.keys):
        thing = None
        thing_metadata = ds.metadata(thing_id)

        changed_model_keys = []

        for model_id in thing_metadata['models']:
            model_data = thing_metadata['models'][model_id]

            # If the identifier isn't in the model's metadata, skip it
            if identifier_key not in model_data['metadata'] or model_data[
                    'metadata'][identifier_key] != identifier_value:
                continue

            # If we're overriding or the scale key hasn't been set, modify the model
            if override or scale_key not in model_data['metadata']:

                # Load the model
                if thing is None:
                    thing = ds[thing_id]
                model = thing[model_id]
                logging.log(
                    31, u"{} ({}): {} ({})".format(thing.name, thing.id,
                                                   model.name,
                                                   model.id).encode('utf-8'))
                changed_model_keys.append(model.id)

                # Rescale back to original dimensions if overriding
                if scale_key in model.metadata:
                    model.mesh.apply_scale(1.0 / model.metadata[scale_key])

                model.metadata[scale_key] = default_scale

                # Visualize the model, registering the grow/shrink callbacks
                stf = SimilarityTransform(from_frame='world', to_frame='world')
                rot = RigidTransform(from_frame='world', to_frame='world')

                registered_keys = {
                    'j': (rescale_callback, ['model', rot, stf, 0.1]),
                    'k': (rescale_callback, ['model', rot, stf, -0.1]),
                    'u': (rescale_callback, ['model', rot, stf, 1.0]),
                    'i': (rescale_callback, ['model', rot, stf, -1.0]),
                    'h': (rotate_callback, ['model', rot, stf])
                }
                vis.figure()
                vis.mesh(gripper_mesh,
                         T_mesh_world=RigidTransform(translation=(0, 0, -0.08),
                                                     from_frame='obj',
                                                     to_frame='world'),
                         style='surface',
                         color=(0.3, 0.3, 0.3),
                         name='gripper')
                vis.mesh(model.mesh, style='surface', name='model')
                vis.show(animate=True, registered_keys=registered_keys)
                # Transform the model and update its metadata
                model.mesh.apply_transform(stf.matrix)
                model.metadata[scale_key] = stf.scale

        if thing:
            ds.save(thing, only_metadata=False, model_keys=changed_model_keys)
        logging.log(31, '{}/{} things...'.format(i, len(ds.keys)))
Example #27
    logging.info('Visualizing poses')
    _, depth_im, _ = sensor.frames()
    points_world = T_camera_world * intr.deproject(depth_im)

    if cfg['vis_detect']:
        vis3d.figure()
        vis3d.pose(RigidTransform())
        vis3d.points(subsample(points_world.data.T, 0.01),
                     color=(0, 1, 0),
                     scale=0.002)
        vis3d.pose(T_ready_world, length=0.05)
        vis3d.pose(T_camera_world, length=0.1)
        vis3d.pose(T_tag_world)
        vis3d.pose(T_grasp_world)
        vis3d.pose(T_lift_world)
        vis3d.show()

    #const_rotation=np.array([[1,0,0],[0,-1,0],[0,0,-1]])
    #test = RigidTransform(rotation=const_rotation,translation=T_tag_world.translation, from_frame='franka_tool', to_frame='world')
    #import pdb; pdb.set_trace()

    rotation = T_tag_world.rotation
    rotation[0:2, :] = -1 * rotation[0:2, :]

#    test = RigidTransform(rotation=rotation,translation=T_tag_world.translation, from_frame='franka_tool', to_frame='world')
#    fa.goto_pose_with_cartesian_control(test)
#    fa.close_gripper()

# if not args.no_grasp:
#     logging.info('Commanding robot')
#     fa.goto_pose_with_cartesian_control(T_lift_world)
Example #28
    def vis_transform(self, mesh, G_transform, vertices):
        """
        Visualize a gripper pose on the object. Plots the object mesh, the
        object frame, the gripper frame given by G_transform, and the two
        contact frames derived from it.

        Parameters
        ----------
        mesh : :obj:`Trimesh`
        G_transform : :obj:`RigidTransform`
            transform from the object frame to the gripper frame
        vertices : 2x3 :obj:`numpy.ndarray`
            the two contact points of the grasp
        """
        L = MAX_HAND_DISTANCE / 2  # gripper half width

        # Transform from gripper frame to contact 1: offset by -L along the
        # gripper y-axis, with the contact z-axis pointing along the closing
        # direction (gripper +y).
        G_gc1 = np.array([[1, 0, 0, 0], [0, 0, 1, -1 * L], [0, -1, 0, 0],
                          [0, 0, 0, 1]])

        # Transform from gripper frame to contact 2: mirrored at +L.
        G_gc2 = np.array([[1, 0, 0, 0], [0, 0, -1, L], [0, 1, 0, 0],
                          [0, 0, 0, 1]])

        G = G_transform.matrix

        print('G')
        print(G)

        G_oc1 = np.matmul(G, G_gc1)
        G_oc2 = np.matmul(G, G_gc2)

        scale = 0.01
        o = np.array([0, 0, 0, 1])
        x = np.array([scale, 0, 0, 1])
        y = np.array([0, scale, 0, 1])
        z = np.array([0, 0, scale, 1])

        ot = np.matmul(G, o)
        xt = np.matmul(G, x)
        yt = np.matmul(G, y)
        zt = np.matmul(G, z)

        o1 = np.matmul(G_oc1, o)
        x1 = np.matmul(G_oc1, x)
        y1 = np.matmul(G_oc1, y)
        z1 = np.matmul(G_oc1, z)

        o2 = np.matmul(G_oc2, o)
        x2 = np.matmul(G_oc2, x)
        y2 = np.matmul(G_oc2, y)
        z2 = np.matmul(G_oc2, z)

        vis3d.mesh(mesh, style='wireframe')

        # Build axis segments for the object origin, gripper, and contact frames
        x_axis = np.array([o, x])[:, :3]
        y_axis = np.array([o, y])[:, :3]
        z_axis = np.array([o, z])[:, :3]

        x_axis_t = np.array([ot, xt])[:, :3]
        y_axis_t = np.array([ot, yt])[:, :3]
        z_axis_t = np.array([ot, zt])[:, :3]

        x_axis_1 = np.array([o1, x1])[:, :3]
        y_axis_1 = np.array([o1, y1])[:, :3]
        z_axis_1 = np.array([o1, z1])[:, :3]

        x_axis_2 = np.array([o2, x2])[:, :3]
        y_axis_2 = np.array([o2, y2])[:, :3]
        z_axis_2 = np.array([o2, z2])[:, :3]

        vis3d.plot3d(x_axis, color=(0.5, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis, color=(0, 0.5, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis, color=(0, 0, 0.5), tube_radius=0.001)

        # Float RGB in [0, 1]: half-intensity for the object frame above,
        # full intensity for the gripper and contact frames.
        vis3d.plot3d(x_axis_t, color=(1, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_t, color=(0, 1, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_t, color=(0, 0, 1), tube_radius=0.001)

        vis3d.plot3d(x_axis_1, color=(1, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_1, color=(0, 1, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_1, color=(0, 0, 1), tube_radius=0.001)

        vis3d.plot3d(x_axis_2, color=(1, 0, 0), tube_radius=0.001)
        vis3d.plot3d(y_axis_2, color=(0, 1, 0), tube_radius=0.001)
        vis3d.plot3d(z_axis_2, color=(0, 0, 1), tube_radius=0.001)

        vis3d.points(vertices[0], scale=0.003)
        vis3d.points(vertices[1], scale=0.003)

        vis3d.show()
Example #29
    def compute_approach_direction(self, mesh, grasp_vertices, grasp_quality,
                                   grasp_normals):

        ## initializing stuff ##
        visualize = True
        nb_directions_to_test = 6
        normal_scale = 0.01
        plane_normal = normalize(grasp_vertices[0] - grasp_vertices[1])

        midpoint = (grasp_vertices[0] + grasp_vertices[1]) / 2

        ## generating a certain number of approach directions ##
        theta = np.pi / nb_directions_to_test
        rot_mat = rotation_3d(-plane_normal, theta)

        horizontal_direction = normalize(
            np.cross(plane_normal, np.array([0, 0, 1])))
        directions_to_test = [horizontal_direction]  #these are vectors
        approach_directions = [
            np.array(
                [midpoint, midpoint + horizontal_direction * normal_scale])
        ]  #these are two points for visualization

        for i in range(nb_directions_to_test - 1):
            directions_to_test.append(
                normalize(np.matmul(rot_mat, directions_to_test[-1])))
            approach_directions.append(
                np.array([
                    midpoint, midpoint + directions_to_test[-1] * normal_scale
                ]))

        ## computing the palm position for each approach direction ##
        # finger_length is a module-level constant defined outside this snippet
        palm_positions = []
        for i in range(nb_directions_to_test):
            palm_positions.append(midpoint +
                                  finger_length * directions_to_test[i])

        if visualize:
            ## plotting the whole mesh ##
            vis3d.mesh(mesh, style='wireframe')

            ## computing and plotting midpoint and gripper position ##
            dirs = (grasp_vertices[0] - grasp_vertices[1]
                    ) / np.linalg.norm(grasp_vertices[0] - grasp_vertices[1])
            grasp_endpoints = np.zeros(grasp_vertices.shape)
            grasp_endpoints[0] = midpoint + dirs * MAX_HAND_DISTANCE / 2
            grasp_endpoints[1] = midpoint - dirs * MAX_HAND_DISTANCE / 2

            color = [
                min(1, 2 * (1 - grasp_quality)),
                min(1, 2 * grasp_quality), 0, 1
            ]
            vis3d.plot3d(grasp_endpoints, color=color, tube_radius=.001)
            vis3d.points(midpoint, scale=0.003)

            ## computing and plotting normals at contact points ##
            n0 = np.zeros(grasp_endpoints.shape)
            n1 = np.zeros(grasp_endpoints.shape)
            n0[0] = grasp_vertices[0]
            n0[1] = grasp_vertices[0] + normal_scale * grasp_normals[0]
            n1[0] = grasp_vertices[1]
            n1[1] = grasp_vertices[1] + normal_scale * grasp_normals[1]
            vis3d.plot3d(n0, color=(0, 0, 0), tube_radius=.002)
            vis3d.plot3d(n1, color=(0, 0, 0), tube_radius=.002)

            ## plotting the palm positions for each potential approach direction ##
            for i in range(nb_directions_to_test):
                vis3d.points(palm_positions[i], scale=0.003)

            vis3d.show()

        directions_to_test = [
            directions_to_test[3], directions_to_test[2],
            directions_to_test[4], directions_to_test[1],
            directions_to_test[5], directions_to_test[0]
        ]
        palm_positions = [
            palm_positions[3], palm_positions[2], palm_positions[4],
            palm_positions[1], palm_positions[5], palm_positions[0]
        ]

        ## checking if some approach direction is valid ##
        for i in range(nb_directions_to_test):
            if len(
                    trimesh.intersections.mesh_plane(mesh,
                                                     directions_to_test[i],
                                                     palm_positions[i])) == 0:
                # it means the palm won't bump with part
                return directions_to_test[i]

        # it means all approach directions will bump with part
        return -1
Example #30
    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.normals

    print('Num vertices:', len(vertices))
    print('Num triangles:', len(triangles))
    print('Num normals:', len(normals))

    # 1. Generate candidate pairs of contact points

    # 2. Check for force closure

    # 3. Convert each grasp to a hand pose
    contact1 = vertices[500]
    contact2 = vertices[2000]
    T_obj_gripper = contacts_to_baxter_hand_pose(contact1, contact2)
    print('Translation', T_obj_gripper.translation)
    print('Rotation', T_obj_gripper.quaternion)

    pose_msg = T_obj_gripper.pose_msg

    # 3d visualization to help debug
    vis.figure()
    vis.mesh(mesh)
    vis.points(Point(contact1, frame='test'))
    vis.points(Point(contact2, frame='test'))
    vis.pose(T_obj_gripper, alpha=0.05)
    vis.show()

    # 4. Execute on the actual robot
Example #31
def visualize_vertices(mesh, vertices):
    vis3d.mesh(mesh, style='wireframe')
    vis3d.points(mesh.centroid, color=(0, 0, 0), scale=0.003)
    vis3d.points(vertices, color=(1, 0, 0), scale=0.001)
    vis3d.show()