Example #1
    def _load_data(self, obj_id):
        with open(self.data_dir + obj_id + '.json', 'r') as json_file:
            self.grasp_metrics = json.load(json_file)
        with open(self.data_dir + obj_id + '.pkl', 'rb') as pkl_file:
            self.candidate_grasps_dict = pkl.load(pkl_file)

        self.object_mesh = ObjFile(self.data_dir + obj_id + '.obj').read()
        self.stable_poses = StablePoseFile(self.data_dir + obj_id +
                                           '.stp').read()
        obj_mask = self.file_arr['Obj_id'] == obj_id
        self.tensor = self.file_arr['Tensor'][obj_mask][0]
        self.array = self.file_arr['Array'][obj_mask][0]
        depth = self.file_arr['Depth'][obj_mask][0]
        self.config['env_rv_params']['min_radius'] = depth
        self.config['env_rv_params']['max_radius'] = depth
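The `file_arr` lookups above follow a NumPy structured-array pattern: one record per object, selected by a boolean mask on the id column. A self-contained sketch of that pattern with made-up records (independent of the Dex-Net data files):

import numpy as np

# Miniature stand-in for self.file_arr: one record per object.
dtype = [('Obj_id', (np.str_, 40)), ('Tensor', int), ('Array', int),
         ('Depth', float)]
file_arr = np.array([('mug_000', 3, 17, 0.65),
                     ('bowl_001', 4, 2, 0.70)], dtype=dtype)

obj_mask = file_arr['Obj_id'] == 'bowl_001'   # boolean mask over records
tensor = file_arr['Tensor'][obj_mask][0]      # -> 4
depth = file_arr['Depth'][obj_mask][0]        # -> 0.7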
Example #2
def load_mesh(mesh_id, config, rescale_mesh=False):
    # Set up the filepath from the mesh id (this is where the service dumps the mesh).
    filepath = os.path.join(consts.MESH_CACHE_DIR, mesh_id) + '.obj'

    # Initialize mesh processor.
    mesh_processor = mp.MeshProcessor(filepath, consts.MESH_CACHE_DIR)

    # Run through MP steps manually to make things easier
    mesh_processor._load_mesh()
    mesh_processor.mesh_.density = config['obj_density']
    # _clean_mesh
    mesh_processor._remove_bad_tris()
    mesh_processor._remove_unreferenced_vertices()
    # # standardize pose, recover transform
    # verts_old = mesh_processor.mesh_.vertices.copy()
    # mesh_processor._standardize_pose()
    # verts_new = mesh_processor.mesh_.vertices
    # # Transform recovery
    # MAT_SIZE = min(verts_old.shape[0], 300)
    # tmat_rec = np.dot(np.linalg.pinv(np.hstack((verts_old[:MAT_SIZE], np.ones((MAT_SIZE, 1)) ))),
    #                                  np.hstack((verts_new[:MAT_SIZE], np.ones((MAT_SIZE, 1)) ))).T
    # rotation = tmat_rec[:3, :3]
    # translation = tmat_rec[:3, 3]
    # transform = RigidTransform(rotation=rotation, translation=translation)
    # scale = 1.0

    if rescale_mesh:  # config['rescale_objects'] in the local config; the current use case passes it in as an arg
        mesh_processor._standardize_pose()
        mesh_processor._rescale_vertices(config['obj_target_scale'], config['obj_scaling_mode'], config['use_uniform_com'])

    mesh_processor.sdf_ = None
    if config['generate_sdf']:
        mesh_processor._generate_sdf(config['path_to_sdfgen'], config['sdf_dim'], config['sdf_padding'])
    mesh_processor._generate_stable_poses(config['stp_min_prob'])

    mesh, sdf, stable_poses = (mesh_processor.mesh, mesh_processor.sdf, mesh_processor.stable_poses,)

    # Make graspable
    graspable = GraspableObject3D(sdf           = sdf,
                                  mesh          = mesh,
                                  key           = mesh_id,
                                  model_name    = mesh_processor.obj_filename,
                                  mass          = config['default_mass'],
                                  convex_pieces = None)
                                  
    # Re-save the mesh to the proc file, because recomputing the center of mass translates the mesh.
    ObjFile(os.path.join(consts.MESH_CACHE_DIR, mesh_id) + '_proc.obj').write(graspable.mesh)
    
    return graspable, stable_poses
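A hedged usage sketch for `load_mesh`, assuming the module-level `consts.MESH_CACHE_DIR` and the meshpy-style `mp.MeshProcessor` used above are available; the config keys mirror the ones the function reads, and all values here are illustrative, not canonical:

config = {
    'obj_density': 4000.0,       # kg/m^3, illustrative
    'obj_target_scale': 0.05,    # only read when rescale_mesh=True
    'obj_scaling_mode': 'min',   # illustrative scaling mode
    'use_uniform_com': False,
    'generate_sdf': True,
    'path_to_sdfgen': 'SDFGen',  # path to the SDFGen binary
    'sdf_dim': 100,
    'sdf_padding': 5,
    'stp_min_prob': 0.01,
    'default_mass': 1.0,
}
# Expects <MESH_CACHE_DIR>/mug_000.obj to exist:
graspable, stable_poses = load_mesh('mug_000', config)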
Example #3
    def __init__(self, path, elev):
        if not os.path.exists(DATA_DIR):
            raise NameError(
                "Path %s does not exist. Is the docker file set up properly?"
                % DATA_DIR)
        table_file = DATA_DIR + '/meshes/table.obj'
        self.table_mesh = ObjFile(table_file).read()
        self.data_dir = DATA_DIR + '/meshes/dexnet/'
        self.output_dir = DATA_DIR + '/reprojections/'
        self.config = YamlConfig(
            './cfg/tools/generate_projected_gqcnn_dataset.yaml')

        self.random_positions = 1
        self.image_size = (300, 300)
        self.elev = 0
        self.show_images = False

        self.cur_obj_label = 0
        self.cur_image_label = 0
        self.cur_pose_label = 0

        if path is not None:
            self.output_dir = DATA_DIR + '/reprojections/' + path

        if elev is not None:
            print("Elevation angle is being set to %d" % elev)
            self.config['env_rv_params']['min_elev'] = elev
            self.config['env_rv_params']['max_elev'] = elev
            self.elev = elev

        tensor_config = self.config['tensors']
        tensor_config['fields']['depth_ims_tf_table']['height'] = 32
        tensor_config['fields']['depth_ims_tf_table']['width'] = 32
        for metric in ('robust_ferrari_canny', 'ferrari_canny',
                       'force_closure'):
            tensor_config['fields'][metric] = {'dtype': 'float32'}

        self.tensor_dataset = TensorDataset(self.output_dir, tensor_config)
        self.tensor_datapoint = self.tensor_dataset.datapoint_template
        if not os.path.exists(self.output_dir + '/images'):
            os.makedirs(self.output_dir + '/images')
        if not os.path.exists(self.output_dir + '/meshes'):
            os.makedirs(self.output_dir + '/meshes')
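For reference, after this constructor runs, `tensor_config['fields']` is shaped roughly like the sketch below; the base entries come from the YAML file, so treat the exact field set as an assumption:

fields = {
    'depth_ims_tf_table': {'dtype': 'float32', 'height': 32, 'width': 32},
    'robust_ferrari_canny': {'dtype': 'float32'},
    'ferrari_canny': {'dtype': 'float32'},
    'force_closure': {'dtype': 'float32'},
    # ... plus hand_poses, labels, etc. from the YAML config
}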
Example #4
    right_gripper = baxter_gripper.Gripper('right')
    right_gripper.calibrate()

    listener = tf.TransformListener()
    from_frame = 'base'
    time.sleep(1)

    # Main code
    br = tf.TransformBroadcaster()

    # SETUP
    mesh = trimesh.load(MESH_FILENAME)
    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.vertex_normals
    of = ObjFile(MESH_FILENAME)
    mesh = of.read()
    ar_tag = lookup_tag(TAG)
    # TODO: find the transformation from object coordinates to world coordinates

    grasp_indices, best_metric_indices = sorted_contacts(
        vertices, normals, T_ar_object)

    for indices in best_metric_indices[0:5]:
        a = grasp_indices[indices][0]
        b = grasp_indices[indices][1]
        normal1, normal2 = normals[a], normals[b]
        contact1, contact2 = vertices[a], vertices[b]
        # visualize the mesh and contacts
        vis.figure()
        vis.mesh(mesh)
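`sorted_contacts` evidently ranks candidate vertex pairs by a grasp-quality metric. As a point of reference, here is a self-contained sketch of the classic antipodality test for a single contact pair (pure NumPy; the friction coefficient and the outward-normal convention are assumptions):

import numpy as np

def is_antipodal(p1, p2, n1, n2, mu=0.5):
    """True if the line between contacts lies inside both friction cones."""
    axis = (p2 - p1) / np.linalg.norm(p2 - p1)
    half_angle = np.arctan(mu)  # friction cone half-angle
    # Angle between the grasp axis and each inward-pointing contact normal
    theta1 = np.arccos(np.clip(np.dot(axis, -n1), -1.0, 1.0))
    theta2 = np.arccos(np.clip(np.dot(-axis, -n2), -1.0, 1.0))
    return theta1 <= half_angle and theta2 <= half_angle

# Two directly opposing contacts pass the test:
print(is_antipodal(np.array([0., 0., 0.]), np.array([1., 0., 0.]),
                   np.array([-1., 0., 0.]), np.array([1., 0., 0.])))  # True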
Example #5
class Reprojection:
    def __init__(self, path, elev):
        if not os.path.exists(DATA_DIR):
            raise NameError(
                "Path %s does not exist. Is the docker file set up properly?"
                % DATA_DIR)
        table_file = DATA_DIR + '/meshes/table.obj'
        self.table_mesh = ObjFile(table_file).read()
        self.data_dir = DATA_DIR + '/meshes/dexnet/'
        self.output_dir = DATA_DIR + '/reprojections/'
        self.config = YamlConfig(
            './cfg/tools/generate_projected_gqcnn_dataset.yaml')

        self.random_positions = 1
        self.image_size = (300, 300)
        self.elev = 0
        self.show_images = False

        self.cur_obj_label = 0
        self.cur_image_label = 0
        self.cur_pose_label = 0

        if path is not None:
            self.output_dir = DATA_DIR + '/reprojections/' + path

        if elev is not None:
            print("Elevation angle is being set to %d" % elev)
            self.config['env_rv_params']['min_elev'] = elev
            self.config['env_rv_params']['max_elev'] = elev
            self.elev = elev

        tensor_config = self.config['tensors']
        tensor_config['fields']['depth_ims_tf_table']['height'] = 32
        tensor_config['fields']['depth_ims_tf_table']['width'] = 32
        for metric in ('robust_ferrari_canny', 'ferrari_canny',
                       'force_closure'):
            tensor_config['fields'][metric] = {'dtype': 'float32'}

        self.tensor_dataset = TensorDataset(self.output_dir, tensor_config)
        self.tensor_datapoint = self.tensor_dataset.datapoint_template
        if not os.path.exists(self.output_dir + '/images'):
            os.makedirs(self.output_dir + '/images')
        if not os.path.exists(self.output_dir + '/meshes'):
            os.makedirs(self.output_dir + '/meshes')

    def _load_file_ids(self):
        dtype = [('Obj_id', (np.str_, 40)), ('Tensor', int), ('Array', int),
                 ('Depth', float)]

        with open(self.data_dir + 'files.csv', 'r') as csv_file:
            data = []
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                data.append(
                    tuple([
                        int(value) if value.isdigit() else value
                        for value in row
                    ]))
            self.file_arr = np.array(data, dtype=dtype)
        files = os.listdir(self.data_dir)
        files = [name.split('.')[0] for name in files]
        files.remove('files')
        self.all_objects = list(set(files))

    def _load_data(self, obj_id):
        with open(self.data_dir + obj_id + '.json', 'r') as json_file:
            self.grasp_metrics = json.load(json_file)
        with open(self.data_dir + obj_id + '.pkl', 'rb') as pkl_file:
            self.candidate_grasps_dict = pkl.load(pkl_file)

        self.object_mesh = ObjFile(self.data_dir + obj_id + '.obj').read()
        self.stable_poses = StablePoseFile(self.data_dir + obj_id +
                                           '.stp').read()
        obj_mask = self.file_arr['Obj_id'] == obj_id
        self.tensor = self.file_arr['Tensor'][obj_mask][0]
        self.array = self.file_arr['Array'][obj_mask][0]
        depth = self.file_arr['Depth'][obj_mask][0]
        self.config['env_rv_params']['min_radius'] = depth
        self.config['env_rv_params']['max_radius'] = depth

    def start_rendering(self):
        self._load_file_ids()

        for object_id in self.all_objects:
            self._load_data(object_id)

            for i, stable_pose in enumerate(self.stable_poses):
                try:
                    candidate_grasp_info = self.candidate_grasps_dict[
                        stable_pose.id]
                except KeyError:
                    continue

                if not candidate_grasp_info:
                    print("Warning: candidate grasp info of object id %s is "
                          "empty, skipping." % object_id)
                    continue
                T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                T_obj_stp = self.object_mesh.get_T_surface_obj(T_obj_stp)

                T_table_obj = RigidTransform(from_frame='table',
                                             to_frame='obj')
                scene_objs = {
                    'table': SceneObject(self.table_mesh, T_table_obj)
                }

                urv = UniformPlanarWorksurfaceImageRandomVariable(
                    self.object_mesh,
                    [RenderMode.DEPTH_SCENE, RenderMode.SEGMASK],
                    'camera',
                    self.config['env_rv_params'],
                    scene_objs=scene_objs,
                    stable_pose=stable_pose)
                render_sample = urv.rvs(size=self.random_positions)
                # for render_sample in render_samples:

                binary_im = render_sample.renders[RenderMode.SEGMASK].image
                depth_im = render_sample.renders[
                    RenderMode.DEPTH_SCENE].image.crop(300, 300)
                orig_im = Image.fromarray(self._scale_image(depth_im.data))
                if self.show_images:
                    orig_im.show()
                orig_im.convert('RGB').save(self.output_dir + '/images/' +
                                            object_id + '_elev_' +
                                            str(self.elev) + '_original.png')
                print("Saved original")

                T_stp_camera = render_sample.camera.object_to_camera_pose
                shifted_camera_intr = render_sample.camera.camera_intr.crop(
                    300, 300, 240, 320)
                depth_points = self._reproject_to_3D(depth_im,
                                                     shifted_camera_intr)

                transformed_points, T_camera = self._transformation(
                    depth_points)

                camera_dir = np.dot(T_camera.rotation,
                                    np.array([0.0, 0.0, -1.0]))

                pcd = o3d.geometry.PointCloud()
                # print(camera_dir)
                pcd.points = o3d.utility.Vector3dVector(transformed_points.T)
                # TODO check normals!!
                #  pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
                #  pcd.normals = o3d.utility.Vector3dVector(-np.asarray(pcd.normals))
                normals = np.repeat([camera_dir],
                                    len(transformed_points.T),
                                    axis=0)
                pcd.normals = o3d.utility.Vector3dVector(normals)

                # cs_points = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
                # cs_lines = [[0, 1], [0, 2], [0, 3]]
                # colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
                # cs = o3d.geometry.LineSet(points=o3d.utility.Vector3dVector(cs_points),
                #                           lines=o3d.utility.Vector2iVector(cs_lines))
                # cs.colors = o3d.utility.Vector3dVector(colors)
                # o3d.visualization.draw_geometries([pcd])

                depth = self._o3d_meshing(pcd)

                # projected_depth_im,new_camera_intr,table_height = self._projection(new_points,shifted_camera_intr)
                new_camera_intr = shifted_camera_intr
                new_camera_intr.cx = 150
                new_camera_intr.cy = 150
                projected_depth_im = np.asarray(depth)
                projected_depth_im[projected_depth_im == 0.0] = -1.0
                table_height = np.median(
                    projected_depth_im[projected_depth_im != -1.0].flatten())
                print("Minimum depth:", min(projected_depth_im.flatten()))
                print("Maximum depth:", max(projected_depth_im.flatten()))

                im = Image.fromarray(self._scale_image(projected_depth_im))

                projected_depth_im = DepthImage(projected_depth_im,
                                                frame='new_camera')

                cx = projected_depth_im.center[1]
                cy = projected_depth_im.center[0]

                # Grasp conversion
                T_obj_old_camera = T_stp_camera * T_obj_stp.as_frames(
                    'obj', T_stp_camera.from_frame)
                T_obj_camera = T_camera.dot(T_obj_old_camera)
                for grasp_info in candidate_grasp_info:
                    grasp = grasp_info.grasp
                    collision_free = grasp_info.collision_free

                    grasp_2d = grasp.project_camera(T_obj_camera,
                                                    new_camera_intr)
                    dx = cx - grasp_2d.center.x
                    dy = cy - grasp_2d.center.y
                    translation = np.array([dy, dx])

                    # Project 3D old_camera_cs contact points into new camera cs

                    contact_points = np.append(grasp_info.contact_point1, 1).T
                    new_cam = np.dot(T_obj_camera.matrix, contact_points)
                    c1 = new_camera_intr.project(
                        Point(new_cam[0:3], frame=new_camera_intr.frame))
                    contact_points = np.append(grasp_info.contact_point2, 1).T
                    new_cam = np.dot(T_obj_camera.matrix, contact_points)
                    c2 = new_camera_intr.project(
                        Point(new_cam[0:3], frame=new_camera_intr.frame))

                    # Check if there are occlusions at contact points
                    if projected_depth_im.data[
                            c1.x, c1.y] == -1.0 or projected_depth_im.data[
                                c2.x, c2.y] == -1.0:
                        print("Contact point at occlusion")
                        contact_occlusion = True
                    else:
                        contact_occlusion = False
                    # Mark contact points in image
                    im = im.convert('RGB')
                    if False:  # disabled: debug drawing of the contact points
                        im_draw = ImageDraw.Draw(im)
                        im_draw.line([(c1[0], c1[1] - 10),
                                      (c1[0], c1[1] + 10)],
                                     fill=(255, 0, 0, 255))
                        im_draw.line([(c1[0] - 10, c1[1]),
                                      (c1[0] + 10, c1[1])],
                                     fill=(255, 0, 0, 255))
                        im_draw.line([(c2[0], c2[1] - 10),
                                      (c2[0], c2[1] + 10)],
                                     fill=(255, 0, 0, 255))
                        im_draw.line([(c2[0] - 10, c2[1]),
                                      (c2[0] + 10, c2[1])],
                                     fill=(255, 0, 0, 255))
                    if self.show_images:
                        im.show()
                    im.save(self.output_dir + '/images/' + object_id +
                            '_elev_' + str(self.elev) + '_reprojected.png')

                    # Transform and crop image

                    depth_im_tf = projected_depth_im.transform(
                        translation, grasp_2d.angle)
                    depth_im_tf = depth_im_tf.crop(96, 96)

                    # Apply transformation to contact points
                    trans_map = np.array([[1, 0, dx], [0, 1, dy]])
                    rot_map = cv2.getRotationMatrix2D(
                        (cx, cy), np.rad2deg(grasp_2d.angle), 1)
                    trans_map_aff = np.r_[trans_map, [[0, 0, 1]]]
                    rot_map_aff = np.r_[rot_map, [[0, 0, 1]]]
                    full_map = rot_map_aff.dot(trans_map_aff)
                    # print("Full map",full_map)
                    c1_rotated = (np.dot(full_map, np.r_[c1.vector, [1]]) -
                                  np.array([150 - 48, 150 - 48, 0])) / 3
                    c2_rotated = (np.dot(full_map, np.r_[c2.vector, [1]]) -
                                  np.array([150 - 48, 150 - 48, 0])) / 3

                    grasp_line = depth_im_tf.data[48]
                    occlusions = len(np.where(np.squeeze(grasp_line) == -1)[0])

                    # Set occlusions to table height for resizing image
                    depth_im_tf.data[depth_im_tf.data == -1.0] = table_height

                    depth_image = Image.fromarray(np.asarray(depth_im_tf.data))\
                        .resize((32, 32), resample=Image.BILINEAR)
                    depth_im_tf_table = np.asarray(depth_image).reshape(
                        32, 32, 1)

                    # depth_im_tf_table = depth_im_tf.resize((32, 32,), interp='bilinear')

                    im = Image.fromarray(
                        self._scale_image(depth_im_tf_table.reshape(
                            32, 32))).convert('RGB')
                    draw = ImageDraw.Draw(im)
                    draw.line([(c1_rotated[0], c1_rotated[1] - 3),
                               (c1_rotated[0], c1_rotated[1] + 3)],
                              fill=(255, 0, 0, 255))
                    draw.line([(c1_rotated[0] - 3, c1_rotated[1]),
                               (c1_rotated[0] + 3, c1_rotated[1])],
                              fill=(255, 0, 0, 255))
                    draw.line([(c2_rotated[0], c2_rotated[1] - 3),
                               (c2_rotated[0], c2_rotated[1] + 3)],
                              fill=(255, 0, 0, 255))
                    draw.line([(c2_rotated[0] - 3, c2_rotated[1]),
                               (c2_rotated[0] + 3, c2_rotated[1])],
                              fill=(255, 0, 0, 255))
                    if self.show_images:
                        im.show()
                    im.save(self.output_dir + '/images/' + object_id +
                            '_elev_' + str(self.elev) + '_transformed.png')

                    hand_pose = np.r_[grasp_2d.center.y, grasp_2d.center.x,
                                      grasp_2d.depth, grasp_2d.angle,
                                      grasp_2d.center.y - new_camera_intr.cy,
                                      grasp_2d.center.x - new_camera_intr.cx,
                                      grasp_2d.width_px / 3]

                    self.tensor_datapoint[
                        'depth_ims_tf_table'] = depth_im_tf_table
                    self.tensor_datapoint['hand_poses'] = hand_pose
                    self.tensor_datapoint['obj_labels'] = self.cur_obj_label
                    self.tensor_datapoint['collision_free'] = collision_free
                    self.tensor_datapoint['pose_labels'] = self.cur_pose_label
                    self.tensor_datapoint[
                        'image_labels'] = self.cur_image_label
                    self.tensor_datapoint['files'] = [self.tensor, self.array]
                    self.tensor_datapoint['occlusions'] = occlusions
                    self.tensor_datapoint[
                        'contact_occlusion'] = contact_occlusion

                    for metric_name, metric_val in self.grasp_metrics[str(
                            grasp.id)].items():
                        coll_free_metric = (1 * collision_free) * metric_val
                        self.tensor_datapoint[metric_name] = coll_free_metric
                    self.tensor_dataset.add(self.tensor_datapoint)
                    print("Saved dataset point")
                    self.cur_image_label += 1
                self.cur_pose_label += 1
                gc.collect()
            self.cur_obj_label += 1

        self.tensor_dataset.flush()

    def _o3d_meshing(self, pcd):
        mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
            pcd, depth=15)
        densities = np.asarray(densities)

        # print('visualize densities')
        # densities = np.asarray(densities)
        # density_colors = plt.get_cmap('plasma')(
        #     (densities - densities.min()) / (densities.max() - densities.min()))
        # density_colors = density_colors[:, :3]
        # density_mesh = o3d.geometry.TriangleMesh()
        # density_mesh.vertices = mesh.vertices
        # density_mesh.triangles = mesh.triangles
        # density_mesh.triangle_normals = mesh.triangle_normals
        # density_mesh.vertex_colors = o3d.utility.Vector3dVector(density_colors)
        # o3d.visualization.draw_geometries([density_mesh])

        vertices_to_remove = densities < 7.0  # np.quantile(densities, 0.01)
        mesh.remove_vertices_by_mask(vertices_to_remove)
        mesh.compute_vertex_normals()
        mesh.paint_uniform_color([0.6, 0.6, 0.6])
        o3d.io.write_triangle_mesh(
            self.output_dir + '/meshes/' + "%05d_%03d.ply" %
            (self.tensor, self.array), mesh)

        if self.show_images:  # `visualise_mesh` was undefined; reuse the instance flag
            o3d.visualization.draw_geometries([mesh])
        vis = o3d.visualization.Visualizer()
        vis.create_window(height=300, width=300, visible=False)
        vis.get_render_option().load_from_json("./data/renderconfig.json")
        vis.add_geometry(mesh)
        vic = vis.get_view_control()
        params = vic.convert_to_pinhole_camera_parameters()
        (fx, fy) = params.intrinsic.get_focal_length()
        (cx, cy) = params.intrinsic.get_principal_point()
        params.intrinsic.set_intrinsics(300, 300, 525, 525, cx, cy)
        params.extrinsic = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                                     [0, 0, 0, 1]])
        vic.convert_from_pinhole_camera_parameters(params)
        vis.poll_events()
        vis.update_renderer()
        depth = vis.capture_depth_float_buffer(do_render=True)
        # vis.destroy_window()
        # del vis
        return depth

    def _projection(self, transformed_points, camera_intr):
        # Use Camera intrinsics
        new_camera_intr = camera_intr
        new_camera_intr.cx = 150
        new_camera_intr.cy = 150
        K = new_camera_intr.proj_matrix

        projected_points = np.dot(K, transformed_points)

        point_depths = projected_points[2, :]
        table = np.median(point_depths)
        point_z = np.tile(point_depths, [3, 1])
        points_proj = np.divide(projected_points, point_z)

        # Rounding
        points_proj = np.round(points_proj)
        points_proj = points_proj[:2, :].astype(np.int16)

        valid_ind = np.where((points_proj[0, :] >= 0)
                             & (points_proj[0, :] < self.image_size[0])
                             & (points_proj[1, :] >= 0)
                             & (points_proj[1, :] < self.image_size[0]))[0]
        # Fill new image with NaN's
        fill_value = -1.0
        depth_data = np.full([self.image_size[0], self.image_size[1]],
                             fill_value)
        for ind in valid_ind:
            prev_depth = depth_data[points_proj[1, ind], points_proj[0, ind]]
            if prev_depth == fill_value or prev_depth >= point_depths[ind]:
                depth_data[points_proj[1, ind],
                           points_proj[0, ind]] = point_depths[ind]

        return depth_data, new_camera_intr, table

    def _transformation(self, points):
        # Points are given in camera frame. Transform to new camera frame!
        camera_new_position = np.array([[0], [0], [0]])

        ang = np.deg2rad(self.elev)
        Rot = np.array([[1, 0, 0], [0, np.cos(ang), -np.sin(ang)],
                        [0, np.sin(ang), np.cos(ang)]])
        dist = self.config['env_rv_params']['min_radius'] * np.sin(ang)
        height = self.config['env_rv_params']['min_radius'] - self.config[
            'env_rv_params']['min_radius'] * np.cos(ang)
        # Rotation around x axis, therefore translation back to object center alongside y axis
        trans = np.array([0, dist, height])
        Rt = np.column_stack((Rot, trans))
        # Apply transformation to homogeneous coordinates of the points
        homogeneous_points = np.append(np.transpose(points),
                                       np.ones((1, len(points))),
                                       axis=0)
        transformed_points = np.dot(Rt, homogeneous_points)
        return transformed_points, RigidTransform(rotation=Rot,
                                                  translation=trans,
                                                  from_frame='camera',
                                                  to_frame='new_camera')

    # def _PCA(self, points, sorting=True):
    #     mean = np.mean(points, axis=0)
    #     data_adjusted = points - mean
    #
    #     matrix = np.cov(data_adjusted.T)
    #     eigenvalues, eigenvectors = np.linalg.eig(matrix)
    #
    #     if sorting:
    #         sort = eigenvalues.argsort()[::-1]
    #         eigenvalues = eigenvalues[sort]
    #         eigenvectors = eigenvectors[:, sort]
    #     return eigenvalues, eigenvectors

    def _reproject_to_3D(self, depth_im, camera_intr):
        # depth points will be given in camera frame!
        depth_points = camera_intr.deproject(depth_im).data.T
        return depth_points

    def _scale_image(self, depth):
        size = depth.shape
        flattened = depth.flatten()
        scaled = np.interp(flattened, (0.5, 0.75), (0, 255))
        integ = scaled.astype(np.uint8)
        integ.resize(size)
        return integ
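A minimal driver for the class above, assuming DATA_DIR points at the mounted data directory and the hard-coded YAML config path exists:

if __name__ == '__main__':
    # Write into DATA_DIR/reprojections/elev_30, re-rendering at 30 degrees elevation.
    reprojection = Reprojection(path='elev_30', elev=30)
    reprojection.start_rendering()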
Example #6
def generate_gqcnn_dataset(dataset_path, database, target_object_keys,
                           env_rv_params, gripper_name, config):
    """
    Generates a GQ-CNN TensorDataset for training models with new grippers, quality metrics, objects, and cameras.

    Parameters
    ----------
    dataset_path : str
        path to save the dataset to
    database : :obj:`Hdf5Database`
        Dex-Net database containing the 3D meshes, grasps, and grasp metrics
    target_object_keys : :obj:`OrderedDict`
        dictionary mapping dataset names to target objects (either 'all' or a list of specific object keys)
    env_rv_params : :obj:`OrderedDict`
        parameters of the camera and object random variables used in sampling (see meshpy.UniformPlanarWorksurfaceImageRandomVariable for more info)
    gripper_name : str
        name of the gripper to use
    config : :obj:`autolab_core.YamlConfig`
        other parameters for dataset generation

    Notes
    -----
    Required parameters of config are specified in Other Parameters

    Other Parameters
    ----------------    
    images_per_stable_pose : int
        number of object and camera poses to sample for each stable pose
    stable_pose_min_p : float
        minimum probability of occurrence for a stable pose to be used in data generation (used to prune bad stable poses)
    
    gqcnn/crop_width : int
        width, in pixels, of crop region around each grasp center, before resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/crop_height : int
        height, in pixels, of crop region around each grasp center, before resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/final_width : int
        width, in pixels, of final transformed grasp image for input to the GQ-CNN (defaults to 32)
    gqcnn/final_height : int
        height, in pixels, of final transformed grasp image for input to the GQ-CNN (defaults to 32)

    table_alignment/max_approach_table_angle : float
        max angle between the grasp axis and the table normal when the grasp approach is maximally aligned with the table normal
    table_alignment/max_approach_offset : float
        max deviation from perpendicular approach direction to use in grasp collision checking
    table_alignment/num_approach_offset_samples : int
        number of approach samples to use in collision checking

    collision_checking/table_offset : float
        max allowable interpenetration between the gripper and table to be considered collision free
    collision_checking/table_mesh_filename : str
        path to a table mesh for collision checking (default data/meshes/table.obj)
    collision_checking/approach_dist : float
        distance, in meters, between the approach pose and final grasp pose along the grasp axis
    collision_checking/delta_approach : float
        amount, in meters, to discretize the straight-line path from the gripper approach pose to the final grasp pose

    tensors/datapoints_per_file : int
        number of datapoints to store in each unique tensor file on disk
    tensors/fields : :obj:`dict`
        dictionary mapping field names to dictionaries specifying the data type, height, width, and number of channels for each tensor

    debug : bool
        True (or 1) if the random seed should be set to enforce deterministic behavior, False (0) otherwise
    vis/candidate_grasps : bool
        True (or 1) if the collision free candidate grasps should be displayed in 3D (for debugging)
    vis/rendered_images : bool
        True (or 1) if the rendered images for each stable pose should be displayed (for debugging)
    vis/grasp_images : bool
        True (or 1) if the transformed grasp images should be displayed (for debugging)
    """
    # read data gen params
    output_dir = dataset_path
    gripper = RobotGripper.load(gripper_name)
    image_samples_per_stable_pose = config['images_per_stable_pose']
    stable_pose_min_p = config['stable_pose_min_p']

    # read gqcnn params
    gqcnn_params = config['gqcnn']
    im_crop_height = gqcnn_params['crop_height']
    im_crop_width = gqcnn_params['crop_width']
    im_final_height = gqcnn_params['final_height']
    im_final_width = gqcnn_params['final_width']
    cx_crop = float(im_crop_width) / 2
    cy_crop = float(im_crop_height) / 2

    # open database
    dataset_names = target_object_keys.keys()
    datasets = [database.dataset(dn) for dn in dataset_names]

    # set target objects
    for dataset in datasets:
        if target_object_keys[dataset.name] == 'all':
            target_object_keys[dataset.name] = dataset.object_keys

    # setup grasp params
    table_alignment_params = config['table_alignment']
    min_grasp_approach_offset = -np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_offset = np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_table_angle = np.deg2rad(
        table_alignment_params['max_approach_table_angle'])
    num_grasp_approach_samples = table_alignment_params[
        'num_approach_offset_samples']

    phi_offsets = []
    if max_grasp_approach_offset == min_grasp_approach_offset:
        phi_inc = 1
    elif num_grasp_approach_samples == 1:
        phi_inc = max_grasp_approach_offset - min_grasp_approach_offset + 1
    else:
        phi_inc = (max_grasp_approach_offset - min_grasp_approach_offset) / (
            num_grasp_approach_samples - 1)

    phi = min_grasp_approach_offset
    while phi <= max_grasp_approach_offset:
        phi_offsets.append(phi)
        phi += phi_inc

    # setup collision checking
    coll_check_params = config['collision_checking']
    approach_dist = coll_check_params['approach_dist']
    delta_approach = coll_check_params['delta_approach']
    table_offset = coll_check_params['table_offset']

    table_mesh_filename = coll_check_params['table_mesh_filename']
    if not os.path.isabs(table_mesh_filename):
        table_mesh_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..',
            table_mesh_filename)
    table_mesh = ObjFile(table_mesh_filename).read()

    # set tensor dataset config
    tensor_config = config['tensors']
    tensor_config['fields']['depth_ims_tf_table']['height'] = im_final_height
    tensor_config['fields']['depth_ims_tf_table']['width'] = im_final_width
    tensor_config['fields']['obj_masks']['height'] = im_final_height
    tensor_config['fields']['obj_masks']['width'] = im_final_width

    # add available metrics (assuming same are computed for all objects)
    metric_names = []
    dataset = datasets[0]
    obj_keys = dataset.object_keys
    if len(obj_keys) == 0:
        raise ValueError('No valid objects in dataset %s' % (dataset.name))

    obj = dataset[obj_keys[0]]
    grasps = dataset.grasps(obj.key, gripper=gripper.name)
    grasp_metrics = dataset.grasp_metrics(obj.key,
                                          grasps,
                                          gripper=gripper.name)
    metric_names = list(next(iter(grasp_metrics.values())).keys())
    for metric_name in metric_names:
        tensor_config['fields'][metric_name] = {}
        tensor_config['fields'][metric_name]['dtype'] = 'float32'

    # init tensor dataset
    tensor_dataset = TensorDataset(output_dir, tensor_config)
    tensor_datapoint = tensor_dataset.datapoint_template

    # setup log file
    experiment_log_filename = os.path.join(output_dir,
                                           'dataset_generation.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)
    root_logger = logging.getLogger()

    # copy config
    out_config_filename = os.path.join(output_dir, 'dataset_generation.json')
    ordered_dict_config = collections.OrderedDict()
    for key in config.keys():
        ordered_dict_config[key] = config[key]
    with open(out_config_filename, 'w') as outfile:
        json.dump(ordered_dict_config, outfile)

    # 1. Precompute the set of valid grasps for each stable pose:
    #    i) Perpendicular to the table
    #   ii) Collision-free along the approach direction

    # load grasps if they already exist
    grasp_cache_filename = os.path.join(output_dir, CACHE_FILENAME)
    if os.path.exists(grasp_cache_filename):
        logging.info('Loading grasp candidates from file')
        candidate_grasps_dict = pkl.load(open(grasp_cache_filename, 'rb'))
    # otherwise re-compute by reading from the database and enforcing constraints
    else:
        # create grasps dict
        candidate_grasps_dict = {}

        # loop through datasets and objects
        for dataset in datasets:
            logging.info('Reading dataset %s' % (dataset.name))
            for obj in dataset:
                if obj.key not in target_object_keys[dataset.name]:
                    continue

                # init candidate grasp storage
                candidate_grasps_dict[obj.key] = {}

                # setup collision checker
                collision_checker = GraspCollisionChecker(gripper)
                collision_checker.set_graspable_object(obj)

                # read in the stable poses of the mesh
                stable_poses = dataset.stable_poses(obj.key)
                for i, stable_pose in enumerate(stable_poses):
                    # render images if stable pose is valid
                    if stable_pose.p > stable_pose_min_p:
                        candidate_grasps_dict[obj.key][stable_pose.id] = []

                        # setup table in collision checker
                        T_obj_stp = stable_pose.T_obj_table.as_frames(
                            'obj', 'stp')
                        T_obj_table = obj.mesh.get_T_surface_obj(
                            T_obj_stp,
                            delta=table_offset).as_frames('obj', 'table')
                        T_table_obj = T_obj_table.inverse()
                        collision_checker.set_table(table_mesh_filename,
                                                    T_table_obj)

                        # read grasp and metrics
                        grasps = dataset.grasps(obj.key, gripper=gripper.name)
                        logging.info(
                            'Aligning %d grasps for object %s in stable pose %s'
                            % (len(grasps), obj.key, stable_pose.id))

                        # align grasps with the table
                        aligned_grasps = [
                            grasp.perpendicular_table(stable_pose)
                            for grasp in grasps
                        ]

                        # check grasp validity
                        logging.info(
                            'Checking collisions for %d grasps for object %s in stable pose %s'
                            % (len(grasps), obj.key, stable_pose.id))
                        for aligned_grasp in aligned_grasps:
                            # check angle with table plane and skip unaligned grasps
                            _, grasp_approach_table_angle, _ = aligned_grasp.grasp_angles_from_stp_z(
                                stable_pose)
                            perpendicular_table = (
                                np.abs(grasp_approach_table_angle) <
                                max_grasp_approach_table_angle)
                            if not perpendicular_table:
                                continue

                            # check whether any valid approach directions are collision free
                            collision_free = False
                            for phi_offset in phi_offsets:
                                rotated_grasp = aligned_grasp.grasp_y_axis_offset(
                                    phi_offset)
                                collides = collision_checker.collides_along_approach(
                                    rotated_grasp, approach_dist,
                                    delta_approach)
                                if not collides:
                                    collision_free = True
                                    break

                            # store if aligned to table
                            candidate_grasps_dict[obj.key][
                                stable_pose.id].append(
                                    GraspInfo(aligned_grasp, collision_free))

                            # visualize if specified
                            if collision_free and config['vis'][
                                    'candidate_grasps']:
                                logging.info('Grasp %d' % (aligned_grasp.id))
                                vis.figure()
                                vis.gripper_on_object(gripper, aligned_grasp,
                                                      obj,
                                                      stable_pose.T_obj_world)
                                vis.show()

        # save to file
        logging.info('Saving to file')
        pkl.dump(candidate_grasps_dict, open(grasp_cache_filename, 'wb'))

    # 2. Render a dataset of images and associate the gripper pose with image coordinates for each grasp in the Dex-Net database

    # setup variables
    obj_category_map = {}
    pose_category_map = {}

    cur_pose_label = 0
    cur_obj_label = 0
    cur_image_label = 0

    # render images for each stable pose of each object in the dataset
    render_modes = [RenderMode.SEGMASK, RenderMode.DEPTH_SCENE]
    for dataset in datasets:
        logging.info('Generating data for dataset %s' % (dataset.name))

        # iterate through all object keys
        object_keys = dataset.object_keys
        for obj_key in object_keys:
            obj = dataset[obj_key]
            if obj.key not in target_object_keys[dataset.name]:
                continue

            # read in the stable poses of the mesh
            stable_poses = dataset.stable_poses(obj.key)
            for i, stable_pose in enumerate(stable_poses):

                # render images if stable pose is valid
                if stable_pose.p > stable_pose_min_p:
                    # log progress
                    logging.info('Rendering images for object %s in %s' %
                                 (obj.key, stable_pose.id))

                    # add to category maps
                    if obj.key not in obj_category_map.keys():
                        obj_category_map[obj.key] = cur_obj_label
                    pose_category_map['%s_%s' %
                                      (obj.key,
                                       stable_pose.id)] = cur_pose_label

                    # read in candidate grasps and metrics
                    candidate_grasp_info = candidate_grasps_dict[obj.key][
                        stable_pose.id]
                    candidate_grasps = [g.grasp for g in candidate_grasp_info]
                    grasp_metrics = dataset.grasp_metrics(obj.key,
                                                          candidate_grasps,
                                                          gripper=gripper.name)

                    # compute object pose relative to the table
                    T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                    T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)

                    # sample images from random variable
                    T_table_obj = RigidTransform(from_frame='table',
                                                 to_frame='obj')
                    scene_objs = {
                        'table': SceneObject(table_mesh, T_table_obj)
                    }
                    urv = UniformPlanarWorksurfaceImageRandomVariable(
                        obj.mesh,
                        render_modes,
                        'camera',
                        env_rv_params,
                        stable_pose=stable_pose,
                        scene_objs=scene_objs)

                    render_start = time.time()
                    render_samples = urv.rvs(
                        size=image_samples_per_stable_pose)
                    render_stop = time.time()
                    logging.info('Rendering images took %.3f sec' %
                                 (render_stop - render_start))

                    # visualize
                    if config['vis']['rendered_images']:
                        d = int(np.ceil(
                            np.sqrt(image_samples_per_stable_pose)))

                        # binary
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[
                                RenderMode.SEGMASK].image)

                        # depth table
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[
                                RenderMode.DEPTH_SCENE].image)
                        vis2d.show()

                    # tally total amount of data
                    num_grasps = len(candidate_grasps)
                    num_images = image_samples_per_stable_pose
                    num_save = num_images * num_grasps
                    logging.info('Saving %d datapoints' % (num_save))

                    # for each candidate grasp on the object compute the projection
                    # of the grasp into image space
                    for render_sample in render_samples:
                        # read images
                        binary_im = render_sample.renders[
                            RenderMode.SEGMASK].image
                        depth_im_table = render_sample.renders[
                            RenderMode.DEPTH_SCENE].image
                        # read camera params
                        T_stp_camera = render_sample.camera.object_to_camera_pose
                        shifted_camera_intr = render_sample.camera.camera_intr

                        # read pixel offsets
                        cx = depth_im_table.center[1]
                        cy = depth_im_table.center[0]

                        # compute intrinsics for virtual camera of the final
                        # cropped and rescaled images
                        camera_intr_scale = float(im_final_height) / float(
                            im_crop_height)
                        cropped_camera_intr = shifted_camera_intr.crop(
                            im_crop_height, im_crop_width, cy, cx)
                        final_camera_intr = cropped_camera_intr.resize(
                            camera_intr_scale)

                        # create a thumbnail for each grasp
                        for grasp_info in candidate_grasp_info:
                            # read info
                            grasp = grasp_info.grasp
                            collision_free = grasp_info.collision_free

                            # get the gripper pose
                            T_obj_camera = T_stp_camera * T_obj_stp.as_frames(
                                'obj', T_stp_camera.from_frame)
                            grasp_2d = grasp.project_camera(
                                T_obj_camera, shifted_camera_intr)

                            # center images on the grasp, rotate to image x axis
                            dx = cx - grasp_2d.center.x
                            dy = cy - grasp_2d.center.y
                            translation = np.array([dy, dx])

                            binary_im_tf = binary_im.transform(
                                translation, grasp_2d.angle)
                            depth_im_tf_table = depth_im_table.transform(
                                translation, grasp_2d.angle)

                            # crop to image size
                            binary_im_tf = binary_im_tf.crop(
                                im_crop_height, im_crop_width)
                            depth_im_tf_table = depth_im_tf_table.crop(
                                im_crop_height, im_crop_width)

                            # resize to image size
                            binary_im_tf = binary_im_tf.resize(
                                (im_final_height, im_final_width),
                                interp='nearest')
                            depth_im_tf_table = depth_im_tf_table.resize(
                                (im_final_height, im_final_width))

                            # visualize the transformed images
                            if config['vis']['grasp_images']:
                                grasp_center = Point(
                                    depth_im_tf_table.center,
                                    frame=final_camera_intr.frame)
                                tf_grasp_2d = Grasp2D(
                                    grasp_center,
                                    0,
                                    grasp_2d.depth,
                                    width=gripper.max_width,
                                    camera_intr=final_camera_intr)

                                # plot 2D grasp image
                                vis2d.figure()
                                vis2d.subplot(2, 2, 1)
                                vis2d.imshow(binary_im)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 2)
                                vis2d.imshow(depth_im_table)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 3)
                                vis2d.imshow(binary_im_tf)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.subplot(2, 2, 4)
                                vis2d.imshow(depth_im_tf_table)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.title('Coll Free? %d' %
                                            (grasp_info.collision_free))
                                vis2d.show()

                                # plot 3D visualization
                                vis.figure()
                                T_obj_world = vis.mesh_stable_pose(
                                    obj.mesh,
                                    stable_pose.T_obj_world,
                                    style='surface',
                                    dim=0.5)
                                vis.gripper(gripper,
                                            grasp,
                                            T_obj_world,
                                            color=(0.3, 0.3, 0.3))
                                vis.show()

                            # form hand pose array
                            hand_pose = np.r_[grasp_2d.center.y,
                                              grasp_2d.center.x,
                                              grasp_2d.depth, grasp_2d.angle,
                                              grasp_2d.center.y -
                                              shifted_camera_intr.cy,
                                              grasp_2d.center.x -
                                              shifted_camera_intr.cx,
                                              grasp_2d.width_px]

                            # store to data buffers
                            tensor_datapoint[
                                'depth_ims_tf_table'] = depth_im_tf_table.raw_data
                            tensor_datapoint[
                                'obj_masks'] = binary_im_tf.raw_data
                            tensor_datapoint['hand_poses'] = hand_pose
                            tensor_datapoint['collision_free'] = collision_free
                            tensor_datapoint['obj_labels'] = cur_obj_label
                            tensor_datapoint['pose_labels'] = cur_pose_label
                            tensor_datapoint['image_labels'] = cur_image_label

                            for metric_name, metric_val in grasp_metrics[
                                    grasp.id].items():
                                coll_free_metric = (
                                    1 * collision_free) * metric_val
                                tensor_datapoint[
                                    metric_name] = coll_free_metric
                            tensor_dataset.add(tensor_datapoint)

                        # update image label
                        cur_image_label += 1

                    # update pose label
                    cur_pose_label += 1

                    # force clean up
                    gc.collect()

            # update object label
            cur_obj_label += 1

            # force clean up
            gc.collect()

    # save last file
    tensor_dataset.flush()

    # save category mappings
    obj_cat_filename = os.path.join(output_dir, 'object_category_map.json')
    json.dump(obj_category_map, open(obj_cat_filename, 'w'))
    pose_cat_filename = os.path.join(output_dir, 'pose_category_map.json')
    json.dump(pose_category_map, open(pose_cat_filename, 'w'))
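The approach-offset sweep near the top of this function (and repeated in the next example) uniformly samples grasp-axis rotations in [-max_offset, +max_offset]. A standalone sketch of the common case, with illustrative parameters:

import numpy as np

max_offset = np.deg2rad(5.0)  # illustrative max_approach_offset
num_samples = 5               # illustrative num_approach_offset_samples

# Equivalent of the while-loop sweep when num_samples > 1 and max > min:
phi_offsets = np.linspace(-max_offset, max_offset, num_samples)
print(np.rad2deg(phi_offsets))  # [-5.  -2.5  0.   2.5  5. ]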
Example #7
def save_dexnet_objects(output_path, database, target_object_keys, config,
                        pointers, num):
    slice_dataset = False
    files = []

    if not os.path.exists(output_path):
        os.mkdir(output_path)
    for each_file in os.listdir(output_path):
        os.remove(output_path + '/' + each_file)
    gripper = RobotGripper.load(config['gripper'])

    # Setup grasp params:
    table_alignment_params = config['table_alignment']
    min_grasp_approach_offset = -np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_offset = np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_table_angle = np.deg2rad(
        table_alignment_params['max_approach_table_angle'])
    num_grasp_approach_samples = table_alignment_params[
        'num_approach_offset_samples']

    phi_offsets = []
    if max_grasp_approach_offset == min_grasp_approach_offset:
        phi_inc = 1
    elif num_grasp_approach_samples == 1:
        phi_inc = max_grasp_approach_offset - min_grasp_approach_offset + 1
    else:
        phi_inc = (max_grasp_approach_offset - min_grasp_approach_offset) / (
            num_grasp_approach_samples - 1)

    phi = min_grasp_approach_offset
    while phi <= max_grasp_approach_offset:
        phi_offsets.append(phi)
        phi += phi_inc

    # Setup collision checking
    coll_check_params = config['collision_checking']
    approach_dist = coll_check_params['approach_dist']
    delta_approach = coll_check_params['delta_approach']
    table_offset = coll_check_params['table_offset']
    stable_pose_min_p = config['stable_pose_min_p']

    table_mesh_filename = coll_check_params['table_mesh_filename']
    if not os.path.isabs(table_mesh_filename):
        table_mesh_filename = os.path.join(DATA_DIR, table_mesh_filename)
    # table_mesh_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', table_mesh_filename)
    # table_mesh = ObjFile(table_mesh_filename).read()

    dataset_names = target_object_keys.keys()
    datasets = [database.dataset(dn) for dn in dataset_names]

    if slice_dataset:
        datasets = [dataset.subset(0, 100) for dataset in datasets]

    start = 0
    for dataset in datasets:
        target_object_keys[dataset.name] = []
        end = start + len(dataset.object_keys)
        for cnt, _id in enumerate(pointers.obj_ids):
            if _id >= end or _id < start:
                continue
            target_object_keys[dataset.name].append(dataset.object_keys[_id -
                                                                        start])
            files.append(
                tuple([
                    dataset.object_keys[_id - start], pointers.tensor[cnt],
                    pointers.array[cnt], pointers.depths[cnt]
                ]))
        start = end  # advance past this dataset (`start += end` would overshoot)
    print(target_object_keys)
    print("target object keys:", len(target_object_keys['3dnet']),
          len(target_object_keys['kit']))
    files = np.array(files,
                     dtype=[('Object_id', (np.str_, 40)), ('Tensor', int),
                            ('Array', int), ('Depth', float)])

    # Precompute set of valid grasps
    candidate_grasps_dict = {}

    counter = 0
    for dataset in datasets:
        for obj in dataset:
            if obj.key not in target_object_keys[dataset.name]:
                continue
            print("Object in subset")
            # Initiate candidate grasp storage
            candidate_grasps_dict[obj.key] = {}

            # Setup collision checker
            collision_checker = GraspCollisionChecker(gripper)
            collision_checker.set_graspable_object(obj)

            # Read in the stable poses of the mesh
            stable_poses = dataset.stable_poses(obj.key)
            try:
                stable_pose = stable_poses[pointers.pose_num[counter]]
            except IndexError:
                print(
                    "Problems with reading pose. Tensor %d, Array %d, Pose %d"
                    % (pointers.tensor[counter], pointers.array[counter],
                       pointers.pose_num[counter]))
                print("Stable poses:", stable_poses)
                print("Pointers pose:", pointers.pose_num[counter])
                counter += 1
                print("Continue.")
                continue
            print("Read in stable pose")
            candidate_grasps_dict[obj.key][stable_pose.id] = []

            # Setup table in collision checker
            T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
            T_obj_table = obj.mesh.get_T_surface_obj(
                T_obj_stp, delta=table_offset).as_frames('obj', 'table')
            T_table_obj = T_obj_table.inverse()

            collision_checker.set_table(table_mesh_filename, T_table_obj)

            # read grasp and metrics
            grasps = dataset.grasps(obj.key, gripper=gripper.name)

            # align grasps with the table
            aligned_grasps = [
                grasp.perpendicular_table(stable_pose) for grasp in grasps
            ]
            i = 0
            found = False
            if len(aligned_grasps) <= pointers.grasp_num[counter]:
                raise IndexError(
                    "Grasp index %d out of range for %d aligned grasps"
                    " of object %s" % (pointers.grasp_num[counter],
                                       len(aligned_grasps), obj.key))
            print("pointers grasp num", pointers.grasp_num[counter])
            print("tensor", pointers.tensor[counter])
            print("array", pointers.array[counter])
            # Check grasp validity
            for aligned_grasp in aligned_grasps:
                # Check angle with table plane and skip unaligned grasps
                _, grasp_approach_table_angle, _ = aligned_grasp.grasp_angles_from_stp_z(
                    stable_pose)
                perpendicular_table = (np.abs(grasp_approach_table_angle) <
                                       max_grasp_approach_table_angle)
                if not perpendicular_table:
                    continue

                # Check whether any valid approach directions are collision free
                collision_free = False
                for phi_offset in phi_offsets:
                    rotated_grasp = aligned_grasp.grasp_y_axis_offset(
                        phi_offset)
                    collides = collision_checker.collides_along_approach(
                        rotated_grasp, approach_dist, delta_approach)
                    if not collides:
                        collision_free = True
                        break

                # Store if aligned to table
                if i == pointers.grasp_num[counter]:
                    found, contact_points = aligned_grasp.close_fingers(obj)
                    print("Contact points", contact_points)
                    if not found:
                        print("Could not find contact points. continue.")
                        break
                    else:
                        print("Original metric: ", pointers.metrics[counter])
                        print(
                            "Metrics mapped point: ",
                            dataset.grasp_metrics(obj.key, [aligned_grasp],
                                                  gripper=gripper.name))
                        candidate_grasps_dict[obj.key][stable_pose.id].append(
                            GraspInfo(aligned_grasp, collision_free, [
                                contact_points[0].point,
                                contact_points[1].point
                            ]))
                        # logging.info('Grasp %d' % (aligned_grasp.id))
                        # vis.figure()
                        # vis.gripper_on_object(gripper, aligned_grasp, obj, stable_pose.T_obj_world, plot_table=False)
                        # vis.show()
                        break

                i += 1

            if found:
                # Save files
                print("Saving file: ", obj.key)
                savefile = ObjFile(DATA_DIR + "/meshes/dexnet/" + obj.key +
                                   ".obj")
                savefile.write(obj.mesh)
                # print("Vertices:", obj.mesh.vertices.shape)
                # print("Triangles:", obj.mesh.triangles.shape)
                mesh = o3d.geometry.TriangleMesh(
                    o3d.utility.Vector3dVector(obj.mesh.vertices),
                    o3d.utility.Vector3iVector(obj.mesh.triangles))
                o3d.io.write_triangle_mesh(
                    DATA_DIR + '/PerfectPredictions/3D_meshes/' +
                    "%05d_%03d.ply" %
                    (pointers.tensor[counter], pointers.array[counter]), mesh)
                # Save stable poses
                save_stp = StablePoseFile(DATA_DIR + "/meshes/dexnet/" +
                                          obj.key + ".stp")
                save_stp.write(stable_poses)
                # Save candidate grasp info
                pkl.dump(
                    candidate_grasps_dict[obj.key],
                    open(DATA_DIR + "/meshes/dexnet/" + obj.key + ".pkl",
                         'wb'))
                # Save grasp metrics
                candidate_grasp_info = candidate_grasps_dict[obj.key][
                    stable_pose.id]
                candidate_grasps = [g.grasp for g in candidate_grasp_info]
                grasp_metrics = dataset.grasp_metrics(obj.key,
                                                      candidate_grasps,
                                                      gripper=gripper.name)
                with open(DATA_DIR + "/meshes/dexnet/" + obj.key + ".json",
                          'w') as f:
                    json.dump(grasp_metrics, f)
            counter += 1
            if num is not None and counter >= num:
                break
    with open(DATA_DIR + '/meshes/dexnet/files.csv', 'w') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        for point in files:
            csv_writer.writerow(point)
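
A minimal sketch of how save_dexnet_objects might be driven, assuming the
YamlConfig/Hdf5Database setup shown in Example #14 below. The pointers
argument is never defined in this snippet; from the attribute accesses above
it is a record-like object with obj_ids, tensor, array, depths, pose_num,
grasp_num and metrics fields, so a namedtuple stands in for it here (all
paths and the config filename are hypothetical):

# Hedged driver; YamlConfig/Hdf5Database/READ_ONLY_ACCESS as in Example #14.
import collections

# Hypothetical container mirroring the fields save_dexnet_objects reads.
Pointers = collections.namedtuple(
    'Pointers', ['obj_ids', 'tensor', 'array', 'depths',
                 'pose_num', 'grasp_num', 'metrics'])

config = YamlConfig('cfg/save_dexnet_objects.yaml')  # hypothetical path
database = Hdf5Database(config['database_name'],
                        access_level=READ_ONLY_ACCESS)
pointers = Pointers(obj_ids=..., tensor=..., array=..., depths=...,
                    pose_num=..., grasp_num=..., metrics=...)  # fill from data

save_dexnet_objects(DATA_DIR + '/meshes/dexnet', database,
                    config['target_objects'], config, pointers, num=None)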
Example #8
from meshpy import VirtualCamera, ObjFile, SceneObject
from perception import CameraIntrinsics
from visualization import Visualizer2D
from core import RigidTransform
import numpy as np
from matplotlib import pyplot as plt
from perception.object_render import RenderMode

dexnet_path = "/home/chris/optimal_manipulation_simulator/dex-net-new-api"

if __name__ == "__main__":
    print("\n\n\n\n")
    camera_intr = CameraIntrinsics.load("../config/primesense_overhead.intr")
    camera = VirtualCamera(camera_intr)

    of = ObjFile(dexnet_path + "/data/meshes/chess_pieces/WizRook.obj")
    rook1 = of.read()

    T_world_camera = RigidTransform(rotation=np.array([[-1, 0, 0], [0, 1, 0],
                                                       [0, 0, -1]]),
                                    translation=np.array([0, 0, .2]).T,
                                    from_frame="world",
                                    to_frame="primesense_overhead")
    # T_world_camera = RigidTransform(rotation = np.array([[1,0,0],[0,1,0],[0,0,1]]),
    #                                 translation=np.array([-.2,0,0]).T,
    #                                 from_frame="world",
    #                                 to_frame="primesense_overhead")

    # squares = [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0), (1,1)]
    # for i, square in enumerate(squares):
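
The snippet ends before anything is rendered, even though RenderMode and
pyplot are already imported. A hedged continuation (the exact
VirtualCamera.wrapped_images signature may differ across meshpy versions, so
treat this as a sketch) renders and displays a depth image of the rook from
the overhead pose:

    # Sketch: render a depth image; wrapped_images API assumed from meshpy.
    renders = camera.wrapped_images(
        rook1,
        [T_world_camera.as_frames('obj', 'camera')],  # obj frame taken = world
        RenderMode.DEPTH)
    plt.figure()
    plt.imshow(renders[0].image.data, cmap=plt.cm.gray)
    plt.show()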
Example #9
    output_path = os.path.join(config['calib_dir'], T_world_obj.from_frame)
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    output_filename = os.path.join(
        output_path, '{0}_to_world.tf'.format(T_world_obj.from_frame))
    print(T_world_obj)
    T_world_obj.save(output_filename)

    if config['vis'] and VIS_SUPPORTED:

        _, depth_im, _ = sensor.frames()
        pc_cam = ir_intrinsics.deproject(depth_im)
        pc_world = T_world_cam * pc_cam

        mesh_file = ObjFile(
            os.path.join(object_path, '{0}.obj'.format(args.object_name)))
        mesh = mesh_file.read()

        vis.figure(bgcolor=(0.7, 0.7, 0.7))
        vis.mesh(mesh, T_world_obj.as_frames('obj', 'world'), style='surface')
        vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(RigidTransform(from_frame='origin'),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)
        vis.pose(T_world_cam * T_cb_cam.inverse(),
                 alpha=0.04,
                 tube_radius=0.002,
                 center_scale=0.01)
        vis.points(pc_world, subsample=20)
Example #10
import os
import random
import sys

import numpy as np
from autolab_core import Point, RigidTransform
from meshpy import ObjFile, Mesh3D
from visualization import Visualizer3D as vis

if __name__ == '__main__':
    mesh_name = sys.argv[1]

    #np.random.seed(111)
    #random.seed(111)

    # read mesh
    mesh = ObjFile(mesh_name).read()

    mesh.vertices_ = np.load('../dex-net/data/meshes/lego_vertices.npy')
    mesh.center_of_mass = np.load('../dex-net/data/meshes/lego_com.npy')

    #T_obj_table = RigidTransform(rotation=[0.92275663,  0.13768089,  0.35600924, -0.05311874],
    #                             from_frame='obj', to_frame='table')
    T_obj_table = RigidTransform(
        rotation=[-0.1335021, 0.87671711, 0.41438141, 0.20452958],
        from_frame='obj',
        to_frame='table')

    stable_pose = mesh.resting_pose(T_obj_table)
    #print stable_pose.r

    table_dim = 0.3
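
The snippet ends after computing the resting pose; a short hedged
continuation, reusing the vis calls from the surrounding examples and
assuming resting_pose returns a StablePose with a T_obj_table transform,
renders the mesh in that pose (table_dim presumably fed a table primitive in
the elided code):

    vis.figure()
    vis.mesh(mesh, stable_pose.T_obj_table.as_frames('obj', 'world'),
             style='surface')
    vis.show()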
Example #11
    y_axis = c2 - c1
    y_axis = y_axis / np.linalg.norm(y_axis)
    z_axis = np.array([y_axis[1], -y_axis[0], 0]) # the z axis will always be in the table plane for now
    z_axis = z_axis / np.linalg.norm(z_axis)
    x_axis = np.cross(y_axis, z_axis)

    # convert to hand pose
    R_obj_gripper = np.array([x_axis, y_axis, z_axis]).T
    t_obj_gripper = center
    return RigidTransform(rotation=R_obj_gripper, 
                          translation=t_obj_gripper,
                          from_frame='gripper',
                          to_frame='obj')

if __name__ == '__main__':
    of = ObjFile(SPRAY_BOTTLE_MESH_FILENAME)
    mesh = of.read()

    vertices = mesh.vertices
    triangles = mesh.triangles
    normals = mesh.normals

    print('Num vertices:', len(vertices))
    print('Num triangles:', len(triangles))
    print('Num normals:', len(normals))

    # 1. Generate candidate pairs of contact points

    # 2. Check for force closure

    # 3. Convert each grasp to a hand pose
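
Step 2 above is only a comment; a minimal two-contact force-closure test
under a Coulomb friction model (a sketch of the standard antipodality
condition, not dex-net's implementation; c1, c2 are the contact points and
n1, n2 inward-pointing unit normals) could look like:

def antipodal_force_closure(c1, c2, n1, n2, mu):
    # Force closure with two frictional contacts holds iff the line between
    # the contacts lies inside the friction cone, of half-angle arctan(mu),
    # at each contact.
    line = c2 - c1
    line = line / np.linalg.norm(line)
    half_angle = np.arctan(mu)
    ang1 = np.arccos(np.clip(np.dot(line, n1), -1.0, 1.0))
    ang2 = np.arccos(np.clip(np.dot(-line, n2), -1.0, 1.0))
    return ang1 <= half_angle and ang2 <= half_angle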
Example #12
    logging.getLogger().setLevel(logging.INFO)

    # read args
    parser = argparse.ArgumentParser(description='Convert a mesh to a URDF')
    parser.add_argument('mesh_filename', type=str, help='OBJ filename of the mesh to convert')
    parser.add_argument('output_dir', type=str, help='directory to store output urdf in')
    parser.add_argument('--config', type=str, default='cfg/tools/convex_decomposition.yaml',
                        help='config file for urdf conversion')
    args = parser.parse_args()

    # open config
    config_filename = args.config
    config = YamlConfig(config_filename)

    # check valid mesh filename
    mesh_filename = args.mesh_filename
    mesh_root, mesh_ext = os.path.splitext(mesh_filename)
    if mesh_ext.lower() != OBJ_EXT:
        logging.error('Extension %s not supported' % mesh_ext)
        exit(1)

    # open mesh
    of = ObjFile(mesh_filename)
    mesh = of.read()
    mesh.density = config['object_density']

    # create output dir for urdf
    output_dir = args.output_dir
    writer = UrdfWriter(output_dir)
    writer.write(mesh)
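
For reference, the script would be invoked as, e.g.,
python convert_to_urdf.py mesh.obj output_dir --config cfg/tools/convex_decomposition.yaml
(script and mesh filenames hypothetical); note that only object_density is
actually read from the config in this fragment.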
Example #13
    def _set_table_mesh(self):
        return ObjFile(self._table_mesh_filename).read()
Example #14
config = YamlConfig(config_filename)
database = Hdf5Database(config['database_name'], access_level=READ_ONLY_ACCESS)

target_object_keys = config['target_objects']

# Setup collision checking
table_offset = config['collision_checking']['table_offset']

table_mesh_filename = config['collision_checking']['table_mesh_filename']
if not os.path.isabs(table_mesh_filename):
    table_mesh_filename = os.path.join(DATA_DIR, table_mesh_filename)

dataset_names = target_object_keys.keys()
datasets = [database.dataset(dn) for dn in dataset_names]

table_mesh = ObjFile(table_mesh_filename).read()

# take the first object from the first dataset
dataset = datasets[0]
obj_key = dataset.object_keys[0]
obj = dataset[obj_key]

# read in the stable poses of the mesh
stable_poses = dataset.stable_poses(obj.key)

stable_pose = stable_poses[0]
# setup table in collision checker
T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
T_obj_table = obj.mesh.get_T_surface_obj(T_obj_stp,
                                         delta=table_offset).as_frames(
                                             'obj', 'table')
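
This mirrors the table setup inside save_dexnet_objects above; the natural
next step (a sketch, assuming the config provides a gripper key as it does
there) is to hand the transform to a collision checker, reusing calls that
appear verbatim earlier in this document:

T_table_obj = T_obj_table.inverse()
gripper = RobotGripper.load(config['gripper'])
collision_checker = GraspCollisionChecker(gripper)
collision_checker.set_graspable_object(obj)
collision_checker.set_table(table_mesh_filename, T_table_obj)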
Example #15
    args = parser.parse_args()
    image_filename = args.input_image
    extrusion = args.extrusion
    scale_factor = args.scale_factor
    output_filename = args.output_filename

    # read the image
    binary_im = BinaryImage.open(image_filename)
    sdf = binary_im.to_sdf()
    #plt.figure()
    #plt.imshow(sdf)
    #plt.show()

    # convert to a mesh
    mesh = ImageToMeshConverter.binary_image_to_mesh(binary_im,
                                                     extrusion=extrusion,
                                                     scale_factor=scale_factor)
    vis.figure()
    vis.mesh(mesh)
    vis.show()

    # optionally save
    if output_filename is not None:
        file_root, file_ext = os.path.splitext(output_filename)
        binary_im.save(file_root + '.jpg')
        ObjFile(file_root + '.obj').write(mesh)
        np.savetxt(file_root + '.csv',
                   sdf,
                   delimiter=',',
                   header='%d %d' % (sdf.shape[0], sdf.shape[1]))
Example #16
    # parse args
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('mesh_filename',
                        type=str,
                        help='filename for .OBJ mesh file to render')
    args = parser.parse_args()

    vis_normals = False

    # read data
    mesh_filename = args.mesh_filename
    _, mesh_ext = os.path.splitext(mesh_filename)
    if mesh_ext != '.obj':
        raise ValueError('Must provide mesh in Wavefront .OBJ format!')
    orig_mesh = ObjFile(mesh_filename).read()
    mesh = orig_mesh.subdivide(min_tri_length=0.01)
    mesh.compute_vertex_normals()
    stable_poses = mesh.stable_poses()

    if vis_normals:
        vis3d.figure()
        vis3d.mesh(mesh)
        vis3d.normals(NormalCloud(mesh.normals.T),
                      PointCloud(mesh.vertices.T),
                      subsample=10)
        vis3d.show()

    d = utils.sqrt_ceil(len(stable_poses))
    vis.figure(size=(16, 16))