Example #1
    def save_samples(self, sample, grasps, T_obj_stp, collision_checker, grasp_metrics, stable_pose):
        self.tensor_datapoint['image_labels'] = self.cur_image_label
        T_stp_camera = sample.camera.object_to_camera_pose
        self.T_obj_camera = T_stp_camera * T_obj_stp.as_frames('obj', T_stp_camera.from_frame)
        aligned_grasps = self.align_grasps_with_camera(grasps)

        depth_im_table = sample.renders[RenderMode.DEPTH_SCENE].image
        cx = depth_im_table.center[1]
        cy = depth_im_table.center[0]
        camera_pose = self.get_camera_pose(sample)
        self.tensor_datapoint['camera_poses'] = camera_pose
        shifted_camera_intr = sample.camera.camera_intr
        self.tensor_datapoint['camera_intrs'] = np.r_[shifted_camera_intr.fx,
                                                      shifted_camera_intr.fy,
                                                      shifted_camera_intr.cx,
                                                      shifted_camera_intr.cy]
        self.save_orig_image(depth_im_table, camera_pose, stable_pose, shifted_camera_intr)

        grid_space = 200
        grids = 3
        centre = np.empty((grids, grids, 2))
        saved = np.ones((grids, grids)) * -1

        depth_images = np.empty((grids, grids, 32, 32, 1))
        # Crop images in a grid around the centre
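        # Each grid cell holds a 32x32 crop taken at offset (x_pos, y_pos) from the
        # image centre; 'saved' later records the index of the file each crop was
        # written to (-1 means the crop has not been written to disk yet).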
        for x_grid in range(grids):
            x_pos = grid_space / 4 - x_grid * grid_space / (grids + 1)
            for y_grid in range(grids):
                y_pos = grid_space / 4 - y_grid * grid_space / (grids + 1)
                depth_im = self.prepare_images(depth_im_table, x_pos, y_pos)
                self._show_image(depth_im)
                depth_images[x_grid, y_grid, :, :, :] = depth_im
                centre[x_grid, y_grid, 0] = x_pos
                centre[x_grid, y_grid, 1] = y_pos

        for cnt, aligned_grasp in enumerate(aligned_grasps):
            collision_free = self.is_grasp_collision_free(aligned_grasp, collision_checker)
            # Project grasp coordinates in image
            grasp_2d = aligned_grasp.project_camera(self.T_obj_camera, shifted_camera_intr)

            grasp_point = aligned_grasp.close_fingers(self.obj, vis=False, check_approach=False)

            if not grasp_point[0]:
                Warning("Could not find contact points")
                continue
            # Get grasp width in pixel from endpoints
            p_1 = np.dot(self.T_obj_camera.matrix, np.append(grasp_point[1][0].point, 1).T).T[:3]
            p_2 = np.dot(self.T_obj_camera.matrix, np.append(grasp_point[1][1].point, 1).T).T[:3]
            grasp_width_px = self.width_px(shifted_camera_intr, p_1, p_2)

            grasp_width = aligned_grasp.width_from_endpoints(grasp_point[1][0].point, grasp_point[1][1].point)

            pos_x = cx - grasp_2d.center.x
            pos_y = cy - grasp_2d.center.y

            # squared distance between the grasp-centre offset and each grid-crop offset
            distance = (centre[:, :, 0] - pos_x)**2 + (centre[:, :, 1] - pos_y)**2

            # grid cell whose crop centre is closest to the projected grasp centre
            idx_x, idx_y = np.unravel_index(np.argmin(distance), distance.shape)

            grasp_center_x = (300 - grasp_2d.center.x - centre[idx_x, idx_y, 0])//3
            grasp_center_y = (300 - grasp_2d.center.y - centre[idx_x, idx_y, 1])//3

            if saved[idx_x, idx_y] == -1:
                np.save(self.image_dir + "/depth_im_{:07d}.npy".format(self.cur_image_file),
                        depth_images[idx_x, idx_y, :, :, :])
                saved[idx_x, idx_y] = self.cur_image_file
                self.tensor_datapoint['image_files'] = self.cur_image_file
                self.cur_image_file += 1
            else:
                self.tensor_datapoint['image_files'] = saved[idx_x, idx_y]

            T_grasp_camera = aligned_grasp.gripper_pose(self.gripper).inverse() * self.T_obj_camera.inverse()

            if VISUALISE_3D:
                z_axis = T_grasp_camera.z_axis
                theta = np.rad2deg(np.arccos((z_axis[2]) /
                                             (np.sqrt((z_axis[0] ** 2 + z_axis[1] ** 2 + z_axis[2] ** 2)))))
                vis.figure()
                T_obj_world = vis.mesh_stable_pose(self.obj.mesh.trimesh,
                                                   stable_pose.T_obj_world, style='surface', dim=0.5, plot_table=True)
                T_camera_world = T_obj_world * self.T_obj_camera.inverse()
                vis.gripper(self.gripper, aligned_grasp, T_obj_world, color=(0.3, 0.3, 0.3),
                            T_camera_world=T_camera_world)
                print(theta)
                vis.show()

            hand_pose = self.get_hand_pose(grasp_center_x, grasp_center_y, grasp_2d, T_grasp_camera,
                                           grasp_width, grasp_width_px)

            self.add_datapoint(hand_pose,
                               collision_free, aligned_grasp, grasp_metrics)
        self.cur_image_label += 1
Example #2
    def save_samples(self, sample, grasps, T_obj_stp, collision_checker,
                     grasp_metrics, stable_pose):
        self.tensor_datapoint['image_labels'] = self.cur_image_label
        T_stp_camera = sample.camera.object_to_camera_pose
        self.T_obj_camera = T_stp_camera * T_obj_stp.as_frames(
            'obj', T_stp_camera.from_frame)

        depth_im_table = sample.renders[RenderMode.DEPTH_SCENE].image
        camera_pose = self.get_camera_pose(sample)
        shifted_camera_intr = sample.camera.camera_intr
        aligned_grasps = self.align_grasps_with_camera(grasps)

        cx = depth_im_table.center[1]
        cy = depth_im_table.center[0]

        self.tensor_datapoint['camera_intrs'] = np.r_[shifted_camera_intr.fx,
                                                      shifted_camera_intr.fy,
                                                      shifted_camera_intr.cx,
                                                      shifted_camera_intr.cy]

        for aligned_grasp in aligned_grasps:
            if not self.is_grasp_aligned(aligned_grasp):
                continue
            collision_free = self.is_grasp_collision_free(
                aligned_grasp, collision_checker)
            grasp_2d = aligned_grasp.project_camera(self.T_obj_camera,
                                                    shifted_camera_intr)

            depth_im_tf = self.prepare_images(depth_im_table, grasp_2d, cx, cy)
            T_grasp_camera = aligned_grasp.gripper_pose(
                self.gripper).inverse() * self.T_obj_camera.inverse()

            if VISUALISE_3D:
                z_axis = T_grasp_camera.z_axis
                theta = np.rad2deg(
                    np.arccos((z_axis[2]) / (np.sqrt(
                        (z_axis[0]**2 + z_axis[1]**2 + z_axis[2]**2)))))
                vis.figure()
                T_obj_world = vis.mesh_stable_pose(self.obj.mesh.trimesh,
                                                   stable_pose.T_obj_world,
                                                   style='surface',
                                                   dim=0.5,
                                                   plot_table=True)
                T_camera_world = T_obj_world * self.T_obj_camera.inverse()
                vis.gripper(self.gripper,
                            aligned_grasp,
                            T_obj_world,
                            color=(0.3, 0.3, 0.3),
                            T_camera_world=T_camera_world)
                vis.show()

            theta = grasp_2d.angle
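            # compose an extra in-plane rotation by the grasp angle onto the
            # camera-frame grasp pose before storing its quaternion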
            R = np.array([[np.cos(theta), -np.sin(theta), 0],
                          [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
            rot = RigidTransform(rotation=R,
                                 to_frame='camera',
                                 from_frame='camera')

            new_quaternion = T_grasp_camera * rot

            hand_pose = self.get_hand_pose(grasp_2d, new_quaternion.quaternion)
            self.add_datapoint(depth_im_tf, hand_pose, collision_free,
                               camera_pose, aligned_grasp, grasp_metrics)
        self.cur_image_label += 1
def generate_gqcnn_dataset(dataset_path,
                           database,
                           target_object_keys,
                           env_rv_params,
                           gripper_name,
                           config):
    """
    Generates a GQ-CNN TensorDataset for training models with new grippers, quality metrics, objects, and cameras.

    Parameters
    ----------
    dataset_path : str
        path to save the dataset to
    database : :obj:`Hdf5Database`
        Dex-Net database containing the 3D meshes, grasps, and grasp metrics
    target_object_keys : :obj:`OrderedDict`
        dictionary mapping dataset names to target objects (either 'all' or a list of specific object keys)
    env_rv_params : :obj:`OrderedDict`
        parameters of the camera and object random variables used in sampling (see meshpy.UniformPlanarWorksurfaceImageRandomVariable for more info)
    gripper_name : str
        name of the gripper to use
    config : :obj:`autolab_core.YamlConfig`
        other parameters for dataset generation

    Notes
    -----
    Required parameters of config are specified in Other Parameters

    Other Parameters
    ----------------    
    images_per_stable_pose : int
        number of object and camera poses to sample for each stable pose
    stable_pose_min_p : float
        minimum probability of occurrence for a stable pose to be used in data generation (used to prune bad stable poses)
    
    gqcnn/crop_width : int
        width, in pixels, of crop region around each grasp center, before resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/crop_height : int
        height, in pixels, of crop region around each grasp center, before resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/final_width : int
        width, in pixels, of final transformed grasp image for input to the GQ-CNN (defaults to 32)
    gqcnn/final_height : int
        height, in pixels, of final transformed grasp image for input to the GQ-CNN (defaults to 32)

    table_alignment/max_approach_table_angle : float
        max angle between the grasp axis and the table normal when the grasp approach is maximally aligned with the table normal
    table_alignment/max_approach_offset : float
        max deviation from perpendicular approach direction to use in grasp collision checking
    table_alignment/num_approach_offset_samples : int
        number of approach samples to use in collision checking

    collision_checking/table_offset : float
        max allowable interpenetration between the gripper and table to be considered collision free
    collision_checking/table_mesh_filename : str
        path to a table mesh for collision checking (default data/meshes/table.obj)
    collision_checking/approach_dist : float
        distance, in meters, between the approach pose and final grasp pose along the grasp axis
    collision_checking/delta_approach : float
        amount, in meters, to discretize the straight-line path from the gripper approach pose to the final grasp pose

    tensors/datapoints_per_file : int
        number of datapoints to store in each unique tensor file on disk
    tensors/fields : :obj:`dict`
        dictionary mapping field names to dictionaries specifying the data type, height, width, and number of channels for each tensor

    debug : bool
        True (or 1) if the random seed should be set to enforce deterministic behavior, False (0) otherwise
    vis/candidate_grasps : bool
        True (or 1) if the collision free candidate grasps should be displayed in 3D (for debugging)
    vis/rendered_images : bool
        True (or 1) if the rendered images for each stable pose should be displayed (for debugging)
    vis/grasp_images : bool
        True (or 1) if the transformed grasp images should be displayed (for debugging)
    """
    # read data gen params
    output_dir = dataset_path
    gripper = RobotGripper.load(gripper_name)
    image_samples_per_stable_pose = config['images_per_stable_pose']
    stable_pose_min_p = config['stable_pose_min_p']
    
    # read gqcnn params
    gqcnn_params = config['gqcnn']
    im_crop_height = gqcnn_params['crop_height']
    im_crop_width = gqcnn_params['crop_width']
    im_final_height = gqcnn_params['final_height']
    im_final_width = gqcnn_params['final_width']
    cx_crop = float(im_crop_width) / 2
    cy_crop = float(im_crop_height) / 2

    # open database
    dataset_names = target_object_keys.keys()
    datasets = [database.dataset(dn) for dn in dataset_names]

    # set target objects
    for dataset in datasets:
        if target_object_keys[dataset.name] == 'all':
            target_object_keys[dataset.name] = dataset.object_keys

    # setup grasp params
    table_alignment_params = config['table_alignment']
    min_grasp_approach_offset = -np.deg2rad(table_alignment_params['max_approach_offset'])
    max_grasp_approach_offset = np.deg2rad(table_alignment_params['max_approach_offset'])
    max_grasp_approach_table_angle = np.deg2rad(table_alignment_params['max_approach_table_angle'])
    num_grasp_approach_samples = table_alignment_params['num_approach_offset_samples']

    phi_offsets = []
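    # discretise the allowed approach-offset interval into the phi angles that are
    # later tested for collisions along the approach direction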
    if max_grasp_approach_offset == min_grasp_approach_offset:
        phi_inc = 1
    elif num_grasp_approach_samples == 1:
        phi_inc = max_grasp_approach_offset - min_grasp_approach_offset + 1
    else:
        phi_inc = (max_grasp_approach_offset - min_grasp_approach_offset) / (num_grasp_approach_samples - 1)
                                                            
    phi = min_grasp_approach_offset
    while phi <= max_grasp_approach_offset:
        phi_offsets.append(phi)
        phi += phi_inc

    # setup collision checking
    coll_check_params = config['collision_checking']
    approach_dist = coll_check_params['approach_dist']
    delta_approach = coll_check_params['delta_approach']
    table_offset = coll_check_params['table_offset']

    table_mesh_filename = coll_check_params['table_mesh_filename']
    if not os.path.isabs(table_mesh_filename):
        table_mesh_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', table_mesh_filename)
    table_mesh = ObjFile(table_mesh_filename).read()
    
    # set tensor dataset config
    tensor_config = config['tensors']
    tensor_config['fields']['depth_ims_tf_table']['height'] = im_final_height
    tensor_config['fields']['depth_ims_tf_table']['width'] = im_final_width
    tensor_config['fields']['obj_masks']['height'] = im_final_height
    tensor_config['fields']['obj_masks']['width'] = im_final_width

    # add available metrics (assuming the same metrics are computed for all objects)
    metric_names = []
    dataset = datasets[0]
    obj_keys = dataset.object_keys
    if len(obj_keys) == 0:
        raise ValueError('No valid objects in dataset %s' %(dataset.name))
    
    obj = dataset[obj_keys[0]]
    grasps = dataset.grasps(obj.key, gripper=gripper.name)
    grasp_metrics = dataset.grasp_metrics(obj.key, grasps, gripper=gripper.name)
    metric_names = list(grasp_metrics[list(grasp_metrics.keys())[0]].keys())
    for metric_name in metric_names:
        tensor_config['fields'][metric_name] = {}
        tensor_config['fields'][metric_name]['dtype'] = 'float32'

    # init tensor dataset
    tensor_dataset = TensorDataset(output_dir, tensor_config)
    tensor_datapoint = tensor_dataset.datapoint_template

    # setup log file
    experiment_log_filename = os.path.join(output_dir, 'dataset_generation.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)
    root_logger = logging.getLogger()

    # copy config
    out_config_filename = os.path.join(output_dir, 'dataset_generation.json')
    ordered_dict_config = collections.OrderedDict()
    for key in config.keys():
        ordered_dict_config[key] = config[key]
    with open(out_config_filename, 'w') as outfile:
        json.dump(ordered_dict_config, outfile)

    # 1. Precompute the set of valid grasps for each stable pose:
    #    i) Perpendicular to the table
    #   ii) Collision-free along the approach direction

    # load grasps if they already exist
    grasp_cache_filename = os.path.join(output_dir, CACHE_FILENAME)
    if os.path.exists(grasp_cache_filename):
        logging.info('Loading grasp candidates from file')
        candidate_grasps_dict = pkl.load(open(grasp_cache_filename, 'rb'))
    # otherwise re-compute by reading from the database and enforcing constraints
    else:        
        # create grasps dict
        candidate_grasps_dict = {}
        
        # loop through datasets and objects
        for dataset in datasets:
            logging.info('Reading dataset %s' %(dataset.name))
            for obj in dataset:
                if obj.key not in target_object_keys[dataset.name]:
                    continue

                # init candidate grasp storage
                candidate_grasps_dict[obj.key] = {}

                # setup collision checker
                collision_checker = GraspCollisionChecker(gripper)
                collision_checker.set_graspable_object(obj)

                # read in the stable poses of the mesh
                stable_poses = dataset.stable_poses(obj.key)
                for i, stable_pose in enumerate(stable_poses):
                    # render images if stable pose is valid
                    if stable_pose.p > stable_pose_min_p:
                        candidate_grasps_dict[obj.key][stable_pose.id] = []

                        # setup table in collision checker
                        T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                        T_obj_table = obj.mesh.get_T_surface_obj(T_obj_stp, delta=table_offset).as_frames('obj', 'table')
                        T_table_obj = T_obj_table.inverse()
                        collision_checker.set_table(table_mesh_filename, T_table_obj)

                        # read grasp and metrics
                        grasps = dataset.grasps(obj.key, gripper=gripper.name)
                        logging.info('Aligning %d grasps for object %s in stable pose %s' %(len(grasps), obj.key, stable_pose.id))

                        # align grasps with the table
                        aligned_grasps = [grasp.perpendicular_table(stable_pose) for grasp in grasps]

                        # check grasp validity
                        logging.info('Checking collisions for %d grasps for object %s in stable pose %s' %(len(grasps), obj.key, stable_pose.id))
                        for aligned_grasp in aligned_grasps:
                            # check angle with table plane and skip unaligned grasps
                            _, grasp_approach_table_angle, _ = aligned_grasp.grasp_angles_from_stp_z(stable_pose)
                            perpendicular_table = (np.abs(grasp_approach_table_angle) < max_grasp_approach_table_angle)
                            if not perpendicular_table: 
                                continue

                            # check whether any valid approach directions are collision free
                            collision_free = False
                            for phi_offset in phi_offsets:
                                rotated_grasp = aligned_grasp.grasp_y_axis_offset(phi_offset)
                                collides = collision_checker.collides_along_approach(rotated_grasp, approach_dist, delta_approach)
                                if not collides:
                                    collision_free = True
                                    break
                    
                            # store if aligned to table
                            candidate_grasps_dict[obj.key][stable_pose.id].append(GraspInfo(aligned_grasp, collision_free))

                            # visualize if specified
                            if collision_free and config['vis']['candidate_grasps']:
                                logging.info('Grasp %d' %(aligned_grasp.id))
                                vis.figure()
                                vis.gripper_on_object(gripper, aligned_grasp, obj, stable_pose.T_obj_world)
                                vis.show()
                                
        # save to file
        logging.info('Saving to file')
        pkl.dump(candidate_grasps_dict, open(grasp_cache_filename, 'wb'))

    # 2. Render a dataset of images and associate the gripper pose with image coordinates for each grasp in the Dex-Net database

    # setup variables
    obj_category_map = {}
    pose_category_map = {}

    cur_pose_label = 0
    cur_obj_label = 0
    cur_image_label = 0
                
    # render images for each stable pose of each object in the dataset
    render_modes = [RenderMode.SEGMASK, RenderMode.DEPTH_SCENE]
    for dataset in datasets:
        logging.info('Generating data for dataset %s' %(dataset.name))
        
        # iterate through all object keys
        object_keys = dataset.object_keys
        for obj_key in object_keys:
            obj = dataset[obj_key]
            if obj.key not in target_object_keys[dataset.name]:
                continue

            # read in the stable poses of the mesh
            stable_poses = dataset.stable_poses(obj.key)
            for i, stable_pose in enumerate(stable_poses):

                # render images if stable pose is valid
                if stable_pose.p > stable_pose_min_p:
                    # log progress
                    logging.info('Rendering images for object %s in %s' %(obj.key, stable_pose.id))

                    # add to category maps
                    if obj.key not in obj_category_map.keys():
                        obj_category_map[obj.key] = cur_obj_label
                    pose_category_map['%s_%s' %(obj.key, stable_pose.id)] = cur_pose_label

                    # read in candidate grasps and metrics
                    candidate_grasp_info = candidate_grasps_dict[obj.key][stable_pose.id]
                    candidate_grasps = [g.grasp for g in candidate_grasp_info]
                    grasp_metrics = dataset.grasp_metrics(obj.key, candidate_grasps, gripper=gripper.name)

                    # compute object pose relative to the table
                    T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                    T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)

                    # sample images from random variable
                    T_table_obj = RigidTransform(from_frame='table', to_frame='obj')
                    scene_objs = {'table': SceneObject(table_mesh, T_table_obj)}
                    urv = UniformPlanarWorksurfaceImageRandomVariable(obj.mesh,
                                                                      render_modes,
                                                                      'camera',
                                                                      env_rv_params,
                                                                      stable_pose=stable_pose,
                                                                      scene_objs=scene_objs)
                    
                    render_start = time.time()
                    render_samples = urv.rvs(size=image_samples_per_stable_pose)
                    render_stop = time.time()
                    logging.info('Rendering images took %.3f sec' %(render_stop - render_start))

                    # visualize
                    if config['vis']['rendered_images']:
                        d = int(np.ceil(np.sqrt(image_samples_per_stable_pose)))

                        # binary
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d,d,j+1)
                            vis2d.imshow(render_sample.renders[RenderMode.SEGMASK].image)

                        # depth table
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d,d,j+1)
                            vis2d.imshow(render_sample.renders[RenderMode.DEPTH_SCENE].image)
                        vis2d.show()

                    # tally total amount of data
                    num_grasps = len(candidate_grasps)
                    num_images = image_samples_per_stable_pose 
                    num_save = num_images * num_grasps
                    logging.info('Saving %d datapoints' %(num_save))

                    # for each candidate grasp on the object compute the projection
                    # of the grasp into image space
                    for render_sample in render_samples:
                        # read images
                        binary_im = render_sample.renders[RenderMode.SEGMASK].image
                        depth_im_table = render_sample.renders[RenderMode.DEPTH_SCENE].image
                        # read camera params
                        T_stp_camera = render_sample.camera.object_to_camera_pose
                        shifted_camera_intr = render_sample.camera.camera_intr

                        # read pixel offsets
                        cx = depth_im_table.center[1]
                        cy = depth_im_table.center[0]

                        # compute intrinsics for virtual camera of the final
                        # cropped and rescaled images
                        camera_intr_scale = float(im_final_height) / float(im_crop_height)
                        cropped_camera_intr = shifted_camera_intr.crop(im_crop_height, im_crop_width, cy, cx)
                        final_camera_intr = cropped_camera_intr.resize(camera_intr_scale)

                        # create a thumbnail for each grasp
                        for grasp_info in candidate_grasp_info:
                            # read info
                            grasp = grasp_info.grasp
                            collision_free = grasp_info.collision_free
                            
                            # get the gripper pose
                            T_obj_camera = T_stp_camera * T_obj_stp.as_frames('obj', T_stp_camera.from_frame)
                            grasp_2d = grasp.project_camera(T_obj_camera, shifted_camera_intr)

                            # center images on the grasp, rotate to image x axis
                            dx = cx - grasp_2d.center.x
                            dy = cy - grasp_2d.center.y
                            translation = np.array([dy, dx])

                            binary_im_tf = binary_im.transform(translation, grasp_2d.angle)
                            depth_im_tf_table = depth_im_table.transform(translation, grasp_2d.angle)

                            # crop to image size
                            binary_im_tf = binary_im_tf.crop(im_crop_height, im_crop_width)
                            depth_im_tf_table = depth_im_tf_table.crop(im_crop_height, im_crop_width)

                            # resize to image size
                            binary_im_tf = binary_im_tf.resize((im_final_height, im_final_width), interp='nearest')
                            depth_im_tf_table = depth_im_tf_table.resize((im_final_height, im_final_width))
                            
                            # visualize the transformed images
                            if config['vis']['grasp_images']:
                                grasp_center = Point(depth_im_tf_table.center,
                                                     frame=final_camera_intr.frame)
                                tf_grasp_2d = Grasp2D(grasp_center, 0,
                                                      grasp_2d.depth,
                                                      width=gripper.max_width,
                                                      camera_intr=final_camera_intr)

                                # plot 2D grasp image
                                vis2d.figure()
                                vis2d.subplot(2,2,1)
                                vis2d.imshow(binary_im)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2,2,2)
                                vis2d.imshow(depth_im_table)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2,2,3)
                                vis2d.imshow(binary_im_tf)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.subplot(2,2,4)
                                vis2d.imshow(depth_im_tf_table)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.title('Coll Free? %d'%(grasp_info.collision_free))
                                vis2d.show()

                                # plot 3D visualization
                                vis.figure()
                                T_obj_world = vis.mesh_stable_pose(obj.mesh, stable_pose.T_obj_world, style='surface', dim=0.5)
                                vis.gripper(gripper, grasp, T_obj_world, color=(0.3,0.3,0.3))
                                vis.show()

                            # form hand pose array
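                            # layout: (row, col) of the grasp centre, grasp depth,
                            # in-plane angle, centre offset from the camera principal
                            # point, and grasp width in pixels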
                            hand_pose = np.r_[grasp_2d.center.y,
                                              grasp_2d.center.x,
                                              grasp_2d.depth,
                                              grasp_2d.angle,
                                              grasp_2d.center.y - shifted_camera_intr.cy,
                                              grasp_2d.center.x - shifted_camera_intr.cx,
                                              grasp_2d.width_px]
         

                            # store to data buffers
                            tensor_datapoint['depth_ims_tf_table'] = depth_im_tf_table.raw_data
                            tensor_datapoint['obj_masks'] = binary_im_tf.raw_data
                            tensor_datapoint['hand_poses'] = hand_pose
                            tensor_datapoint['collision_free'] = collision_free
                            tensor_datapoint['obj_labels'] = cur_obj_label
                            tensor_datapoint['pose_labels'] = cur_pose_label
                            tensor_datapoint['image_labels'] = cur_image_label

                            for metric_name, metric_val in grasp_metrics[grasp.id].items():
                                coll_free_metric = (1 * collision_free) * metric_val
                                tensor_datapoint[metric_name] = coll_free_metric
                            tensor_dataset.add(tensor_datapoint)

                        # update image label
                        cur_image_label += 1

                    # update pose label
                    cur_pose_label += 1

                    # force clean up
                    gc.collect()

            # update object label
            cur_obj_label += 1

            # force clean up
            gc.collect()

    # save last file
    tensor_dataset.flush()

    # save category mappings
    obj_cat_filename = os.path.join(output_dir, 'object_category_map.json')
    json.dump(obj_category_map, open(obj_cat_filename, 'w'))
    pose_cat_filename = os.path.join(output_dir, 'pose_category_map.json')
    json.dump(pose_category_map, open(pose_cat_filename, 'w'))
Example #4
                push_idx = None
                # push_idx = 33
                sample_id = 0
                num_toppled, pose_angles, actual_poses = [], [], []
                print('\n\n\n\n\n\nSTARTING DIFFERENT PUSH', push_num)
                while sample_id < 10:
                    sample_id += 1
                    # sim_point_cloud_masked = get_sim_point_cloud(depth_scene, env.state.obj)
                    usr_input = 'n'
                    while usr_input != 'y' and usr_input != 'a':
                        usr_input = utils.keyboard_input('\n\nPut the object in position y: continue, v: vis pose, a: abort push[y/v/a]:')
                        if usr_input == 'v':
                            vis3d.figure()
                            env.state.obj.T_obj_world = orig_pose
                            env.render_3d_scene()
                            vis3d.show(starting_camera_pose=CAMERA_POSE)
                    if usr_input == 'a':
                        print('trying different push')
                        push_idx = None
                        sample_id = 0
                        num_toppled, pose_angles, actual_poses = [], [], []
                        continue

                    usr_input = 'n'
                    while usr_input != 'y':
                        s2r = sim_to_real_tf(sim_point_cloud_masked, vis=True)
                        usr_input = utils.keyboard_input('Did the pose registration work? [y/n]:')
                    env.state.obj.T_obj_world = s2r*orig_pose

                    # Plan topple action
                    action = policy.action(env.state, push_idx)
Example #5
    def showgraspfrompicklewithcandidate(self, object_name, metric_name, onlynagative, onlypositive, openravechecker):
        object = self.dataset[object_name]
        labelgrasps, labelmetrics, candidate_labelgrasps, candidate_labelmetrics = self.readdatafrompicklewithcandidate(object_name)

        config = self._get_config(None)
        low = 0.0  # np.min(metrics)
        high = np.max([labelmetrics[i][metric_name] for i in range(len(labelmetrics))])
        # print('high', high, config['quality_scale'])
        if low == high:
            q_to_c = lambda quality: config['quality_scale']
        else:
            q_to_c = lambda quality: config['quality_scale'] * (quality - low) / (high - low)

        recalculate_grasp = []
        vis.figure()
        vis.mesh(object.mesh.trimesh, style='surface')
        for i in range(len(labelgrasps)):
            if labelmetrics[i][metric_name] == 0:
                if not onlypositive:
                    vis.figure()
                    vis.mesh(object.mesh.trimesh, style='surface')
                    color = plt.get_cmap('hsv')(q_to_c(labelmetrics[i][metric_name]))[:-1]

                    vis.graspwithapproachvectorusingcenter_point(labelgrasps[i], approaching_color=(1, 0, 0), grasp_axis_color=(1, 0, 0))
                    for kk in range(len(candidate_labelgrasps[i])):
                        print('candidate_labelgrasps[i][kk]', candidate_labelgrasps[i][kk])
                        color = plt.get_cmap('hsv')(q_to_c(candidate_labelmetrics[i][candidate_labelgrasps[i][kk].id][metric_name]))[:-1]
                        vis.graspwithapproachvectorusingcenter_point(candidate_labelgrasps[i][kk], approaching_color=color, grasp_axis_color=color)

                    recalculate_grasp.append(labelgrasps[i])
                    print('metrics', i, labelmetrics[i][metric_name])  # , high

                    vis.pose(RigidTransform(), alpha=0.1)

                    vis.show(animate=False)
            elif labelmetrics[i][metric_name] == -1:
                if not onlypositive:
                    vis.shownormals(labelgrasps[i][1], labelgrasps[i][0], color=(0, 0, 1))
            else:
                if not onlynagative:
                    vis.figure()
                    vis.mesh(object.mesh.trimesh, style='surface')
                    color = plt.get_cmap('hsv')(q_to_c(labelmetrics[i][metric_name]))[:-1]
                    if len(candidate_labelgrasps[i]) > 0:
                        A = np.array(labelgrasps[i].rotated_full_axis[:, 0])
                        B = np.array(candidate_labelgrasps[i][0].rotated_full_axis[:, 0])
                        num = np.dot(A.T, B)
                        print(num)
                        if num < 0.99:
                            labelgrasps[i].approach_angle_ = labelgrasps[i]._angle_aligned_with_table(-candidate_labelgrasps[i][0].rotated_full_axis[:, 0])

                    vis.graspwithapproachvectorusingcenter_point(labelgrasps[i], approaching_color=color, grasp_axis_color=(0, 0, 1))
                    print('length', len(candidate_labelgrasps[i]))
                    for kk in range(len(candidate_labelgrasps[i])):
                        color = plt.get_cmap('hsv')(q_to_c(candidate_labelmetrics[i][candidate_labelgrasps[i][kk].id][metric_name]))[:-1]
                        vis.graspwithapproachvectorusingcenter_point(candidate_labelgrasps[i][kk], approaching_color=(0, 1, 1), grasp_axis_color=color)

                    recalculate_grasp.append(labelgrasps[i])
                    print('metrics', i, labelmetrics[i][metric_name])  # , high

                    vis.pose(RigidTransform(), alpha=0.1)
                    vis.show(animate=False)
Example #6
                            break

                    # Load grasp metrics
                    grasp_metrics = dataset.grasp_metrics(obj.key, aligned_grasps, gripper=gripper.name)

                    # Project grasp coordinates in image
                    grasp_2d = aligned_grasp.project_camera(T_obj_camera, shifted_camera_intr)

                    if VISUALISE_3D:
                        vis.figure()
                        T_obj_world = vis.mesh_stable_pose(obj.mesh.trimesh,
                                                           stable_pose.T_obj_world, style='surface', dim=0.5)
                        T_camera_world = T_obj_world * T_obj_camera.inverse()
                        vis.gripper(gripper, aligned_grasp, T_obj_world, color=(0.3, 0.3, 0.3),
                                    T_camera_world=T_camera_world)
                        vis.show()

                    # Get translation image distances to grasp
                    dx = cx - grasp_2d.center.x
                    dy = cy - grasp_2d.center.y
                    translation = np.array([dy, dx])

                    # Transform, crop and resize image
                    binary_im_tf = binary_im.transform(translation, grasp_2d.angle)
                    depth_im_tf_table = depth_im_table.transform(translation, grasp_2d.angle)

                    binary_im_tf = binary_im_tf.crop(96, 96)
                    depth_im_tf_table = depth_im_tf_table.crop(96, 96)

                    depth_image = np.asarray(depth_im_tf_table.data)
                    dep_image = Image.fromarray(depth_image).resize((32, 32), resample=Image.BILINEAR)
Example #7
    def save_samples(self, sample, grasps, collisions, T_obj_stp,
                     grasp_metrics, stable_pose):
        self.tensor_datapoint['image_labels'] = self.cur_image_label
        T_stp_camera = sample.camera.object_to_camera_pose
        self.T_obj_camera = T_stp_camera * T_obj_stp.as_frames(
            'obj', T_stp_camera.from_frame)

        depth_im_table = sample.renders[RenderMode.DEPTH_SCENE].image
        camera_pose = self.get_camera_pose(sample)
        final_camera_intr = sample.camera.camera_intr.crop(
            self.crop_height, self.crop_width, depth_im_table.center[0],
            depth_im_table.center[1])
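        # intrinsics of the virtual camera corresponding to the centred crop of the
        # rendered depth image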
        self.tensor_datapoint['camera_intrs'] = np.r_[final_camera_intr.fx,
                                                      final_camera_intr.fy,
                                                      final_camera_intr.cx,
                                                      final_camera_intr.cy]

        self.save_image(depth_im_table)

        for aligned_grasp, collision_free in zip(grasps, collisions):
            # Project grasp coordinates in image
            grasp_point = aligned_grasp.close_fingers(self.obj,
                                                      vis=False,
                                                      check_approach=False)

            if not grasp_point[0]:
                Warning("Could not find contact points")
                continue
            # Get grasp width in pixel from endpoints
            p_1 = np.dot(self.T_obj_camera.matrix,
                         np.append(grasp_point[1][0].point, 1).T).T[:3]
            p_2 = np.dot(self.T_obj_camera.matrix,
                         np.append(grasp_point[1][1].point, 1).T).T[:3]
            grasp_width_px = self.width_px(final_camera_intr, p_1, p_2)

            grasp_width = aligned_grasp.width_from_endpoints(
                grasp_point[1][0].point, grasp_point[1][1].point)

            if VISUALISE_3D:
                c = np.array((1, 0, 0))
                vis.figure()
                T_obj_world = vis.mesh_stable_pose(self.obj.mesh.trimesh,
                                                   stable_pose.T_obj_world,
                                                   style='surface',
                                                   dim=0.5,
                                                   plot_table=True)
                points = np.array(
                    (np.append(grasp_point[1][0].point,
                               1), np.append(grasp_point[1][1].point, 1)))

                vis.gripper(self.gripper,
                            aligned_grasp,
                            T_obj_world,
                            color=(0.3, 0.3, 0.3),
                            point=points,
                            point_color=c)
                vis.show()
            grasp_2d = aligned_grasp.project_camera(self.T_obj_camera,
                                                    final_camera_intr)

            grasp_center_x = self.crop_width // 2 - grasp_2d.center.x
            grasp_center_y = self.crop_height // 2 - grasp_2d.center.y

            hand_pose = self.get_hand_pose(grasp_center_x, grasp_center_y,
                                           grasp_2d, final_camera_intr,
                                           grasp_width, grasp_width_px)

            self.add_datapoint(hand_pose, collision_free, camera_pose,
                               aligned_grasp, grasp_metrics)
        self.cur_image_label += 1
Example #8
    def antipodal_grasp_sampler(self):
        # of = ObjFile(OBJ_FILENAME)
        # sf = SdfFile(SDF_FILENAME)
        # mesh = of.read()
        # sdf = sf.read()
        # obj = GraspableObject3D(sdf, mesh)
        mass = 1.0
        CONFIG['obj_rescaling_type'] = RescalingType.RELATIVE
        mesh_processor = MeshProcessor(OBJ_FILENAME, CONFIG['cache_dir'])
        mesh_processor.generate_graspable(CONFIG)
        mesh = mesh_processor.mesh
        sdf = mesh_processor.sdf
        obj = GraspableObject3D(sdf, mesh)

        gripper = RobotGripper.load(GRIPPER_NAME)

        ags = AntipodalGraspSampler(gripper, CONFIG)
        grasps = ags.sample_grasps(obj, num_grasps=100)
        print(len(grasps))
  
        quality_config = GraspQualityConfigFactory.create_config(CONFIG['metrics']['robust_ferrari_canny'])
        #quality_config = GraspQualityConfigFactory.create_config(CONFIG['metrics']['force_closure'])
        quality_fn = GraspQualityFunctionFactory.create_quality_function(obj, quality_config)
        

        vis.figure()
        #vis.points(PointCloud(nparraypoints), scale=0.001, color=np.array([0.5,0.5,0.5]))
        #vis.plot3d(nparraypoints)

        metrics = []
        vis.mesh(obj.mesh.trimesh, style='surface')
        for grasp in grasps:
            success, c = grasp.close_fingers(obj)
            if success:
                c1, c2 = c
                #fn_fc = quality_fn(grasp).quality
                true_fc = PointGraspMetrics3D.grasp_quality(grasp, obj, quality_config)
                metrics.append(true_fc)

            # color = quality_fn(grasp).quality
            # if (true_fc > 1.0):
            #     true_fc = 1.0
            # color = (1.0 - true_fc, true_fc, 0.0)

        low = np.min(metrics)
        high = np.max(metrics)
        print(low)
        print(high)
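        # map raw quality values onto [0, quality_scale] for use as colormap inputs;
        # fall back to a constant when all metrics are equal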
        if low == high:
            q_to_c = lambda quality: CONFIG['quality_scale']
        else:
            q_to_c = lambda quality: CONFIG['quality_scale'] * (quality - low) / (high - low)
        print(len(metrics))
        for grasp, metric in zip(grasps, metrics):
            color = plt.get_cmap('hsv')(q_to_c(metric))[:-1]
            print(color)
            vis.grasp(grasp, grasp_axis_color=color, endpoint_color=color)


        vis.show(False)
Example #9
    def read_grasps(self,
                    object_name,
                    stable_pose_id,
                    max_grasps=10,
                    visualize=False):
        # load gripper
        gripper = RobotGripper.load(
            GRIPPER_NAME, gripper_dir='/home/silvia/dex-net/data/grippers')

        # open Dex-Net API
        dexnet_handle = DexNet()
        dexnet_handle.open_database(self.database_path)
        dexnet_handle.open_dataset(self.dataset_name)

        # read the most robust grasp
        sorted_grasps, metrics = dexnet_handle.dataset.sorted_grasps(
            object_name,
            stable_pose_id=('pose_' + str(stable_pose_id)),
            metric='force_closure',
            gripper=gripper.name)

        if (len(sorted_grasps)) == 0:
            print('no grasps for this stable pose')
            if visualize:
                stable_pose = dexnet_handle.dataset.stable_pose(
                    object_name,
                    stable_pose_id=('pose_' + str(stable_pose_id)))
                obj = dexnet_handle.dataset.graspable(object_name)
                vis.figure()
                T_table_world = RigidTransform(from_frame='table',
                                               to_frame='world')
                T_obj_world = Visualizer3D.mesh_stable_pose(
                    obj.mesh.trimesh,
                    stable_pose.T_obj_world,
                    T_table_world=T_table_world,
                    color=(0.5, 0.5, 0.5),
                    style='surface',
                    plot_table=True,
                    dim=0.15)
                vis.show(False)
            return None, None

        contact_points = list(map(get_contact_points, sorted_grasps))

        # ------------------------ VISUALIZATION CODE ------------------------
        if visualize:
            low = np.min(metrics)
            high = np.max(metrics)
            if low == high:
                q_to_c = lambda quality: CONFIG['quality_scale']
            else:
                q_to_c = lambda quality: CONFIG['quality_scale'] * (
                    quality - low) / (high - low)

            stable_pose = dexnet_handle.dataset.stable_pose(
                object_name, stable_pose_id=('pose_' + str(stable_pose_id)))
            obj = dexnet_handle.dataset.graspable(object_name)
            vis.figure()

            T_table_world = RigidTransform(from_frame='table',
                                           to_frame='world')
            T_obj_world = Visualizer3D.mesh_stable_pose(
                obj.mesh.trimesh,
                stable_pose.T_obj_world,
                T_table_world=T_table_world,
                color=(0.5, 0.5, 0.5),
                style='surface',
                plot_table=True,
                dim=0.15)
            for grasp, metric in zip(sorted_grasps, metrics):
                color = plt.get_cmap('hsv')((q_to_c(metric)))[:-1]
                vis.grasp(grasp,
                          T_obj_world=stable_pose.T_obj_world,
                          grasp_axis_color=color,
                          endpoint_color=color)
            vis.show(False)
        # ------------------------ END VISUALIZATION CODE ---------------------------

        # ------------------------ START COLLISION CHECKING ---------------------------
        #stable_pose = dexnet_handle.dataset.stable_pose(object_name, stable_pose_id=('pose_'+str(stable_pose_id)))
        #graspable = dexnet_handle.dataset.graspable(object_name)
        #cc = GraspCollisionChecker(gripper).set_graspable_object(graspable, stable_pose.T_obj_world)
        stable_pose_matrix = self.get_stable_pose(object_name, stable_pose_id)
        # CLOSE DATABASE
        dexnet_handle.close_database()
        gripper_poses = []

        for grasp in sorted_grasps[:max_grasps]:
            gripper_pose_matrix = np.eye(4, 4)
            center_world = np.matmul(
                stable_pose_matrix,
                [grasp.center[0], grasp.center[1], grasp.center[2], 1])
            axis_world = np.matmul(
                stable_pose_matrix,
                [grasp.axis_[0], grasp.axis_[1], grasp.axis_[2], 1])
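            # reduce the grasp to a top-down pose: yaw about the world z-axis taken
            # from the grasp axis projected into the world x-y plane, with the grasp
            # centre as the translation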
            gripper_angle = math.atan2(axis_world[1], axis_world[0])
            gripper_pose_matrix[:3, 3] = center_world[:3]
            gripper_pose_matrix[0, 0] = math.cos(gripper_angle)
            gripper_pose_matrix[0, 1] = -math.sin(gripper_angle)
            gripper_pose_matrix[1, 0] = math.sin(gripper_angle)
            gripper_pose_matrix[1, 1] = math.cos(gripper_angle)
            #if visualize:
            #    vis.figure()
            #    vis.gripper_on_object(gripper, grasp, obj, stable_pose=stable_pose.T_obj_world)
            #    vis.show(False)
            gripper_poses.append(gripper_pose_matrix)

        return contact_points, gripper_poses
Example #10
    def save_samples(self, sample, grasps, collisions, T_obj_stp,
                     grasp_metrics, stable_pose):
        self.tensor_datapoint['image_labels'] = self.cur_image_label
        T_stp_camera = sample.camera.object_to_camera_pose
        self.T_obj_camera = T_stp_camera * T_obj_stp.as_frames(
            'obj', T_stp_camera.from_frame)

        depth_im_table = sample.renders[RenderMode.DEPTH_SCENE].image
        camera_pose = self.get_camera_pose(sample)
        shifted_camera_intr = sample.camera.camera_intr

        cx = depth_im_table.center[1]
        cy = depth_im_table.center[0]

        self.tensor_datapoint['camera_intrs'] = np.r_[shifted_camera_intr.fx,
                                                      shifted_camera_intr.fy,
                                                      shifted_camera_intr.cx,
                                                      shifted_camera_intr.cy]

        for aligned_grasp, collision_free in zip(grasps, collisions):
            # Project grasp coordinates in image
            grasp_point = aligned_grasp.close_fingers(self.obj,
                                                      vis=False,
                                                      check_approach=False)

            if not grasp_point[0]:
                logging.warning("Could not find contact points")
                continue

            grasp_width = aligned_grasp.width_from_endpoints(
                grasp_point[1][0].point, grasp_point[1][1].point)

            if VISUALISE_3D:
                c = np.array((1, 0, 0))
                vis.figure()
                T_obj_world = vis.mesh_stable_pose(self.obj.mesh.trimesh,
                                                   stable_pose.T_obj_world,
                                                   style='surface',
                                                   dim=0.5,
                                                   plot_table=True)
                points = np.array(
                    (np.append(grasp_point[1][0].point,
                               1), np.append(grasp_point[1][1].point, 1)))

                vis.gripper(self.gripper,
                            aligned_grasp,
                            T_obj_world,
                            color=(0.3, 0.3, 0.3),
                            point=points,
                            point_color=c)
                vis.show()
            grasp_2d = aligned_grasp.project_camera(self.T_obj_camera,
                                                    shifted_camera_intr)

            depth_im_tf = self.prepare_images(depth_im_table, grasp_2d, cx, cy)
            hand_pose = self.get_hand_pose(grasp_2d, shifted_camera_intr,
                                           grasp_width)

            self.add_datapoint(depth_im_tf, hand_pose, collision_free,
                               camera_pose, aligned_grasp, grasp_metrics)
        self.cur_image_label += 1
Example #11
def antipodal_grasp_sampler(visual=False, debug=False):
    mass = 1.0
    CONFIG['obj_rescaling_type'] = RescalingType.RELATIVE
    mesh_processor = MeshProcessor(OBJ_FILENAME, CONFIG['cache_dir'])
    mesh_processor.generate_graspable(CONFIG)
    mesh = mesh_processor.mesh
    sdf = mesh_processor.sdf
    stable_poses = mesh_processor.stable_poses
    obj = GraspableObject3D(sdf, mesh)
    stable_pose = stable_poses[0]
    if visual:
        vis.figure()

    T_table_world = RigidTransform(from_frame='table', to_frame='world')
    T_obj_world = Visualizer3D.mesh_stable_pose(obj.mesh.trimesh,
                                                stable_pose.T_obj_world,
                                                T_table_world=T_table_world,
                                                color=(0.5, 0.5, 0.5),
                                                style='surface',
                                                plot_table=True,
                                                dim=0.15)
    if debug:
        print(len(stable_poses))
    #for stable_pose in stable_poses:
    #    print(stable_pose.p)
    #    print(stable_pose.r)
    #    print(stable_pose.x0)
    #    print(stable_pose.face)
    #    print(stable_pose.id)
    # glass = 22 is standing straight
    if debug:
        print(stable_pose.r)
        print(stable_pose.T_obj_world)
    gripper = RobotGripper.load(
        GRIPPER_NAME, gripper_dir='/home/silvia/dex-net/data/grippers')

    ags = AntipodalGraspSampler(gripper, CONFIG)
    stable_pose.id = 0
    grasps = ags.generate_grasps(obj,
                                 target_num_grasps=20,
                                 max_iter=5,
                                 stable_pose=stable_pose.r)

    quality_config = GraspQualityConfigFactory.create_config(
        CONFIG['metrics']['robust_ferrari_canny'])

    metrics = []
    result = []

    #grasps = map(lambda g : g.perpendicular_table(stable_pose), grasps)

    for grasp in grasps:
        c1, c2 = grasp.endpoints
        true_fc = PointGraspMetrics3D.grasp_quality(grasp, obj, quality_config)
        metrics.append(true_fc)
        result.append((c1, c2))

        if debug:
            success, c = grasp.close_fingers(obj)
            if success:
                c1, c2 = c
                print("Grasp:")
                print(c1.point)
                print(c2.point)
                print(true_fc)

    low = np.min(metrics)
    high = np.max(metrics)
    if low == high:
        q_to_c = lambda quality: CONFIG['quality_scale']
    else:
        q_to_c = lambda quality: CONFIG['quality_scale'] * (quality - low) / (
            high - low)

    if visual:
        for grasp, metric in zip(grasps, metrics):
            #grasp2 = grasp.perpendicular_table(stable_pose)
            #c1, c2 = grasp.endpoints
            #axis = ParallelJawPtGrasp3D.axis_from_endpoints(c1, c2)
            #angle = np.dot(np.matmul(stable_pose.r, axis), [1,0,0])
            #angle = math.tan(axis[1]/axis[0])
            #angle = math.degrees(angle)%360
            #print(angle)
            #print(angle/360.0)
            color = plt.get_cmap('hsv')(q_to_c(metric))[:-1]
            vis.grasp(grasp,
                      T_obj_world=T_obj_world,
                      grasp_axis_color=color,
                      endpoint_color=color)

        #axis = np.array([[0,0,0], point])
        #points = [(x[0], x[1], x[2]) for x in axis]
        #Visualizer3D.plot3d(points, color=(0,0,1), tube_radius=0.002)
        vis.show(False)

    pose_matrix = np.eye(4, 4)
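    # pack the object-to-world stable-pose transform into a homogeneous 4x4 matrix;
    # it is returned together with the grasp endpoint pairs collected above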
    pose_matrix[:3, :3] = T_obj_world.rotation
    pose_matrix[:3, 3] = T_obj_world.translation
    return pose_matrix, result