def _load_object(self, path, scale):
        if (path, scale) in self._cache:
            return self._cache[(path, scale)]
        obj = sample.Object(path)
        obj.rescale(scale)

        tmesh = obj.mesh
        tmesh_mean = np.mean(tmesh.vertices, 0)
        tmesh.vertices -= np.expand_dims(tmesh_mean, 0)

        lbs = np.min(tmesh.vertices, 0)
        ubs = np.max(tmesh.vertices, 0)
        object_distance = np.max(ubs - lbs) * 5

        mesh = pyrender.Mesh.from_trimesh(tmesh)

        context = {
            'tmesh': copy.deepcopy(tmesh),
            'distance': object_distance,
            'node': pyrender.Node(mesh=mesh),
            'mesh_mean': np.expand_dims(tmesh_mean, 0),
        }

        self._cache[(path, scale)] = context
        return self._cache[(path, scale)]
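
# A minimal sketch of the caching pattern _load_object above relies on. The
# class and method names here are illustrative assumptions; only the
# (path, scale) cache key mirrors the method.
class CachedLoader:
    def __init__(self):
        self._cache = {}

    def load(self, path, scale):
        key = (path, scale)
        if key not in self._cache:
            self._cache[key] = self._expensive_load(path, scale)
        return self._cache[key]

    def _expensive_load(self, path, scale):
        # Stand-in for the mesh loading, centering, and pyrender node creation
        # done in _load_object.
        return {'path': path, 'scale': scale}


loader = CachedLoader()
ctx = loader.load('models/mug.obj', 1.0)            # computed once
assert loader.load('models/mug.obj', 1.0) is ctx    # later calls hit the cache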
Example #2
    def eval_scene(self, file_path, visualize=False):
        """
          Returns full_results, evaluator_results.
            full_results: Contains information about grasps in canonical pose, scores,
              ground truth positive grasps, and also cad path and scale that is used for
              flex evaluation.
            evaluator_results: Only contains information for the classification of positive
              and negative grasps. The info is gt label of each grasp, predicted score for
              each grasp, and the 4x4 transformation of each grasp.
        """
        pc, grasps, grasps_label, flex_info = self.read_eval_scene(file_path)
        canonical_transform = flex_info['to_canonical_transformation']
        evaluator_result = None
        full_results = None
        if self._eval_grasp_evaluator:
            latents = self._grasp_estimator.sample_latents()
            output_grasps, output_scores, _ = self._grasp_estimator.predict_grasps(
                self._sess,
                pc, latents, 0, grasps_rt=grasps)
            evaluator_result = (grasps_label, output_scores, output_grasps)

            latents = np.random.rand(self._cfg.num_samples, self._cfg.latent_size) * 4 - 2
            print(pc.shape)
            generated_grasps, generated_scores, _ = self._grasp_estimator.predict_grasps(
                self._sess,
                pc,
                latents,
                num_refine_steps=self._cfg.num_refine_steps,
            )

            gt_pos_grasps = [g for g, l in zip(grasps, grasps_label) if l == 1]
            gt_pos_grasps = np.asarray(gt_pos_grasps).copy()
            gt_pos_grasps_canonical = np.matmul(canonical_transform, gt_pos_grasps)
            generated_grasps = np.asarray(generated_grasps)
            print(generated_grasps.shape)
            generated_grasps_canonical = np.matmul(canonical_transform, generated_grasps)

            obj = sample.Object(flex_info['cad_path'])
            obj.rescale(flex_info['cad_scale'])
            mesh = obj.mesh
            mesh_mean = np.mean(mesh.vertices, 0, keepdims=True)

            canonical_pc = pc.dot(canonical_transform[:3, :3].T)
            canonical_pc += np.expand_dims(canonical_transform[:3, 3], 0)

            gt_pos_grasps_canonical[:, :3, 3] += mesh_mean
            canonical_pc += mesh_mean
            generated_grasps_canonical[:, :3, 3] += mesh_mean
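
# Sketch of the canonical-frame math used in eval_scene above: points are
# mapped with R.p + t, and 4x4 grasp poses are composed by left-multiplying
# the canonical transform. The transform and data below are arbitrary
# examples for illustration, not values from the evaluation pipeline.
import numpy as np

rng = np.random.default_rng(0)
pc_demo = rng.normal(size=(100, 3))                  # N x 3 point cloud
grasps_demo = np.tile(np.eye(4), (5, 1, 1))          # five grasp poses

T = np.eye(4)
T[:3, :3] = np.array([[0., -1., 0.],                 # 90-degree rotation about z
                      [1.,  0., 0.],
                      [0.,  0., 1.]])
T[:3, 3] = [0.1, 0.0, 0.2]

# Point clouds: rotate then translate, as pc.dot(R.T) + t in eval_scene.
pc_canonical_demo = pc_demo.dot(T[:3, :3].T) + T[:3, 3]

# Grasp poses: left-multiply the 4x4 transform, broadcast over the batch.
grasps_canonical_demo = np.matmul(T, grasps_demo)

# Sanity check: grasp translations move exactly like points do.
assert np.allclose(grasps_canonical_demo[:, :3, 3],
                   grasps_demo[:, :3, 3].dot(T[:3, :3].T) + T[:3, 3])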
    def _load_object(self, path, scale=1.0):
        obj = sample.Object(path)
        obj.rescale(scale)
        print('rescaling with scale', scale)

        tmesh = obj.mesh
        tmesh_mean = np.mean(tmesh.vertices, 0)
        tmesh.vertices -= np.expand_dims(tmesh_mean, 0)

        lbs = np.min(tmesh.vertices, 0)
        ubs = np.max(tmesh.vertices, 0)
        self._object_distances.append(np.max(ubs - lbs) * 5)

        print(self._object_distances)

        self.tmesh = copy.deepcopy(tmesh)
        mesh = pyrender.Mesh.from_trimesh(tmesh)

        return self._scene.add(mesh,
                               name='object'), np.expand_dims(tmesh_mean, 0)
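
# This variant of _load_object assumes renderer state that the snippet does
# not show. A minimal sketch of the attributes it touches; the class name and
# constructor are assumptions, only the attribute names come from the method.
import pyrender


class SceneRenderer:
    def __init__(self):
        self._scene = pyrender.Scene()   # _load_object adds the object mesh node here
        self._object_distances = []      # per-object camera-distance heuristic
        self.tmesh = None                # centered trimesh kept by _load_object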
    def read_object_grasp_data(
        self, json_path, quality='quality_flex_object_in_gripper', 
        ratio_of_grasps_to_be_used=1., return_all_grasps=False):
        """
        Reads the grasps from the json path and loads the mesh and all the 
        grasps.
        """
        num_clusters = self._num_grasp_clusters
        root_folder = self._root_folder

        if num_clusters <= 0:
            raise NoPositiveGraspsException

        with open(json_path) as f:
            json_dict = json.load(f)

        object_model = sample.Object(os.path.join(root_folder, json_dict['object']))
        object_model.rescale(json_dict['object_scale'])
        object_model = object_model.mesh
        object_mean = np.mean(object_model.vertices, 0, keepdims=1)

        object_model.vertices -= object_mean
        grasps = np.asarray(json_dict['transforms'])
        grasps[:, :3, 3] -= object_mean

        flex_qualities = np.asarray(json_dict[quality])
        try:
            heuristic_qualities = np.asarray(json_dict['quality_number_of_contacts'])
        except KeyError:
            heuristic_qualities = np.ones(flex_qualities.shape)

        successful_mask = np.logical_and(flex_qualities > 0.01, heuristic_qualities > 0.01)
        
        positive_grasp_indexes = np.where(successful_mask)[0]
        negative_grasp_indexes = np.where(~successful_mask)[0]

        positive_grasps = grasps[positive_grasp_indexes, :, :]
        negative_grasps = grasps[negative_grasp_indexes, :, :]
        positive_qualities = heuristic_qualities[positive_grasp_indexes]
        negative_qualities = heuristic_qualities[negative_grasp_indexes]
        # print('positive grasps: {} negative grasps: {}'.format(positive_grasps.shape, negative_grasps.shape))

        def cluster_grasps(grasps, qualities):
            cluster_indexes = np.asarray(farthest_points(grasps, num_clusters, distance_by_translation_grasp))
            output_grasps = []
            output_qualities = []

            for i in range(num_clusters):
                indexes = np.where(cluster_indexes == i)[0]
                if ratio_of_grasps_to_be_used < 1:
                    if len(indexes) == 0:
                        raise NoPositiveGraspsException
                    num_grasps_to_choose = max(1, int(ratio_of_grasps_to_be_used * float(len(indexes))))
                    indexes = np.random.choice(indexes, size=num_grasps_to_choose, replace=False)
                    
                output_grasps.append(grasps[indexes, :, :])
                output_qualities.append(qualities[indexes])
            
            output_grasps = np.asarray(output_grasps)
            output_qualities = np.asarray(output_qualities)
            
            return output_grasps, output_qualities
        
        if not return_all_grasps:
            positive_grasps, positive_qualities = cluster_grasps(
                positive_grasps, positive_qualities
            )
            negative_grasps, negative_qualities = cluster_grasps(
                negative_grasps, negative_qualities
            )
            num_positive_grasps = np.sum([p.shape[0] for p in positive_grasps])
            num_negative_grasps = np.sum([p.shape[0] for p in negative_grasps])
        else:
            num_positive_grasps = positive_grasps.shape[0]
            num_negative_grasps = negative_grasps.shape[0]


        return (positive_grasps, positive_qualities, negative_grasps,
                negative_qualities, object_model,
                os.path.join(root_folder, json_dict['object']),
                json_dict['object_scale'])
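
# cluster_grasps above delegates to farthest_points with
# distance_by_translation_grasp, neither of which is shown in this snippet.
# The sketch below illustrates the underlying idea (farthest point sampling
# over grasp translations, then nearest-center assignment); it is an
# assumption about that helper, not the project's implementation.
import numpy as np


def cluster_by_farthest_translation(grasps, num_clusters, seed=0):
    """grasps: (N, 4, 4) poses -> (N,) cluster index per grasp."""
    rng = np.random.default_rng(seed)
    translations = grasps[:, :3, 3]                    # N x 3
    centers = [int(rng.integers(len(grasps)))]         # start from a random grasp
    dists = np.linalg.norm(translations - translations[centers[0]], axis=1)
    for _ in range(1, num_clusters):
        centers.append(int(np.argmax(dists)))          # farthest from chosen centers
        new_d = np.linalg.norm(translations - translations[centers[-1]], axis=1)
        dists = np.minimum(dists, new_d)
    center_pts = translations[centers]                 # num_clusters x 3
    # Assign each grasp to its nearest center by translation distance.
    return np.argmin(
        np.linalg.norm(translations[:, None, :] - center_pts[None, :, :], axis=2),
        axis=1)


# Example: 100 random poses grouped into 8 clusters.
demo_poses = np.tile(np.eye(4), (100, 1, 1))
demo_poses[:, :3, 3] = np.random.default_rng(1).normal(size=(100, 3))
demo_labels = cluster_by_farthest_translation(demo_poses, 8)
assert demo_labels.shape == (100,) and demo_labels.max() < 8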
        else:
            (output_pcs, output_grasps, output_labels, output_qualities,
             output_pc_poses, output_cad_files, output_cad_scales
             ) = pcreader.get_evaluator_data(grasp_path, verify_grasps=False)

        print(output_grasps.shape)

        for pc, pose in zip(output_pcs, output_pc_poses):
            assert np.all(pc == output_pcs[0])
            assert np.all(pose == output_pc_poses[0])

        
        pc = output_pcs[0]
        pose = output_pc_poses[0]
        cad_file = output_cad_files[0]
        cad_scale = output_cad_scales[0]
        obj = sample.Object(cad_file)
        obj.rescale(cad_scale)
        obj = obj.mesh
        obj.vertices -= np.expand_dims(np.mean(obj.vertices, 0), 0)

        print('mean_pc', np.mean(pc, 0))
        print('pose', pose)
        draw_scene(
            pc,
            grasps=output_grasps,
            grasp_scores=None if args.vae_mode else output_labels,
        )
        mlab.figure()
        draw_scene(
Example #6
def load_object(path, scale):
    object_model = sample.Object(path)
    object_model.rescale(scale)
    object_model = object_model.mesh
    object_model.vertices -= np.mean(object_model.vertices, 0, keepdims=1)
    return object_model
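
# A small usage example for load_object; the path and scale are placeholders,
# and the assertion simply checks that the returned mesh has been centered at
# its vertex mean, which is what the function is meant to guarantee.
import numpy as np

mesh = load_object('models/mug.obj', 0.2)   # hypothetical path and scale
assert np.allclose(np.mean(mesh.vertices, 0), 0.0, atol=1e-6)
print('extents after rescale:', mesh.extents)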
Example #7
def draw_scene(
    pc, 
    grasps=[], 
    grasp_scores=None, 
    grasp_color=None, 
    gripper_color=(0,1,0), 
    mesh=None, 
    show_gripper_mesh=False, 
    grasps_selection=None, 
    visualize_diverse_grasps=False, 
    min_seperation_distance=0.03, 
    pc_color=None, 
    plasma_coloring=False):
    """
    Draws the 3D scene for the object and the scene.
    Args:
      pc: point cloud of the object
      grasps: list of 4x4 numpy array indicating the transformation of the grasps.
        grasp_scores: grasps will be colored based on the scores. If left 
        empty, grasps are visualized in green.
      grasp_color: if it is a tuple, sets the color for all the grasps. If list
        is provided it is the list of tuple(r,g,b) for each grasp.
      mesh: If not None, shows the mesh of the object. Type should be trimesh 
         mesh.
      show_gripper_mesh: If True, shows the gripper mesh for each grasp. 
      grasp_selection: if provided, filters the grasps based on the value of 
        each selection. 1 means select ith grasp. 0 means exclude the grasp.
      visualize_diverse_grasps: sorts the grasps based on score. Selects the 
        top score grasp to visualize and then choose grasps that are not within
        min_seperation_distance distance of any of the previously selected
        grasps. Only set it to True to declutter the grasps for better
        visualization.
      pc_color: if provided, should be a n x 3 numpy array for color of each 
        point in the point cloud pc. Each number should be between 0 and 1.
      plasma_coloring: If True, sets the plasma colormap for visualizting the 
        pc.
    """
    
    max_grasps = 200
    grasps = np.array(grasps)

    if grasp_scores is not None:
        grasp_scores = np.array(grasp_scores)

    if len(grasps) > max_grasps:

        print('Downsampling grasps, there are too many')
        chosen_ones = np.random.randint(low=0, high=len(grasps), size=max_grasps)
        grasps = grasps[chosen_ones]
        if grasp_scores is not None:
            grasp_scores = grasp_scores[chosen_ones]

    
    if mesh is not None:
        if type(mesh) == list:
            for elem in mesh:
                plot_mesh(elem)
        else:
            plot_mesh(mesh)

    if pc_color is None and pc is not None:
        if plasma_coloring:
            mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], pc[:, 2], colormap='plasma')
        else:
            mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color=(0.1,0.1,1),scale_factor=0.01)
    elif pc is not None:
        if plasma_coloring:
            mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], pc_color[:, 0], colormap='plasma')
        else:
            rgba = np.zeros((pc.shape[0], 4), dtype=np.uint8)
            rgba[:, :3] = np.asarray(pc_color)
            rgba[:, 3] = 255
            src = mlab.pipeline.scalar_scatter(pc[:, 0], pc[:, 1], pc[:, 2])
            src.add_attribute(rgba, 'colors')
            src.data.point_data.set_active_scalars('colors')
            g = mlab.pipeline.glyph(src)
            g.glyph.scale_mode = "data_scaling_off"
            g.glyph.glyph.scale_factor = 0.01


    grasp_pc = np.squeeze(tf_utils.get_control_point_tensor(1, False), 0)
    print(grasp_pc.shape)
    grasp_pc[2, 2] = 0.059
    grasp_pc[3, 2] = 0.059

    mid_point = 0.5*(grasp_pc[2, :] + grasp_pc[3, :])

    modified_grasp_pc = []
    modified_grasp_pc.append(np.zeros((3,), np.float32))
    modified_grasp_pc.append(mid_point)
    modified_grasp_pc.append(grasp_pc[2])
    modified_grasp_pc.append(grasp_pc[4])
    modified_grasp_pc.append(grasp_pc[2])
    modified_grasp_pc.append(grasp_pc[3])
    modified_grasp_pc.append(grasp_pc[5])

    grasp_pc = np.asarray(modified_grasp_pc)

    def transform_grasp_pc(g):
        output = np.matmul(grasp_pc, g[:3, :3].T)
        output += np.expand_dims(g[:3, 3], 0)

        return output

    
    if grasp_scores is not None:
        indexes = np.argsort(-np.asarray(grasp_scores))
    else:
        indexes = range(len(grasps))

    print('draw scene ', len(grasps))
    
    selected_grasps_so_far = []
    removed = 0
    

    if grasp_scores is not None:
        min_score = np.min(grasp_scores)        
        max_score = np.max(grasp_scores)
        top5 = np.array(grasp_scores).argsort()[-5:][::-1]

    for ii in range(len(grasps)):
        i = indexes[ii]
        if grasps_selection is not None:
            if not grasps_selection[i]:
                continue
        
        g = grasps[i]
        is_diverse = True
        for prevg in selected_grasps_so_far:
            distance = np.linalg.norm(prevg[:3, 3] - g[:3, 3])
    
            if distance < min_seperation_distance:
                is_diverse = False
                break
        
        if visualize_diverse_grasps:
            if not is_diverse:
                removed += 1
                continue
            else:
                if grasp_scores is not None:
                    print('selected', i, grasp_scores[i], min_score, max_score)
                else:
                    print('selected', i)
                selected_grasps_so_far.append(g)

        if isinstance(gripper_color, list):
            pass
        elif grasp_scores is not None:
            normalized_score = (grasp_scores[i] - min_score) / (max_score - min_score + 0.0001)
            if grasp_color is not None:
                gripper_color = grasp_color[ii]
            else:
                gripper_color = get_color_plasma(normalized_score)

            if min_score == 1.0:
                gripper_color = (0.0, 1.0, 0.0)

    
        if show_gripper_mesh:
            gripper_mesh = sample.Object('panda_gripper.obj').mesh
            gripper_mesh.apply_transform(g)
            mlab.triangular_mesh(
                gripper_mesh.vertices[:, 0],
                gripper_mesh.vertices[:, 1],
                gripper_mesh.vertices[:, 2],
                gripper_mesh.faces,
                color=gripper_color,
                opacity=1 if visualize_diverse_grasps else 0.5
            )
        else:
            pts = np.matmul(grasp_pc, g[:3, :3].T)
            pts += np.expand_dims(g[:3, 3], 0)
            if isinstance(gripper_color, list):
                mlab.plot3d(pts[:, 0], pts[:, 1], pts[:, 2], color=gripper_color[i], tube_radius=0.003, opacity=1)
            else:
                tube_radius = 0.001                    
                mlab.plot3d(pts[:, 0], pts[:, 1], pts[:, 2], color=gripper_color, tube_radius=tube_radius, opacity=1)

    print('removed {} similar grasps'.format(removed))
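
# A hedged usage example for draw_scene with synthetic data. It assumes mlab
# is mayavi.mlab (as the calls inside draw_scene suggest) and that the other
# module-level dependencies of draw_scene (tf_utils, sample, get_color_plasma,
# plot_mesh) are importable exactly as in the snippet above.
import numpy as np
from mayavi import mlab

demo_pc = np.random.rand(1024, 3) * 0.1 - 0.05   # small synthetic point cloud
demo_grasps = [np.eye(4), np.eye(4)]
demo_grasps[1][:3, 3] = [0.0, 0.05, 0.0]         # offset the second grasp
demo_scores = [0.9, 0.4]

mlab.figure(bgcolor=(1, 1, 1))
draw_scene(demo_pc, grasps=demo_grasps, grasp_scores=demo_scores)
mlab.show()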