import math
import os

import numpy as np
import trimesh
import trimesh.transformations as tra
import mayavi.mlab as mlab

from visualization_utils import draw_scene

# NOTE: cov_matrix and choose_direction are assumed to be helpers defined
# elsewhere in this module.


def propose_grasps(pc, radius, num_grasps=1, vis=False):
    output_grasps = []

    for _ in range(num_grasps):
        center_index = np.random.randint(pc.shape[0])
        center_point = pc[center_index, :].copy()
        d = np.sqrt(np.sum(np.square(pc - np.expand_dims(center_point, 0)),
                           -1))
        index = np.where(d < radius)[0]
        neighbors = pc[index, :]

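        # PCA over the local neighborhood; the eigenvector in column 2 of
        # eigen_vectors is used as the grasp approach direction.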
        eigen_values, eigen_vectors = cov_matrix(center_point, neighbors)
        direction = eigen_vectors[:, 2]

        direction = choose_direction(direction, center_point)

        surface_orientation = trimesh.geometry.align_vectors([0, 0, 1],
                                                             direction)
        roll_orientation = tra.quaternion_matrix(
            tra.quaternion_about_axis(np.random.uniform(0, 2 * np.pi),
                                      [0, 0, 1]))
        gripper_transform = surface_orientation.dot(roll_orientation)
        gripper_transform[:3, 3] = center_point

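        # back the gripper off along its local z (approach) axis by a random
        # standoff before finalizing the pose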
        translation_transform = np.eye(4)
        translation_transform[2, 3] = -np.random.uniform(0.0669, 0.1122)

        gripper_transform = gripper_transform.dot(translation_transform)
        output_grasps.append(gripper_transform.copy())

    if vis:
        draw_scene(pc, grasps=output_grasps)
        mlab.show()

    return np.asarray(output_grasps)
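

# Usage sketch (hypothetical input; `cloud` is an (N, 3) point cloud and the
# helper functions above are assumed available):
#
#   cloud = np.load('cloud.npy')  # hypothetical file
#   grasps = propose_grasps(cloud, radius=0.02, num_grasps=10)
#   # grasps has shape (10, 4, 4): one homogeneous gripper pose per sample
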
def main_check_pointcloud(iterate_all_viewpoints=True):
    from visualization_utils import draw_scene
    import mayavi.mlab as mlab
    import matplotlib.pyplot as plt
    import glob
    import random
    import cv2
    import time
    import json

    quaternions = [
        l[:-1].split('\t')
        for l in open('uniform_quaternions/data2_4608.qua', 'r').readlines()
    ]

    quaternions = [[float(t[0]),
                    float(t[1]),
                    float(t[2]),
                    float(t[3])] for t in quaternions]
    quaternions = np.asarray(quaternions)
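    # reorder from (x, y, z, w) to the (w, x, y, z) convention that
    # tra.quaternion_matrix expects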
    quaternions = np.roll(quaternions, 1, axis=1)
    all_eulers = [tra.quaternion_matrix(q) for q in quaternions]
    # NOTE: the quaternion-derived viewpoints above are immediately discarded
    # and replaced by a regular azimuth/elevation grid.
    all_eulers = []
    for az in np.linspace(0, math.pi * 2, 30):
        for el in np.linspace(-math.pi / 2, math.pi / 2, 30):
            all_eulers.append(tra.euler_matrix(el, az, 0))

    renderer = OnlineObjectRendererMultiProcess(caching=True)
    renderer.start()

    grasps_path = glob.glob('unified_grasp_data/grasps/*.json')
    random.shuffle(grasps_path)
    #mesh_paths = ['unified_grasp_data/meshes/Bowl/9a52843cc89cd208362be90aaa182ec6.stl']

    for main_iter in range(5 * len(grasps_path)):
        gpath = grasps_path[main_iter % len(grasps_path)]
        json_dict = json.load(open(gpath))
        mpath = os.path.join('unified_grasp_data', json_dict['object'])
        scale = json_dict['object_scale']
        #mpath = 'unified_grasp_data/meshes/Bottle/10d3d5961e00b2133ff038bc77759685.stl'
        #mpath = 'unified_grasp_data/meshes/Bottle/ef631a2ce94fae3ab8966911a5afa22c.stl'
        print(main_iter, mpath)
        start_time = time.time()
        renderer.change_object(mpath, scale)
        if iterate_all_viewpoints:
            viewpoints = all_eulers
        else:
            viewpoints = [all_eulers[np.random.randint(len(all_eulers))]]

        for view in viewpoints:
            image, depth, pc, _ = renderer.render(view)
            print(time.time() - start_time)
            print('depth min = {} max = {} npoints = {}'.format(
                np.min(depth), np.max(depth), pc.shape))
            draw_scene(pc, None)
            mlab.show()

    renderer.terminate()
    renderer.join()
Example 3
    def visualize(self, visualize_all: bool = True):
        """Visualize point cloud and last batch of computed grasps in a 3D visualizer
        """

        if not visualize_all and 0 < self.n_of_candidates < len(
                self.latest_grasps):
            candidates_to_display = self.n_of_candidates
        else:
            candidates_to_display = len(self.latest_grasps)

        mlab.figure(bgcolor=(1, 1, 1))
        visualization_utils.draw_scene(
            pc=self.scene_pc,
            grasps=self.latest_grasps[:candidates_to_display],
            grasp_scores=self.latest_grasp_scores[:candidates_to_display],
            pc_color=self.scene_pc_colors,
            show_gripper_mesh=True)
        print('[INFO] Close visualization window to proceed')
        mlab.show()
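
    # Usage sketch (hypothetical instance name): after computing grasps,
    #   planner.visualize(visualize_all=False)  # caps display at n_of_candidates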
Example 4
    def eval_scene(self, file_path, visualize=False):
        """
          Returns full_results, evaluator_results.
            full_results: Contains information about grasps in canonical pose, scores,
              ground truth positive grasps, and also cad path and scale that is used for
              flex evaluation.
            evaluator_results: Only contains information for the classification of positive
              and negative grasps. The info is gt label of each grasp, predicted score for
              each grasp, and the 4x4 transformation of each grasp.
        """
        pc, grasps, grasps_label, flex_info = self.read_eval_scene(file_path)
        canonical_transform = flex_info['to_canonical_transformation']
        evaluator_result = None
        full_results = None
        if self._eval_grasp_evaluator:
            latents = self._grasp_estimator.sample_latents()
            output_grasps, output_scores, _ = self._grasp_estimator.predict_grasps(
                self._sess, pc, latents, 0, grasps_rt=grasps)
            evaluator_result = (grasps_label, output_scores, output_grasps)

        if self._eval_vae_and_evaluator:
            latents = np.random.rand(self._cfg.num_samples,
                                     self._cfg.latent_size) * 4 - 2
            print(pc.shape)
            generated_grasps, generated_scores, _ = self._grasp_estimator.predict_grasps(
                self._sess,
                pc,
                latents,
                num_refine_steps=self._cfg.num_refine_steps,
            )

            gt_pos_grasps = [g for g, l in zip(grasps, grasps_label) if l == 1]
            gt_pos_grasps = np.asarray(gt_pos_grasps).copy()
            gt_pos_grasps_canonical = np.matmul(canonical_transform,
                                                gt_pos_grasps)
            generated_grasps = np.asarray(generated_grasps)
            print(generated_grasps.shape)
            generated_grasps_canonical = np.matmul(canonical_transform,
                                                   generated_grasps)

            obj = sample.Object(flex_info['cad_path'])
            obj.rescale(flex_info['cad_scale'])
            mesh = obj.mesh
            mesh_mean = np.mean(mesh.vertices, 0, keepdims=True)

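            # map the point cloud into the object's canonical frame: R @ p + t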
            canonical_pc = pc.dot(canonical_transform[:3, :3].T)
            canonical_pc += np.expand_dims(canonical_transform[:3, 3], 0)

            gt_pos_grasps_canonical[:, :3, 3] += mesh_mean
            canonical_pc += mesh_mean
            generated_grasps_canonical[:, :3, 3] += mesh_mean

            if visualize:
                from visualization_utils import draw_scene
                import mayavi.mlab as mlab

                draw_scene(canonical_pc,
                           grasps=gt_pos_grasps_canonical,
                           mesh=mesh)
                mlab.show()

                # canonical_pc was already offset by mesh_mean above; adding
                # it again would misalign the cloud with the grasps and mesh
                draw_scene(canonical_pc,
                           grasps=generated_grasps_canonical,
                           mesh=mesh,
                           grasp_scores=generated_scores)
                mlab.show()

            full_results = (generated_grasps_canonical, generated_scores,
                            gt_pos_grasps_canonical, flex_info['cad_path'],
                            flex_info['cad_scale'])

        return full_results, evaluator_result
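
    # Usage sketch (hypothetical path): either returned element may be None,
    # depending on which eval flags are set.
    #
    #   full_results, evaluator_result = self.eval_scene('eval/scene_000.npy')
    #   if full_results is not None:
    #       grasps, scores, gt_grasps, cad_path, cad_scale = full_results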
Example 5
    def read_eval_scene(self, file_path, visualize=False):
        if not os.path.isfile(file_path):
            if not self._should_create_data:
                raise ValueError('could not find data {}'.format(file_path))

            json_path = self._grasp_reader.generate_object_set(
                self._cfg.eval_split)
            obj_grasp_data = self._grasp_reader.read_grasp_file(
                os.path.join(self._cfg.dataset_root_folder, json_path), True)
            obj_pose = self._grasp_reader.arrange_objects(
                obj_grasp_data[-3])[0]
            in_camera_pose = None
            print('changing object to ', obj_grasp_data[-2])
            self._grasp_reader.change_object(obj_grasp_data[-2],
                                             obj_grasp_data[-1])
            pc, camera_pose, in_camera_pose = self._grasp_reader.render_random_scene(
                None)
            folder_path = file_path[:file_path.rfind('/')]
            create_directory(folder_path)

            print('writing {}'.format(file_path))
            np.save(
                file_path, {
                    'json': json_path,
                    'obj_pose': obj_pose,
                    'camera_pose': in_camera_pose
                })
        else:
            # the file stores a dict (object array), so allow_pickle is
            # required on NumPy >= 1.16.3
            d = np.load(file_path, allow_pickle=True).item()
            json_path = d['json']
            obj_pose = d['obj_pose']
            obj_grasp_data = self._grasp_reader.read_grasp_file(
                os.path.join(self._cfg.dataset_root_folder, json_path), True)
            in_camera_pose = d['camera_pose']
            self._grasp_reader.change_object(obj_grasp_data[-2],
                                             obj_grasp_data[-1])
            pc, camera_pose, _ = self._grasp_reader.render_random_scene(
                in_camera_pose)

        pos_grasps = np.matmul(np.expand_dims(camera_pose, 0),
                               obj_grasp_data[0])
        neg_grasps = np.matmul(np.expand_dims(camera_pose, 0),
                               obj_grasp_data[2])
        grasp_labels = np.hstack(
            (np.ones(pos_grasps.shape[0]),
             np.zeros(neg_grasps.shape[0]))).astype(np.int32)
        grasps = np.concatenate((pos_grasps, neg_grasps), 0)

        if visualize:
            from visualization_utils import draw_scene
            import mayavi.mlab as mlab

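            # subsample for display: ~10% of positives, ~1% of negatives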
            pos_mask = np.logical_and(
                grasp_labels == 1,
                np.random.rand(*grasp_labels.shape) < 0.1)
            neg_mask = np.logical_and(
                grasp_labels == 0,
                np.random.rand(*grasp_labels.shape) < 0.01)

            print(grasps[pos_mask, :, :].shape, grasps[neg_mask, :, :].shape)
            draw_scene(pc, grasps[pos_mask, :, :])
            mlab.show()

            draw_scene(pc, grasps[neg_mask, :, :])
            mlab.show()

        return pc[:, :3], grasps, grasp_labels, {
            'cad_path': obj_grasp_data[-2],
            'cad_scale': obj_grasp_data[-1],
            'to_canonical_transformation':
                grasp_data_reader.inverse_transform(camera_pose),
        }
Example 6

    # (fragment; the truncated loop above asserted that every pose in the
    # batch equals the first one)
    # assert np.all(pose == output_pc_poses[0])

        pc = output_pcs[0]
        pose = output_pc_poses[0]
        cad_file = output_cad_files[0]
        cad_scale = output_cad_scales[0]
        obj = sample.Object(cad_file)
        obj.rescale(cad_scale)
        obj = obj.mesh
        obj.vertices -= np.expand_dims(np.mean(obj.vertices, 0), 0)
        print('mean_pc', np.mean(pc, 0))
        print('pose', pose)
        draw_scene(
            pc,
            grasps=output_grasps,
            grasp_scores=None if args.vae_mode else output_labels,
        )
        mlab.figure()
        draw_scene(
            pc.dot(pose.T),
            grasps=[pose.dot(g) for g in output_grasps],
            mesh=obj,
            grasp_scores=None if args.vae_mode else output_labels,
        )
        mlab.show()
