Code Example #1
    def __init__(self):
        vae_checkpoint_folder = f'{graspnet_path}/checkpoints/latent_size_2_ngpus_1_gan_1_confidence_weight_0.1_npoints_1024_num_grasps_per_object_256_train_evaluator_0_'
        evaluator_checkpoint_folder = f'{graspnet_path}/checkpoints/npoints_1024_train_evaluator_1_allowed_categories__ngpus_8_/'

        self.vae_checkpoint_folder = vae_checkpoint_folder
        self.evaluator_checkpoint_folder = evaluator_checkpoint_folder
        self.grasp_conf_threshold = 0.8
        self.gradient_based_refinement = False

        cfg = grasp_estimator.joint_config(
            self.vae_checkpoint_folder,
            self.evaluator_checkpoint_folder,
        )

        cfg['threshold'] = self.grasp_conf_threshold
        cfg['sample_based_improvement'] = 1 - int(
            self.gradient_based_refinement)
        cfg['num_refine_steps'] = 10 if self.gradient_based_refinement else 20
        cfg['training'] = True
        cfg['logdir'] = "log"

        cfg["vae_checkpoint_folder"] = None
        cfg["evaluator_checkpoint_folder"] = None
        cfg["npoints"] = 1024
        cfg["num_grasps_per_object"] = 64

        self.cfg = cfg

        self.num_refine_steps = cfg['num_refine_steps']
        self.estimator = grasp_estimator.GraspEstimator(cfg)
        self.sess = tf.Session()

        ph = get_vae_placeholder_dict(cfg)

        self.tf_output = self.estimator.build_network(ph)
Code Example #2
File: eval.py Project: zhaodanlw8666/6dof-graspnet
 def __init__(self,
              cfg,
              create_data_if_not_exist,
              output_folder,
              eval_experiment_folder,
              num_experiments,
              eval_grasp_evaluator=False,
              eval_vae_and_evaluator=True):
     self._should_create_data = create_data_if_not_exist
     self._eval_experiment_folder = eval_experiment_folder
     self._num_experiments = num_experiments
     self._grasp_reader = grasp_data_reader.PointCloudReader(
         root_folder=cfg.dataset_root_folder,
         batch_size=cfg.num_grasps_per_object,
         num_grasp_clusters=cfg.num_grasp_clusters,
         npoints=cfg.npoints,
         min_difference_allowed=(0, 0, 0),
         max_difference_allowed=(3, 3, 0),
         occlusion_nclusters=0,
         occlusion_dropout_rate=0.,
         use_uniform_quaternions=False,
         ratio_of_grasps_used=1,
     )
     self._cfg = cfg
     self._grasp_estimator = grasp_estimator.GraspEstimator(cfg)
     os.environ['CUDA_VISIBLE_DEVICES'] = str(self._cfg.gpu)
     self._sess = tf.Session()
     del os.environ['CUDA_VISIBLE_DEVICES']
     self._grasp_estimator.build_network()
     self._eval_grasp_evaluator = eval_grasp_evaluator
     self._eval_vae_and_evaluator = eval_vae_and_evaluator
     self._flex_initialized = False
     self._output_folder = output_folder
     self.update_time_stamp()
Code Example #3
def main():
    """
    need to specify save path: save_path_npy and save_path_txt
    :return:
    """
    args = make_parser().parse_args()
    grasp_sampler_args = utils.read_checkpoint_args(args.grasp_sampler_folder)
    grasp_sampler_args.is_train = False
    grasp_evaluator_args = utils.read_checkpoint_args(
        args.grasp_evaluator_folder)
    grasp_evaluator_args.continue_train = True
    estimator = grasp_estimator.GraspEstimator(grasp_sampler_args,
                                               grasp_evaluator_args, args)

    point_cloud = np.load(args.pc_path)
    generated_grasps, generated_scores = estimator.generate_and_refine_grasps(
        point_cloud)

    grasp_pose_score = {
        'grasp': np.array(generated_grasps),
        'score': np.array(generated_scores)
    }
    np.save(args.npy_save_path, grasp_pose_score)
    print('successfully saved %s' % args.npy_save_path)
    with open(args.txt_save_path, 'w') as w:
        for pose, score in zip(generated_grasps, generated_scores):
            w.write(str(pose))
            w.write('   ')
            w.write(str(score) + '\n')
            w.flush()
    print('successfully saved %s' % args.txt_save_path)
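Because the dict above is stored with np.save, reading it back requires allow_pickle=True; a minimal reload sketch (reusing args.npy_save_path from the example) would be:

import numpy as np

# np.save wraps the dict in a 0-d object array; .item() unwraps it again.
grasp_pose_score = np.load(args.npy_save_path, allow_pickle=True).item()
generated_grasps = grasp_pose_score['grasp']   # array of grasp poses
generated_scores = grasp_pose_score['score']   # array of confidence scores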
Code Example #4
File: main.py Project: SeungBack/6dof-graspnet
def main(args):
    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)

    for npy_file in glob.glob(os.path.join(args.npy_folder, '*.npy')):
        print(npy_file)
        # Depending on your numpy version you may need to change allow_pickle
        # from True to False.
        data = np.load(npy_file, allow_pickle=True).item()
        print(data.keys())
        for k in data.keys():
            print(k, np.shape(data[k]))

        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth,
                                    K,
                                    return_finite_depth=True,
                                    return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, :]

        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        print(object_pc)
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        mlab.figure(bgcolor=(1, 1, 1))
        draw_scene(
            pc,
            pc_color=pc_colors,
            grasps=generated_grasps,
            grasp_scores=generated_scores,
        )
        print('close the window to continue to next object . . .')
        mlab.show()
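The backproject helper used in this and several later examples is not reproduced in these snippets; a rough sketch of the standard pinhole back-projection it presumably performs (a hypothetical re-implementation under that assumption, not the project's actual code) could look like this:

import numpy as np

def backproject_sketch(depth, K, return_finite_depth=True, return_selection=False):
    # Build the pixel grid and keep only pixels with finite depth.
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    d = depth.reshape(-1)
    selection = np.where(np.isfinite(d))[0] if return_finite_depth else np.arange(d.size)
    u, v, d = u.reshape(-1)[selection], v.reshape(-1)[selection], d[selection]
    # Invert the pinhole model: X = (u - cx) * z / fx, Y = (v - cy) * z / fy.
    x = (u - K[0, 2]) * d / K[0, 0]
    y = (v - K[1, 2]) * d / K[1, 1]
    pc = np.stack([x, y, d], axis=1)
    return (pc, selection) if return_selection else pc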
Code Example #5
def main(args):

    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
    config = tf.ConfigProto()
    #config=tf.ConfigProto(gpu_options=gpu_options)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    #sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)

    msg = rospy.wait_for_message('/cloud_pcd', PointCloud2)
    pc = ros_numpy.numpify(msg)
    points = np.zeros((pc.shape[0], 3))
    colors = 100 * np.ones((pc.shape[0], 3))
    points[:, 0] = pc['x']
    points[:, 1] = pc['y']
    points[:, 2] = pc['z']

    latents = estimator.sample_latents()
    generated_grasps, generated_scores, _ = estimator.predict_grasps(
        sess,
        points,
        latents,
        num_refine_steps=cfg.num_refine_steps,
    )
    #print(np.array(generated_grasps).shape)
    print(type(generated_grasps))
    scores_np = np.asarray(generated_scores)
    sorting = np.argsort(-scores_np)
    sorted_scores = scores_np[sorting]

    grasps_np = np.asarray(generated_grasps)
    sorted_grasps = grasps_np[sorting]

    mlab.figure(bgcolor=(1, 1, 1))
    if len(generated_scores) != 0:
        draw_scene(points,
                   pc_color=colors,
                   grasps=sorted_grasps[0:20].tolist(),
                   grasp_scores=sorted_scores[0:20].tolist())
    else:
        draw_scene(
            points,
            pc_color=colors,
        )
    print('close the window to continue to next object . . .')
    mlab.show()
Code Example #6
def main(args):

    parser = make_parser()
    args = parser.parse_args()
    grasp_sampler_args = utils.read_checkpoint_args(args.grasp_sampler_folder)
    grasp_sampler_args.is_train = False
    grasp_evaluator_args = utils.read_checkpoint_args(
        args.grasp_evaluator_folder)
    grasp_evaluator_args.continue_train = True
    estimator = grasp_estimator.GraspEstimator(grasp_sampler_args,
                                               grasp_evaluator_args, args)
    if args.train_data:
        grasp_sampler_args.dataset_root_folder = args.dataset_root_folder
        grasp_sampler_args.num_grasps_per_object = 1
        grasp_sampler_args.num_objects_per_batch = 1
        dataset = DataLoader(grasp_sampler_args)
        for i, data in enumerate(dataset):
            generated_grasps, generated_scores = estimator.generate_and_refine_grasps(
                data["pc"].squeeze())

            return generated_grasps, generated_scores
    else:
        for npy_file in glob.glob(os.path.join(args.npy_folder, '*.npy'))[::-1]:
            # Depending on your numpy version you may need to change allow_pickle
            # from True to False.

            data = np.load(npy_file, allow_pickle=True,
                           encoding="latin1").item()

            depth = data['depth']
            image = data['image']
            K = data['intrinsics_matrix']
            # Removing points that are farther than 1 meter or missing depth
            # values.
            #depth[depth == 0 or depth > 1] = np.nan

            np.nan_to_num(depth, copy=False)
            mask = np.where(np.logical_or(depth == 0, depth > 1))
            depth[mask] = np.nan
            pc, selection = backproject(depth,
                                        K,
                                        return_finite_depth=True,
                                        return_selection=True)
            pc_colors = image.copy()
            pc_colors = np.reshape(pc_colors, [-1, 3])
            pc_colors = pc_colors[selection, :]

            # Smoothed pc comes from averaging the depth for 10 frames and removing
            # the pixels with jittery depth between those 10 frames.
            object_pc = data['smoothed_object_pc']
            generated_grasps, generated_scores = estimator.generate_and_refine_grasps(
                object_pc)
            return generated_grasps, generated_scores
Code Example #7
    def configure(self, cfg: dict):
        """Additional class configuration

        Parameters
        ----------
        cfg : dict
            Configuration dict, as sourced from the YAML file
        """

        # Create a namespace from the config dict
        # Since the GraspNet implementation uses a namespace
        self.cfg_ns = SimpleNamespace(**self.cfg)

        self.grasp_sampler_args = utils.read_checkpoint_args(
            self.cfg_ns.grasp_sampler_folder)
        self.grasp_sampler_args.is_train = False
        self.grasp_evaluator_args = utils.read_checkpoint_args(
            self.cfg_ns.grasp_evaluator_folder)
        self.grasp_evaluator_args.continue_train = True

        self.estimator = grasp_estimator.GraspEstimator(
            self.grasp_sampler_args, self.grasp_evaluator_args, self.cfg_ns)
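Since this configure() builds the three-argument (sampler, evaluator, config) estimator used in Examples #3 and #6, inference would typically go through generate_and_refine_grasps; a brief usage sketch, with object_pc assumed to be an (N, 3) point cloud, is:

# object_pc: (N, 3) point cloud of the target object (assumed available).
grasps, scores = self.estimator.generate_and_refine_grasps(object_pc)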
Code Example #8
    return X


## The model is loaded as a global object ##

parser = make_parser()
args = parser.parse_args(sys.argv[1:])
cfg = grasp_estimator.joint_config(
    args.vae_checkpoint_folder,
    args.evaluator_checkpoint_folder,
)
cfg['threshold'] = args.threshold
cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
estimator = grasp_estimator.GraspEstimator(cfg)
os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
config = tf.ConfigProto()
#config=tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
#sess = tf.Session()
estimator.build_network()
estimator.load_weights(sess)

pub = rospy.Publisher('/grasp_proposals', PoseArray)
pub2 = rospy.Publisher("/grasp_points", PointCloud2, queue_size=1000000)
pub3 = rospy.Publisher('/pc_centroid', Marker)
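
The /grasp_proposals publisher above is created but never exercised in this fragment; one way to pack 4x4 grasp matrices into a PoseArray (a sketch using SciPy for the rotation conversion, not code from this project; the frame_id default is a placeholder) could look like:

import rospy
from geometry_msgs.msg import Pose, PoseArray
from scipy.spatial.transform import Rotation

def grasps_to_pose_array(grasps, frame_id='camera_link'):
    # Pack a list of 4x4 grasp transforms into a ROS PoseArray message.
    msg = PoseArray()
    msg.header.frame_id = frame_id
    msg.header.stamp = rospy.Time.now()  # assumes the node is already initialised
    for g in grasps:
        pose = Pose()
        pose.position.x, pose.position.y, pose.position.z = g[:3, 3]
        qx, qy, qz, qw = Rotation.from_matrix(g[:3, :3]).as_quat()
        pose.orientation.x, pose.orientation.y = qx, qy
        pose.orientation.z, pose.orientation.w = qz, qw
        msg.poses.append(pose)
    return msg

# e.g. pub.publish(grasps_to_pose_array(generated_grasps))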

Code Example #9
def main(args):

    sock = su.initialize_client('localhost', 7777)

    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)

    while True:
        data = su.recvall_pickle(sock)

        print(data.keys())
        for k in data.keys():
            print(k, np.shape(data[k]))

        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth 
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth, K, return_finite_depth=True, return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, ::-1]

        # down sampling
        idx = np.random.choice(pc_colors.shape[0], 100000, replace=False)
        pc = pc[idx, :]
        pc_colors = pc_colors[idx, :]

        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        print("====>", generated_grasps, generated_scores)
        mlab.figure(bgcolor=(1,1,1))
        if len(generated_grasps) != 0:
            draw_scene(
                pc,
                pc_color=pc_colors,
                grasps=generated_grasps,
                grasp_scores=generated_scores,
            )
            print('close the window to continue to next object . . .')
        mlab.show()
        su.sendall_pickle(sock, [generated_grasps, generated_scores])
Code Example #10
File: eval.py Project: rhett-chen/6dof-graspnet
if __name__ == '__main__':
    args = make_parser(sys.argv)
    utils.mkdir(args.output_folder)

    grasp_sampler_args = utils.read_checkpoint_args(args.grasp_sampler_folder)
    grasp_sampler_args.is_train = False
    grasp_evaluator_args = utils.read_checkpoint_args(
        args.grasp_evaluator_folder)
    grasp_evaluator_args.continue_train = True
    if args.gradient_based_refinement:
        args.num_refine_steps = 10
        args.refinement = "gradient"
    else:
        args.num_refine_steps = 20
        args.refinement = "sampling"

    estimator = grasp_estimator.GraspEstimator(grasp_sampler_args,
                                               grasp_evaluator_args, args)
    evaluator = Evaluator(
        cfg,
        args.generate_data_if_missing,
        args.output_folder,
        args.eval_data_folder,
        args.num_experiments,
        eval_grasp_evaluator=args.eval_grasp_evaluator,
        eval_vae_and_evaluator=args.eval_vae_and_evaluator,
    )
    evaluator.eval_all(True)
    del evaluator
Code Example #11
    def __init__(self, config: Config):
        self.config = config
        self.vae_checkpoint_folder = config.vae_checkpoint_folder
        self.evaluator_checkpoint_folder = config.evaluator_checkpoint_folder

        self.gradient_based_refinement = False
        self.grasp_conf_threshold = 0.8
        self.cut_off_points = config.cut_off_points
        self.output_grasps_dir = "vae_generated_grasps"
        self.fix_view = config.fix_view

        ##### Prepare the 6dof graspnet network for forward pass ######
        cfg = grasp_estimator.joint_config(
            self.vae_checkpoint_folder,
            self.evaluator_checkpoint_folder,
        )
        cfg['threshold'] = self.grasp_conf_threshold
        cfg['sample_based_improvement'] = 1 - int(
            self.gradient_based_refinement)
        cfg['num_refine_steps'] = 10 if self.gradient_based_refinement else 20

        if self.config.data_collection_mode:
            if not self.config.data_collection_from_trained_model:
                cfg["use_geometry_sampling"] = True
                cfg['num_refine_steps'] = 0
                cfg['grasp_selection_mode'] = "all"

        #cfg['num_refine_steps'] = 0
        self.num_refine_steps = cfg['num_refine_steps']
        self.estimator = grasp_estimator.GraspEstimator(cfg)
        self.sess = tf.Session()
        self.estimator.build_network()
        self.estimator.load_weights(self.sess)

        if not os.path.exists(self.output_grasps_dir):
            os.makedirs(self.output_grasps_dir)

        # set camera for this:
        self.camera_positions, self.camera_quats = pcp_utils.cameras.generate_new_cameras_hemisphere(
            radius=self.config.camera_radius,
            lookat_point=self.config.camera_lookat_pos,
            pitch=self.config.camera_pitch,
            yaw=self.config.camera_yaw,
            yaw_list=self.config.camera_yaw_list)
        self.n_cams = len(self.camera_positions)

        # i don't think we need this
        mujoco_T_adam = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]],
                                 dtype=np.float32)
        origin_T_camR_xpos = np.array(config.table_top, np.float32) + np.array(
            config.table_T_camR, np.float32)

        origin_T_adam = np.zeros((4, 4), dtype=np.float32)
        origin_T_adam[:3, :3] = mujoco_T_adam
        origin_T_adam[:3, 3] = origin_T_camR_xpos
        origin_T_adam[3, 3] = 1
        self.origin_T_adam = origin_T_adam
        self.adam_T_origin = np.linalg.inv(self.origin_T_adam)

        gripper_pc = np.squeeze(tf_utils.get_control_point_tensor(1, False), 0)

        gripper_pc[2, 2] = 0.059
        gripper_pc[3, 2] = 0.059
        mid_point = 0.5 * (gripper_pc[2, :] + gripper_pc[3, :])

        # modified grasps
        modified_gripper_pc = []
        modified_gripper_pc.append(np.zeros((3, ), np.float32))
        modified_gripper_pc.append(mid_point)
        modified_gripper_pc.append(gripper_pc[2])
        modified_gripper_pc.append(gripper_pc[4])
        modified_gripper_pc.append(gripper_pc[2])
        modified_gripper_pc.append(gripper_pc[3])
        modified_gripper_pc.append(gripper_pc[5])

        self.gripper_pc_ori = np.asarray(modified_gripper_pc)
Code Example #12
    def plan_grasp(self, camera_data: CameraData,
                   aruco_board_data: ArucoBoardData,
                   n_candidates: int) -> bool:
        """Plan grasps according to visual data. Grasps are returned with
        respect to the camera reference frame

        Parameters
        ----------
        camera_data : CameraData
        aruco_board_data : ArucoBoardData
            Board data used to optionally filter grasps colliding with the table
        n_candidates : int
            Number of grasp candidates to plan and return

        Returns
        -------
        bool
            True if any number of candidates could be retrieved, False otherwise
        """

        self.n_of_candidates = n_candidates

        self.estimator = grasp_estimator.GraspEstimator(
            self.cfg_grasp_estimator)
        # Compute grasps according to the pytorch implementation
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        self.estimator.build_network()
        self.estimator.load_weights(sess)

        # # Depending on your numpy version you may need to change allow_pickle
        # # from True to False.

        # data = np.load('/workspace/sources/6dof-graspnet/demo/data/cheezit.npy', allow_pickle=True,  encoding='latin1').item()
        # print(data.keys())
        # depth = data['depth']
        # image = data['image']
        # K = data['intrinsics_matrix']
        # # Removing points that are farther than 1 meter or missing depth
        # # values.
        # depth[depth == 0] = np.nan
        # depth[depth > 1] = np.nan
        # pc, selection = backproject(depth, K, return_finite_depth=True, return_selection=True)
        # pc_colors = image.copy()
        # pc_colors = np.reshape(pc_colors, [-1, 3])
        # pc_colors = pc_colors[selection, :]

        # # Smoothed pc comes from averaging the depth for 10 frames and removing
        # # the pixels with jittery depth between those 10 frames.
        # object_pc2 = data['smoothed_object_pc']

        self.latest_grasps, self.latest_grasp_scores, output_latents = self.estimator.predict_grasps(
            sess, self.object_pc, self.estimator.sample_latents(),
            self.cfg_grasp_estimator.num_refine_steps)
        self.grasp_poses.clear()

        # Sort grasps from best to worst
        # (Assume grasps and scores are lists)
        if len(self.latest_grasps) >= n_candidates:
            sorted_grasps_quality_list = sorted(zip(self.latest_grasp_scores,
                                                    self.latest_grasps),
                                                key=lambda pair: pair[0],
                                                reverse=True)
            self.latest_grasps = [g[1] for g in sorted_grasps_quality_list]
            self.latest_grasp_scores = [
                g[0] for g in sorted_grasps_quality_list
            ]
        else:
            return False

        # Organize grasps in a Grasp class
        # Grasps are specified wrt the camera ref frame

        filtered_latest_grasps = []
        filtered_latest_grasp_scores = []

        for grasp, score in zip(self.latest_grasps, self.latest_grasp_scores):
            # Grasps should already be in 6D as output
            # A 90 degrees offset is applied to account for the difference in
            # reference frame (see
            # https://github.com/NVlabs/6dof-graspnet/issues/8)
            # when dealing with the real robot hand
            #TODO Offset should be applied here too
            offset_transform = np.array([[0, -1, 0, self._grasp_offset[0]],
                                         [1, 0, 0, self._grasp_offset[1]],
                                         [0, 0, 1, self._grasp_offset[2]],
                                         [0, 0, 0, 1]])
            grasp_with_offset = np.dot(grasp, offset_transform)
            grasp_6d = Grasp6D(position=grasp_with_offset[:3, 3],
                               rotation=grasp_with_offset[:3, :3],
                               width=0,
                               score=score,
                               ref_frame=camera_data.intrinsic_params['frame'])

            if (aruco_board_data.enable_grasp_filter == True):
                if check_collision_between_gripper_and_table(
                        grasp_6d, camera_data, aruco_board_data) == False:
                    self.grasp_poses.append(grasp_6d)
                    filtered_latest_grasps.append(grasp)
                    filtered_latest_grasp_scores.append(score)
            else:
                self.grasp_poses.append(grasp_6d)

        if (aruco_board_data.enable_grasp_filter == True):
            self.latest_grasps = filtered_latest_grasps
            self.latest_grasp_scores = filtered_latest_grasp_scores

        self.grasp_poses = self.grasp_poses[0:n_candidates]
        self.best_grasp = self.grasp_poses[0]

        return True
Code Example #13
def main(config_file, task_config_file, output_file):
    config = utils.config_from_yaml_file(config_file)

    # object configurations
    objs_config = utils.config_from_yaml_file(task_config_file)
    objects_to_get_grasps_for = list()
    for obj_name, obj_config in objs_config['objs'].items():
        obj_config_class = MeshObject.Config().update(obj_config)
        obj = MeshObject(obj_config_class, obj_name)
        objects_to_get_grasps_for.append(obj)

    print(f'found {len(objects_to_get_grasps_for)} objects to find grasps for')
    updated_config = Grasp6d_graspnet.Config().update(config)
    grasper = Grasp6d_graspnet(updated_config)

    ##### Prepare the 6dof graspnet network for forward pass ######
    cfg = grasp_estimator.joint_config(
        grasper.vae_checkpoint_folder,
        grasper.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = grasper.grasp_conf_threshold
    cfg['sample_based_improvement'] = 1 - int(
        grasper.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if grasper.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)
    ##### End of 6dof graspnet network preparation ################

    nmeshes = len(objects_to_get_grasps_for)
    for mesh_id, meshobj in enumerate(objects_to_get_grasps_for):
        print(f'---- processing {mesh_id}/{nmeshes} ------')
        obj_pc, pc, pc_colors, env_state = grasper.add_object(meshobj)

        # the object is added, data is collected, processed and returned
        # now sample the grasps, and save them
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            obj_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )

        print(
            f'------ number of generated grasps are: {len(generated_grasps)} ---------'
        )
        save_file_path = os.path.join(grasper.output_grasps_dir,
                                      f'grasps_{meshobj.name}.npy')
        print(f'---- saving to {save_file_path} -----')

        save_dict = {
            'generated_grasps': generated_grasps,
            'generated_scores': generated_scores,
            'pcd': pc,
            'pcd_color': pc_colors,
            'obj_pcd': obj_pc,
            'env_state': env_state,
        }

        np.save(save_file_path, save_dict)
Code Example #14
def main(args):
    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    cfg['num_samples'] = 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    #edit by alessandro
    os.environ['CUDA_VISIBLE_DEVICES'] ='0'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    #sess = tf.Session()
    #
    estimator.build_network()
    estimator.load_weights(sess)



    for npy_file in glob.glob(os.path.join(args.npy_folder, '*.npy')):
        print(npy_file)
        # Depending on your numpy version you may need to change allow_pickle
        # from True to False.
        data = np.load(npy_file, allow_pickle=True).item()
        print(data.keys())
        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth 
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth, K, return_finite_depth=True, return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, :]


        ipdb.set_trace()
        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        latents = estimator.sample_latents()
        ###################################
        #### Test PCD #####################
        #import open3d as o3d

        #from matplotlib import pyplot
        #from mpl_toolkits.mplot3d import Axes3D

        #pcd = o3d.io.read_point_cloud("./demo/data/tomato_soup_0.pcd")
        #object_pc = np.asarray(pcd.points)
        #color_c1 = 200*np.ones((object_pc.shape[0],1))
        #color_c23 = np.zeros((object_pc.shape[0],2))
        #pc_colors = np.concatenate((color_c1,color_c23),axis=1)
        #pc = object_pc

        ###################################



        print('HERE1')
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        print('HERE2')
        mlab.figure(bgcolor=(1,1,1))
        draw_scene(
            pc,
            pc_color=pc_colors,
            grasps=generated_grasps,
            grasp_scores=generated_scores,
        )
        print('close the window to continue to next object . . .')
        mlab.show()
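
Stripped of the per-example I/O, the TensorFlow-based snippets above (Examples #4, #5, #8, #9, and #14) all follow the same skeleton; a condensed sketch of that recurring pattern, with the checkpoint folders and object_pc as placeholders, is:

import tensorflow as tf
import grasp_estimator

# vae_checkpoint_folder / evaluator_checkpoint_folder: placeholder paths.
cfg = grasp_estimator.joint_config(vae_checkpoint_folder,
                                   evaluator_checkpoint_folder)
cfg['threshold'] = 0.8
cfg['sample_based_improvement'] = 1   # 0 when using gradient-based refinement
cfg['num_refine_steps'] = 20          # 10 for gradient-based refinement

estimator = grasp_estimator.GraspEstimator(cfg)
sess = tf.Session()
estimator.build_network()
estimator.load_weights(sess)

# object_pc: (N, 3) point cloud of the object to grasp.
latents = estimator.sample_latents()
generated_grasps, generated_scores, _ = estimator.predict_grasps(
    sess, object_pc, latents, num_refine_steps=cfg.num_refine_steps)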