Example #1
    def __init__(self):
        vae_checkpoint_folder = f'{graspnet_path}/checkpoints/latent_size_2_ngpus_1_gan_1_confidence_weight_0.1_npoints_1024_num_grasps_per_object_256_train_evaluator_0_'
        evaluator_checkpoint_folder = f'{graspnet_path}/checkpoints/npoints_1024_train_evaluator_1_allowed_categories__ngpus_8_/'

        self.vae_checkpoint_folder = vae_checkpoint_folder
        self.evaluator_checkpoint_folder = evaluator_checkpoint_folder
        self.grasp_conf_threshold = 0.8
        self.gradient_based_refinement = False

        cfg = grasp_estimator.joint_config(
            self.vae_checkpoint_folder,
            self.evaluator_checkpoint_folder,
        )

        cfg['threshold'] = self.grasp_conf_threshold
        cfg['sample_based_improvement'] = 1 - int(
            self.gradient_based_refinement)
        cfg['num_refine_steps'] = 10 if self.gradient_based_refinement else 20
        cfg['training'] = True
        cfg['logdir'] = "log"

        cfg["vae_checkpoint_folder"] = None
        cfg["evaluator_checkpoint_folder"] = None
        cfg["npoints"] = 1024
        cfg["num_grasps_per_object"] = 64

        self.cfg = cfg

        self.num_refine_steps = cfg['num_refine_steps']
        self.estimator = grasp_estimator.GraspEstimator(cfg)
        self.sess = tf.Session()

        ph = get_vae_placeholder_dict(cfg)

        self.tf_output = self.estimator.build_network(ph)
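
The constructor above stops after building the graph; nothing is loaded or run yet. A minimal sketch of the usual next steps, assuming the same estimator API the later examples use (load_weights, sample_latents, predict_grasps) and a hypothetical (N, 3) point cloud object_pc:

    def predict(self, object_pc):
        # Hedged continuation: load the checkpoint weights into the session,
        # then sample latents and decode/refine them into grasps.
        self.estimator.load_weights(self.sess)
        latents = self.estimator.sample_latents()
        grasps, scores, _ = self.estimator.predict_grasps(
            self.sess,
            object_pc,
            latents,
            num_refine_steps=self.num_refine_steps,
        )
        return grasps, scores
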
Example #2
def main(args):
    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)

    for npy_file in glob.glob(os.path.join(args.npy_folder, '*.npy')):
        print(npy_file)
        # Depending on your numpy version you may need to change allow_pickle
        # from True to False.
        data = np.load(npy_file, allow_pickle=True).item()
        print(data.keys())
        for k in data.keys():
            print(k, np.shape(data[k]))

        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth,
                                    K,
                                    return_finite_depth=True,
                                    return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, :]

        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        print('object_pc shape:', np.shape(object_pc))
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        mlab.figure(bgcolor=(1, 1, 1))
        draw_scene(
            pc,
            pc_color=pc_colors,
            grasps=generated_grasps,
            grasp_scores=generated_scores,
        )
        print('close the window to continue to next object . . .')
        mlab.show()
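
The smoothed_object_pc entry is produced upstream; the comment in the example describes it as a 10-frame depth average with jittery pixels removed. A rough sketch of that preprocessing, where depth_frames and the jitter threshold are assumptions rather than released code:

import numpy as np

def smooth_depth(depth_frames, max_jitter=0.005):
    # depth_frames: (T, H, W) stack of aligned depth images (assumed input).
    # max_jitter: per-pixel standard-deviation threshold in meters (assumed).
    stack = np.asarray(depth_frames, dtype=np.float32)
    mean_depth = np.nanmean(stack, axis=0)
    jitter = np.nanstd(stack, axis=0)
    # Pixels whose depth fluctuates across frames are treated as unreliable.
    mean_depth[jitter > max_jitter] = np.nan
    return mean_depth
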
Example #3
def main(args):

    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    # Alternatively, cap GPU memory with:
    #   gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
    #   config = tf.ConfigProto(gpu_options=gpu_options)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    estimator.build_network()
    estimator.load_weights(sess)

    msg = rospy.wait_for_message('/cloud_pcd', PointCloud2)
    pc = ros_numpy.numpify(msg)
    points = np.zeros((pc.shape[0], 3))
    colors = 100 * np.ones((pc.shape[0], 3))
    points[:, 0] = pc['x']
    points[:, 1] = pc['y']
    points[:, 2] = pc['z']

    latents = estimator.sample_latents()
    generated_grasps, generated_scores, _ = estimator.predict_grasps(
        sess,
        points,
        latents,
        num_refine_steps=cfg.num_refine_steps,
    )
    # Sort grasps by score, best first.
    scores_np = np.asarray(generated_scores)
    sorting = np.argsort(-scores_np)
    sorted_scores = scores_np[sorting]

    grasps_np = np.asarray(generated_grasps)
    sorted_grasps = grasps_np[sorting]

    mlab.figure(bgcolor=(1, 1, 1))
    if len(generated_scores) != 0:
        draw_scene(points,
                   pc_color=colors,
                   grasps=sorted_grasps[0:20].tolist(),
                   grasp_scores=sorted_scores[0:20].tolist())
    else:
        draw_scene(
            points,
            pc_color=colors,
        )
    print('close the window to continue to next object . . .')
    mlab.show()
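
One caveat with the snippet above: clouds arriving on /cloud_pcd often contain NaN points, which the estimator receives unfiltered. A small hedged addition, dropping non-finite rows before calling predict_grasps (this filtering step is not in the original snippet):

    # Keep only rows whose x, y, z are all finite.
    finite = np.isfinite(points).all(axis=1)
    points = points[finite]
    colors = colors[finite]
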
Example #4
    def configure(self, cfg: dict):
        """Additional class configuration

        Parameters
        ----------
        cfg : dict configuration dict, as sourced from the YAML file
        """

        # Create a namespace from the config dict
        # Since the GraspNet implementation uses a namespace

        self.cfg_ns = SimpleNamespace(**self.cfg)
        self.cfg_grasp_estimator = grasp_estimator.joint_config(
            self.cfg_ns.vae_checkpoint_folder,
            self.cfg_ns.evaluator_checkpoint_folder,
        )
        self.cfg_grasp_estimator['threshold'] = self.cfg_ns.threshold
        self.cfg_grasp_estimator['sample_based_improvement'] = 1 - int(
            self.cfg_ns.gradient_based_refinement)
        self.cfg_grasp_estimator['num_refine_steps'] = (
            10 if self.cfg_ns.gradient_based_refinement else 20)
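
For reference, SimpleNamespace(**cfg) simply re-exposes dict keys as attributes, which is why the method can read self.cfg_ns.threshold while still writing dict-style into cfg_grasp_estimator. A tiny self-contained illustration:

from types import SimpleNamespace

cfg = {'threshold': 0.8, 'gradient_based_refinement': False}
ns = SimpleNamespace(**cfg)
assert ns.threshold == cfg['threshold']  # same values, attribute access
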
Example #5
    if return_finite_depth:
        selection = np.isfinite(X[:, 0])
        X = X[selection, :]

    if return_selection:
        return X, selection

    return X


## The model is loaded as a global object ##

parser = make_parser()
args = parser.parse_args(sys.argv[1:])
cfg = grasp_estimator.joint_config(
    args.vae_checkpoint_folder,
    args.evaluator_checkpoint_folder,
)
cfg['threshold'] = args.threshold
cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
estimator = grasp_estimator.GraspEstimator(cfg)
os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
# Alternatively, cap GPU memory with:
#   gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
#   config = tf.ConfigProto(gpu_options=gpu_options)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
estimator.build_network()
estimator.load_weights(sess)
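
Example #5 shows only the tail of backproject. For context, a minimal sketch of the full function under a standard pinhole camera model, where K is the 3x3 intrinsics matrix; everything before the visible tail is a reconstruction, not the original code:

import numpy as np

def backproject(depth, K, return_finite_depth=True, return_selection=False):
    # Back-project a depth image into camera-frame 3D points:
    # x = (u - cx) * z / fx, y = (v - cy) * z / fy, z = depth.
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    z = depth.reshape(-1)
    x = (u.reshape(-1) - K[0, 2]) * z / K[0, 0]
    y = (v.reshape(-1) - K[1, 2]) * z / K[1, 1]
    X = np.stack([x, y, z], axis=1)

    if return_finite_depth:
        selection = np.isfinite(X[:, 0])
        X = X[selection, :]

    if return_selection:
        return X, selection

    return X
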
Example #6
          grasp_success: list of binary numbers. 0 means that the grasp failed,
            and 1 means that the grasp succeeded.
        """
        raise NotImplementedError(
            "The code for grasp evaluation is not released")

    def __del__(self):
        del self._grasp_reader


if __name__ == '__main__':
    args = make_parser(sys.argv)
    create_directory(args.output_folder)

    cfg = grasp_estimator.joint_config(args.vae_training_folder,
                                       args.evaluator_training_folder,
                                       args.dataset_root_folder,
                                       args.eval_split)
    cfg['threshold'] = -1
    if args.gradient_based_refinement:
        cfg['num_refine_steps'] = 10
        cfg['sample_based_improvement'] = 0
    else:
        cfg['num_refine_steps'] = 20
        cfg['sample_based_improvement'] = 1

    evaluator = Evaluator(
        cfg,
        args.generate_data_if_missing,
        args.output_folder,
        args.eval_data_folder,
        args.num_experiments,
Example #7
def main(args):

    sock = su.initialize_client('localhost', 7777)

    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)

    while True:
        data = su.recvall_pickle(sock)

        print(data.keys())
        for k in data.keys():
            print(k, np.shape(data[k]))

        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth 
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth, K, return_finite_depth=True, return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, ::-1]

        # down sampling
        idx = np.random.choice(pc_colors.shape[0], 100000, replace=False)
        pc = pc[idx, :]
        pc_colors = pc_colors[idx, :]

        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        print("====>", generated_grasps, generated_scores)
        mlab.figure(bgcolor=(1, 1, 1))
        if len(generated_grasps) != 0:
            draw_scene(
                pc,
                pc_color=pc_colors,
                grasps=generated_grasps,
                grasp_scores=generated_scores,
            )
            print('close the window to continue to next object . . .')
        mlab.show()
        su.sendall_pickle(sock, [generated_grasps, generated_scores])
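
The su helpers here are a project-specific socket utility, so the peer that serves this loop is not shown. A hedged sketch of length-prefixed pickle framing that such a peer could use; the function names and wire format below are assumptions, not the actual su API:

import pickle
import struct

def send_pickle(conn, obj):
    # Prefix the pickled payload with its 4-byte big-endian length so the
    # receiver knows exactly how many bytes to read.
    payload = pickle.dumps(obj)
    conn.sendall(struct.pack('>I', len(payload)) + payload)

def recv_exact(conn, n):
    buf = b''
    while len(buf) < n:
        chunk = conn.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        buf += chunk
    return buf

def recv_pickle(conn):
    (length,) = struct.unpack('>I', recv_exact(conn, 4))
    return pickle.loads(recv_exact(conn, length))
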
Example #8
    def __init__(self, config: Config):
        self.config = config
        self.vae_checkpoint_folder = config.vae_checkpoint_folder
        self.evaluator_checkpoint_folder = config.evaluator_checkpoint_folder

        self.gradient_based_refinement = False
        self.grasp_conf_threshold = 0.8
        self.cut_off_points = config.cut_off_points
        self.output_grasps_dir = "vae_generated_grasps"
        self.fix_view = config.fix_view

        ##### Prepare the 6dof graspnet network for forward pass ######
        cfg = grasp_estimator.joint_config(
            self.vae_checkpoint_folder,
            self.evaluator_checkpoint_folder,
        )
        cfg['threshold'] = self.grasp_conf_threshold
        cfg['sample_based_improvement'] = 1 - int(
            self.gradient_based_refinement)
        cfg['num_refine_steps'] = 10 if self.gradient_based_refinement else 20

        if self.config.data_collection_mode:
            if not self.config.data_collection_from_trained_model:
                cfg["use_geometry_sampling"] = True
                cfg['num_refine_steps'] = 0
                cfg['grasp_selection_mode'] = "all"

        #cfg['num_refine_steps'] = 0
        self.num_refine_steps = cfg['num_refine_steps']
        self.estimator = grasp_estimator.GraspEstimator(cfg)
        self.sess = tf.Session()
        self.estimator.build_network()
        self.estimator.load_weights(self.sess)

        if not os.path.exists(self.output_grasps_dir):
            os.makedirs(self.output_grasps_dir)

        # set camera for this:
        self.camera_positions, self.camera_quats = pcp_utils.cameras.generate_new_cameras_hemisphere(
            radius=self.config.camera_radius,
            lookat_point=self.config.camera_lookat_pos,
            pitch=self.config.camera_pitch,
            yaw=self.config.camera_yaw,
            yaw_list=self.config.camera_yaw_list)
        self.n_cams = len(self.camera_positions)

        # NOTE: the original author suspected this transform setup is unnecessary.
        mujoco_T_adam = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]],
                                 dtype=np.float32)
        origin_T_camR_xpos = np.array(config.table_top, np.float32) + np.array(
            config.table_T_camR, np.float32)  # np.float is removed in NumPy >= 1.24

        origin_T_adam = np.zeros((4, 4), dtype=np.float32)
        origin_T_adam[:3, :3] = mujoco_T_adam
        origin_T_adam[:3, 3] = origin_T_camR_xpos
        origin_T_adam[3, 3] = 1
        self.origin_T_adam = origin_T_adam
        self.adam_T_origin = np.linalg.inv(self.origin_T_adam)

        gripper_pc = np.squeeze(tf_utils.get_control_point_tensor(1, False), 0)

        gripper_pc[2, 2] = 0.059
        gripper_pc[3, 2] = 0.059
        mid_point = 0.5 * (gripper_pc[2, :] + gripper_pc[3, :])

        # modified grasps
        modified_gripper_pc = []
        modified_gripper_pc.append(np.zeros((3, ), np.float32))
        modified_gripper_pc.append(mid_point)
        modified_gripper_pc.append(gripper_pc[2])
        modified_gripper_pc.append(gripper_pc[4])
        modified_gripper_pc.append(gripper_pc[2])
        modified_gripper_pc.append(gripper_pc[3])
        modified_gripper_pc.append(gripper_pc[5])

        self.gripper_pc_ori = np.asarray(modified_gripper_pc)
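
The 4x4 origin_T_adam assembled above is a rigid homogeneous transform, so moving a grasp pose between the two frames is a single matrix product. A short illustration; the grasp variable is hypothetical:

        # grasp: 4x4 homogeneous pose expressed in the 'adam' frame (hypothetical).
        grasp = np.eye(4, dtype=np.float32)
        grasp_in_origin = self.origin_T_adam @ grasp          # adam -> origin
        grasp_back = self.adam_T_origin @ grasp_in_origin     # origin -> adam
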
Example #9
def main(config_file, task_config_file, output_file):
    config = utils.config_from_yaml_file(config_file)

    # object configurations
    objs_config = utils.config_from_yaml_file(task_config_file)
    objects_to_get_grasps_for = list()
    for obj_name, obj_config in objs_config['objs'].items():
        obj_config_class = MeshObject.Config().update(obj_config)
        obj = MeshObject(obj_config_class, obj_name)
        objects_to_get_grasps_for.append(obj)

    print(f'found {len(objects_to_get_grasps_for)} objects to find grasps for')
    updated_config = Grasp6d_graspnet.Config().update(config)
    grasper = Grasp6d_graspnet(updated_config)

    ##### Prepare the 6dof graspnet network for forward pass ######
    cfg = grasp_estimator.joint_config(
        grasper.vae_checkpoint_folder,
        grasper.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = grasper.grasp_conf_threshold
    cfg['sample_based_improvement'] = 1 - int(
        grasper.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if grasper.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)
    ##### End of 6dof graspnet network preparation ################

    nmeshes = len(objects_to_get_grasps_for)
    for mesh_id, meshobj in enumerate(objects_to_get_grasps_for):
        print(f'---- processing {mesh_id}/{nmeshes} ------')
        obj_pc, pc, pc_colors, env_state = grasper.add_object(meshobj)

        # the object is added, data is collected, processed and returned
        # now sample the grasps, and save them
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            obj_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )

        print(
            f'------ number of generated grasps are: {len(generated_grasps)} ---------'
        )
        save_file_path = os.path.join(grasper.output_grasps_dir,
                                      f'grasps_{meshobj.name}.npy')
        print(f'---- saving to {save_file_path} -----')

        save_dict = {
            'generated_grasps': generated_grasps,
            'generated_scores': generated_scores,
            'pcd': pc,
            'pcd_color': pc_colors,
            'obj_pcd': obj_pc,
            'env_state': env_state,
        }

        np.save(save_file_path, save_dict)
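
The dict saved above round-trips through np.save as a 0-d object array, so it is read back with the same allow_pickle pattern Example #2 uses:

        # Reload a saved grasp file (path follows the grasps_<name>.npy convention).
        loaded = np.load(save_file_path, allow_pickle=True).item()
        generated_grasps = loaded['generated_grasps']
        generated_scores = loaded['generated_scores']
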
Example #10
def main(args):
    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    cfg['num_samples'] = 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    # Edit by alessandro: force GPU 0, overriding cfg.gpu above.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    estimator.build_network()
    estimator.load_weights(sess)

    for npy_file in glob.glob(os.path.join(args.npy_folder, '*.npy')):
        print(npy_file)
        # Depending on your numpy version you may need to change allow_pickle
        # from True to False.
        data = np.load(npy_file, allow_pickle=True).item()
        print(data.keys())
        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth 
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth, K, return_finite_depth=True, return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, :]

        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        latents = estimator.sample_latents()
        ###################################
        #### Test PCD #####################
        #import open3d as o3d

        #from matplotlib import pyplot
        #from mpl_toolkits.mplot3d import Axes3D

        #pcd = o3d.io.read_point_cloud("./demo/data/tomato_soup_0.pcd")
        #object_pc = np.asarray(pcd.points)
        #color_c1 = 200*np.ones((object_pc.shape[0],1))
        #color_c23 = np.zeros((object_pc.shape[0],2))
        #pc_colors = np.concatenate((color_c1,color_c23),axis=1)
        #pc = object_pc

        ###################################

        print('predicting grasps . . .')
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        print('done, drawing scene . . .')
        mlab.figure(bgcolor=(1, 1, 1))
        draw_scene(
            pc,
            pc_color=pc_colors,
            grasps=generated_grasps,
            grasp_scores=generated_scores,
        )
        print('close the window to continue to next object . . .')
        mlab.show()