    def __init__(self,
                 agent_rate: float,
                 robot_model: str,
                 restrict_position_goal_to_workspace: bool,
                 sparse_reward: bool,
                 act_quick_reward: float,
                 required_accuracy: float,
                 verbose: bool,
                 **kwargs):

        # Initialize the Task base class
        Reach.__init__(self,
                       agent_rate=agent_rate,
                       robot_model=robot_model,
                       restrict_position_goal_to_workspace=restrict_position_goal_to_workspace,
                       sparse_reward=sparse_reward,
                       act_quick_reward=act_quick_reward,
                       required_accuracy=required_accuracy,
                       verbose=verbose,
                       **kwargs)

        # Perception (RGB camera)
        self.camera_sub = CameraSubscriber(topic=f'/{self._camera_type}',
                                           is_point_cloud=False,
                                           node_name=f'drl_grasping_rgb_camera_sub_{self.id}')
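
The CameraSubscriber implementation itself is not part of these snippets. As a rough, hypothetical sketch of what an RGB image subscriber might look like with plain rclpy (the class name, caching behaviour and QoS depth below are assumptions, not the project's actual code):

from rclpy.node import Node
from sensor_msgs.msg import Image


class MinimalCameraSubscriber(Node):
    """Hypothetical stand-in: caches the newest image published on a topic."""

    def __init__(self, topic: str, node_name: str):
        super().__init__(node_name)
        self._latest_msg = None
        # QoS history depth of 1: keep only the most recent frame
        self.create_subscription(Image, topic, self._callback, 1)

    def _callback(self, msg: Image):
        self._latest_msg = msg

    def get_latest_image(self):
        return self._latest_msg

A node like this still needs rclpy.init() and an executor spinning it in the background before any frames arrive.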

Example #2

    def __init__(self, agent_rate: float, robot_model: str,
                 restrict_position_goal_to_workspace: bool,
                 sparse_reward: bool, act_quick_reward: float,
                 required_accuracy: float, octree_depth: int,
                 octree_full_depth: int, octree_include_color: bool,
                 octree_n_stacked: int, octree_max_size: int, verbose: bool,
                 **kwargs):

        # Initialize the Task base class
        Reach.__init__(self,
                       agent_rate=agent_rate,
                       robot_model=robot_model,
                       restrict_position_goal_to_workspace=restrict_position_goal_to_workspace,
                       sparse_reward=sparse_reward,
                       act_quick_reward=act_quick_reward,
                       required_accuracy=required_accuracy,
                       verbose=verbose,
                       **kwargs)

        if octree_include_color:
            self._camera_type = 'rgbd_camera'
        else:
            self._camera_type = 'depth_camera'

        # Perception (RGB-D camera - point cloud)
        self.camera_sub = CameraSubscriber(
            topic=f'/{self._camera_type}/points',
            is_point_cloud=True,
            node_name=f'drl_grasping_point_cloud_sub_{self.id}')

        self.octree_creator = OctreeCreator(
            min_bound=self._octree_min_bound,
            max_bound=self._octree_max_bound,
            depth=octree_depth,
            full_depth=octree_full_depth,
            include_color=octree_include_color,
            use_sim_time=True,
            debug_draw=False,
            debug_write_octree=False,
            robot_frame_id='panda_link0'
            if robot_model == 'panda' else 'base_link',
            node_name=f'drl_grasping_octree_creator_{self.id}')

        # Additional parameters
        self._octree_n_stacked = octree_n_stacked
        self._octree_max_size = octree_max_size

        # List of all octrees
        self.__stacked_octrees = deque([], maxlen=self._octree_n_stacked)
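
The deque with maxlen is what implements the observation stacking here: once octree_n_stacked octrees have been appended, every new octree silently evicts the oldest one. A minimal standalone illustration of that behaviour:

from collections import deque

octree_n_stacked = 3
stacked_octrees = deque([], maxlen=octree_n_stacked)

for step in range(5):
    # Placeholder strings stand in for serialized octrees
    stacked_octrees.append(f'octree_{step}')

print(list(stacked_octrees))  # ['octree_2', 'octree_3', 'octree_4']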
Example #3
    def __init__(self,
                 agent_rate: float,
                 robot_model: str,
                 restrict_position_goal_to_workspace: bool,
                 gripper_dead_zone: float,
                 full_3d_orientation: bool,
                 sparse_reward: bool,
                 normalize_reward: bool,
                 required_reach_distance: float,
                 required_lift_height: float,
                 reach_dense_reward_multiplier: float,
                 lift_dense_reward_multiplier: float,
                 act_quick_reward: float,
                 outside_workspace_reward: float,
                 ground_collision_reward: float,
                 n_ground_collisions_till_termination: int,
                 curriculum_enable_workspace_scale: bool,
                 curriculum_min_workspace_scale: float,
                 curriculum_enable_object_count_increase: bool,
                 curriculum_max_object_count: int,
                 curriculum_enable_stages: bool,
                 curriculum_stage_reward_multiplier: float,
                 curriculum_stage_increase_rewards: bool,
                 curriculum_success_rate_threshold: float,
                 curriculum_success_rate_rolling_average_n: int,
                 curriculum_restart_every_n_steps: int,
                 curriculum_skip_reach_stage: bool,
                 curriculum_skip_grasp_stage: bool,
                 curriculum_restart_exploration_at_start: bool,
                 max_episode_length: int,
                 octree_depth: int,
                 octree_full_depth: int,
                 octree_include_color: bool,
                 octree_n_stacked: int,
                 octree_max_size: int,
                 proprieceptive_observations: bool,
                 verbose: bool,
                 preload_replay_buffer: bool = False,
                 **kwargs):

        # Initialize the Task base class
        Grasp.__init__(
            self,
            agent_rate=agent_rate,
            robot_model=robot_model,
            restrict_position_goal_to_workspace=restrict_position_goal_to_workspace,
            gripper_dead_zone=gripper_dead_zone,
            full_3d_orientation=full_3d_orientation,
            sparse_reward=sparse_reward,
            normalize_reward=normalize_reward,
            required_reach_distance=required_reach_distance,
            required_lift_height=required_lift_height,
            reach_dense_reward_multiplier=reach_dense_reward_multiplier,
            lift_dense_reward_multiplier=lift_dense_reward_multiplier,
            act_quick_reward=act_quick_reward,
            outside_workspace_reward=outside_workspace_reward,
            ground_collision_reward=ground_collision_reward,
            n_ground_collisions_till_termination=n_ground_collisions_till_termination,
            curriculum_enable_workspace_scale=curriculum_enable_workspace_scale,
            curriculum_min_workspace_scale=curriculum_min_workspace_scale,
            curriculum_enable_object_count_increase=curriculum_enable_object_count_increase,
            curriculum_max_object_count=curriculum_max_object_count,
            curriculum_enable_stages=curriculum_enable_stages,
            curriculum_stage_reward_multiplier=curriculum_stage_reward_multiplier,
            curriculum_stage_increase_rewards=curriculum_stage_increase_rewards,
            curriculum_success_rate_threshold=curriculum_success_rate_threshold,
            curriculum_success_rate_rolling_average_n=curriculum_success_rate_rolling_average_n,
            curriculum_restart_every_n_steps=curriculum_restart_every_n_steps,
            curriculum_skip_reach_stage=curriculum_skip_reach_stage,
            curriculum_skip_grasp_stage=curriculum_skip_grasp_stage,
            curriculum_restart_exploration_at_start=curriculum_restart_exploration_at_start,
            max_episode_length=max_episode_length,
            verbose=verbose,
            preload_replay_buffer=preload_replay_buffer,
            **kwargs)

        if octree_include_color:
            self._camera_type = 'rgbd_camera'
        else:
            self._camera_type = 'depth_camera'

        # Perception (RGB-D camera - point cloud)
        self.camera_sub = CameraSubscriber(
            topic=f'/{self._camera_type}/points',
            is_point_cloud=True,
            node_name=f'drl_grasping_point_cloud_sub_{self.id}')

        self.octree_creator = OctreeCreator(
            min_bound=self._octree_min_bound,
            max_bound=self._octree_max_bound,
            depth=octree_depth,
            full_depth=octree_full_depth,
            include_color=octree_include_color,
            use_sim_time=True,
            debug_draw=False,
            debug_write_octree=False,
            robot_frame_id='panda_link0'
            if robot_model == 'panda' else 'base_link',
            node_name=f'drl_grasping_octree_creator_{self.id}')

        # Additional parameters
        self._octree_n_stacked = octree_n_stacked
        self._octree_max_size = octree_max_size
        self._proprieceptive_observations = proprieceptive_observations

        # List of all octrees
        self.__stacked_octrees = deque([], maxlen=self._octree_n_stacked)
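
How octree_n_stacked and octree_max_size translate into an agent observation is not shown in these snippets; one plausible arrangement (an assumption, not confirmed by the code above) is to serialize each octree into a fixed-size uint8 buffer and expose the stack as a Box space:

import numpy as np
import gym

# Hypothetical sketch: each octree serialized and padded to octree_max_size
# bytes, with octree_n_stacked of them stacked along the first axis. A fixed
# buffer size keeps the observation shape constant even though real octrees
# vary in size.
octree_n_stacked = 2
octree_max_size = 50000
observation_space = gym.spaces.Box(low=0,
                                   high=255,
                                   shape=(octree_n_stacked, octree_max_size),
                                   dtype=np.uint8)
print(observation_space.shape)  # (2, 50000)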