def __init__(self):
    super(RLreconSimpleV2Env, self).__init__()
    # Each environment runs as its own ROS node.
    rospy.init_node('gym_RLrecon_simple_node', anonymous=False)
    # Bounding box of the whole world and of the sub-volume used for scoring.
    world_bounding_box = math_utils.BoundingBox(
        [-20, -20, 0],
        [+20, +20, +20],
    )
    score_bounding_box = math_utils.BoundingBox([-3, -3, -0.5],
                                                [+3, +3, +5])
    engine = DummyEngine()
    self._environment = SimpleV2Environment(
        world_bounding_box,
        engine=engine,
        random_reset=True,
        clear_size=6.0,
        filter_depth_map=False,
        score_bounding_box=score_bounding_box)
    self.action_space = spaces.Discrete(self._environment.num_actions())
    # self.observation_space = spaces.Box(
    #     0.0,
    #     1.0,
    #     shape=self._environment.observation_shape()
    # )
    self._obs_level = 3
    self._obs_size_x = 16
    self._obs_size_y = self._obs_size_x
    self._obs_size_z = self._obs_size_x
    # The voxel-grid observation space below would immediately be overwritten
    # by the 14-dimensional observation space that follows, so it is kept
    # commented out for reference only.
    # self.observation_space = spaces.Box(0.0,
    #                                     1.0,
    #                                     shape=(self._obs_size_x,
    #                                            self._obs_size_y,
    #                                            self._obs_size_z, 1))
    self.observation_space = spaces.Box(-1000, +1000, shape=(14,))
def __init__(self):
    super(RLreconDummyEnv, self).__init__()
    rospy.init_node('gym_RLrecon_dummy_node', anonymous=False)
    bounding_box = math_utils.BoundingBox([-10, -10, 0], [+10, +10, +10])
    engine = DummyEngine()
    self._environment = SimpleEnvironment(bounding_box,
                                          engine=engine,
                                          random_reset=True,
                                          clear_size=6.0)
    # Discrete actions as reported by the underlying environment and a
    # 2-dimensional observation vector bounded to [-1000, +1000].
    self.action_space = spaces.Discrete(self._environment.num_actions())
    self.observation_space = spaces.Box(-1000, +1000, shape=(2,))
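# A minimal registration sketch (not part of the package): the environment id
# and the entry_point module path below are placeholders and would need to
# match wherever RLreconDummyEnv is actually defined. Once registered, the
# environment can be created through the standard gym.make() interface.
from gym.envs.registration import register

register(id='RLreconDummy-v0',
         entry_point='RLrecon.envs:RLreconDummyEnv')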
def __init__(self, environment_class):
    super(RLreconEnvWrapper, self).__init__()
    rospy.init_node('gym_RLrecon_env_wrapper', anonymous=False)
    world_bounding_box = math_utils.BoundingBox(
        [-20, -20, 0],
        [+20, +20, +20],
    )
    score_bounding_box = math_utils.BoundingBox([-3, -3, -0.5],
                                                [+3, +3, +5])
    engine = DummyEngine()
    # The concrete environment class is injected so that the same wrapper can
    # expose different RLrecon environments through the gym interface.
    self._environment = environment_class(
        world_bounding_box,
        engine=engine,
        random_reset=True,
        clear_size=6.0,
        filter_depth_map=False,
        score_bounding_box=score_bounding_box)
    self.action_space = spaces.Discrete(
        self._environment.get_num_of_actions())
    # Earlier observation-space variants, kept commented out for reference:
    # self.observation_space = spaces.Box(
    #     0.0,
    #     1.0,
    #     shape=self._environment.observation_shape()
    # )
    # self._obs_level = 3
    # self._obs_size_x = 16
    # self._obs_level = 4
    # self._obs_size_x = 8
    # self._obs_size_y = self._obs_size_x
    # self._obs_size_z = self._obs_size_x
    # self.observation_space = spaces.Box(
    #     0.0,
    #     1.0,
    #     shape=(self._obs_size_x, self._obs_size_y, self._obs_size_z, 2)
    # )
    # self.observation_space = spaces.Box(
    #     -1000,
    #     +1000,
    #     shape=(14,)
    # )
    # Build one unbounded Box per observation component reported by the
    # environment and combine them into a Tuple observation space.
    spaces_tuple = []
    for obs_shape in self._environment.get_observation_shapes():
        spaces_tuple.append(
            spaces.Box(-np.finfo(np.float32).max,
                       +np.finfo(np.float32).max,
                       shape=obs_shape))
    self.observation_space = spaces.Tuple(spaces_tuple)
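# A minimal usage sketch (not from the package), assuming the wrapper
# implements the classic gym interface where reset() returns an observation
# and step() returns (observation, reward, done, info). SimpleV2Environment is
# passed as the injected environment class, matching the constructor arguments
# used above.
env = RLreconEnvWrapper(SimpleV2Environment)
observation = env.reset()
for _ in range(100):
    action = env.action_space.sample()
    observation, reward, done, info = env.step(action)
    if done:
        observation = env.reset()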