def __init__(self, env):
    """Wrap the environment so that its observations become egocentric.

    :param env object: the environment to wrap.
    """
    super(EgocentricCostmap, self).__init__(env)
    # Gym-style interfaces require the observation resolution to be known
    # up front, so the egocentric window extents are fixed here (meters,
    # expressed in the robot frame).
    self._egomap_x_bounds = np.array([-0.5, 3.])  # along the robot's heading
    self._egomap_y_bounds = np.array([-2., 2.])   # perpendicular to the heading
    window_extent_m = np.array([
        self._egomap_x_bounds[1] - self._egomap_x_bounds[0],
        self._egomap_y_bounds[1] - self._egomap_y_bounds[0]
    ])
    # Convert the metric window into pixels at 3 cm/pixel resolution.
    window_extent_px = world_to_pixel(window_extent_m, np.zeros((2, )), resolution=0.03)
    costmap_shape = (window_extent_px[1], window_extent_px[0], 1)
    self.observation_space = spaces.Dict(OrderedDict((
        ('env', spaces.Box(low=0, high=255, shape=costmap_shape, dtype=np.uint8)),
        ('goal', spaces.Box(low=-1., high=1., shape=(3, 1), dtype=np.float64)),
    )))
def __init__(self):
    """Set up egocentric costmap observation and goal spaces for this env."""
    super(ColoredEgoCostmapRandomAisleTurnEnv, self).__init__()
    # TODO: Will need some trickery to do it fully openai gym style
    # Gym-style interfaces require the observation resolution to be known
    # up front, so the egocentric window extents are fixed here (meters,
    # expressed in the robot frame).
    self._egomap_x_bounds = np.array([-0.5, 3.5])  # along the robot's heading
    self._egomap_y_bounds = np.array([-2., 2.])    # perpendicular to the heading
    width_m = self._egomap_x_bounds[1] - self._egomap_x_bounds[0]
    height_m = self._egomap_y_bounds[1] - self._egomap_y_bounds[0]
    # Convert the metric window into pixels at 3 cm/pixel resolution.
    size_px = world_to_pixel(
        np.asarray((width_m, height_m), dtype=np.float64),
        np.zeros((2, )),
        resolution=0.03)
    costmap_shape = (size_px[1], size_px[0], 1)
    self.observation_space = spaces.Dict(OrderedDict((
        ('environment', spaces.Box(low=0, high=255, shape=costmap_shape, dtype=np.uint8)),
        ('goal', spaces.Box(low=-1., high=1., shape=(5, 1), dtype=np.float64)),
    )))
def __init__(self, costmap, path, params):
    """
    :param costmap CostMap2D: costmap denoting obstacles
    :param path array(N, 3): oriented path, presented as way points
    :param params EnvParams: parametrization of the environment
    """
    # Stateful components: the robot model and the reward machinery.
    self._robot = TricycleRobot(dimensions=get_dimensions_example(params.robot_name))
    provider_cls = get_reward_provider_example(params.reward_provider_name)
    self._reward_provider = provider_cls(params=params.reward_provider_params)
    # Static properties of the environment (no mutable state).
    # Action is (front wheel speed, steering angle); reverse speed is
    # capped at half the forward maximum.
    self.action_space = spaces.Box(
        low=np.array([-self._robot.get_max_front_wheel_speed() / 2, -np.pi / 2]),
        high=np.array([self._robot.get_max_front_wheel_speed(), np.pi / 2]),
        dtype=np.float32)
    self.reward_range = (0.0, 1.0)
    self._gui = OpenCVGui()
    self._params = params
    # Mutable episode state; keep a pristine copy so the env can be reset.
    self._state = make_initial_state(path, costmap, self._robot, self._reward_provider, params)
    self._initial_state = self._state.copy()
    self.set_state(self._state)
def __init__(self):
    """Declare the fixed-size colored costmap observation space."""
    super(ColoredCostmapRandomAisleTurnEnv, self).__init__()
    # TODO: Will need some trickery to do it fully openai gym style
    # Gym-style interfaces require the observation resolution to be known
    # up front, hence the hard-coded 510x264 single-channel image shape.
    self.observation_space = spaces.Box(
        low=0, high=255, shape=(510, 264, 1), dtype=np.uint8)