Example #1
    def __init__(self, env):
        """
        Wrap the environment in this wrapper, that will make the observation egocentric
        :param env object: the environment to wrap.
        """
        super(EgocentricCostmap, self).__init__(env)
        # The OpenAI Gym API requires the image resolution to be known up front.
        self._egomap_x_bounds = np.array([-0.5, 3.])  # aligned with robot's direction
        self._egomap_y_bounds = np.array([-2., 2.])  # orthogonal to robot's direction
        resulting_size = np.array([
            self._egomap_x_bounds[1] - self._egomap_x_bounds[0],
            self._egomap_y_bounds[1] - self._egomap_y_bounds[0]
        ])

        # Convert the egomap extent from world units (meters) to pixels.
        pixel_size = world_to_pixel(resulting_size, np.zeros((2,)), resolution=0.03)
        # Image shape follows the (height, width, channels) convention.
        data_shape = (pixel_size[1], pixel_size[0], 1)
        self.observation_space = spaces.Dict(OrderedDict((
            ('env', spaces.Box(low=0, high=255, shape=data_shape, dtype=np.uint8)),
            ('goal', spaces.Box(low=-1., high=1., shape=(3, 1), dtype=np.float64)),
        )))
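Both examples assume numpy (as np), collections.OrderedDict, Gym's spaces module, and a project-specific world_to_pixel helper that converts a world-frame size in meters into a pixel count at the given resolution. A minimal usage sketch for this wrapper follows; the base environment RandomAisleTurnEnv is an assumption for illustration, not taken from the examples:

    env = RandomAisleTurnEnv()    # hypothetical Gym-style base environment
    env = EgocentricCostmap(env)  # observations are now in the robot's frame
    obs = env.reset()
    # 'env' holds the uint8 costmap image; 'goal' holds the (3, 1) float64
    # goal vector, normalized to [-1, 1].
    print(obs['env'].shape, obs['goal'].shape)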
Example #2
    def __init__(self):
        super(ColoredEgoCostmapRandomAisleTurnEnv, self).__init__()
        # TODO: will need some trickery to make this fully OpenAI Gym style,
        # as the Gym API requires the image resolution to be known up front.
        self._egomap_x_bounds = np.array([-0.5, 3.5])  # aligned with robot's direction
        self._egomap_y_bounds = np.array([-2., 2.])  # orthogonal to robot's direction
        resulting_size = (self._egomap_x_bounds[1] - self._egomap_x_bounds[0],
                          self._egomap_y_bounds[1] - self._egomap_y_bounds[0])

        # Convert the egomap extent from world units (meters) to pixels.
        pixel_size = world_to_pixel(np.asarray(resulting_size, dtype=np.float64),
                                    np.zeros((2,)), resolution=0.03)
        # Image shape follows the (height, width, channels) convention.
        data_shape = (pixel_size[1], pixel_size[0], 1)
        self.observation_space = spaces.Dict(OrderedDict((
            ('environment', spaces.Box(low=0, high=255, shape=data_shape, dtype=np.uint8)),
            ('goal', spaces.Box(low=-1., high=1., shape=(5, 1), dtype=np.float64)),
        )))
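A minimal sketch of using this environment directly, assuming it follows the standard Gym reset/step contract:

    env = ColoredEgoCostmapRandomAisleTurnEnv()
    obs = env.reset()
    # The observation dict uses the keys declared above: the uint8 costmap
    # image under 'environment' and the (5, 1) float64 goal under 'goal'.
    assert set(obs.keys()) == {'environment', 'goal'}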