Пример #1
0
    def __init__(
        self,
        agent_view_size: int,
        view_channels: int = 1,
        uuid: str = "minigrid_ego_image",
        **kwargs: Any
    ):
        """Initialize an egocentric MiniGrid image sensor.

        # Parameters

        agent_view_size : Side length (in grid cells) of the agent's square view.
        view_channels : Number of view channels to expose — presumably the
            object/color/state channels of MiniGrid's encoding, so 1..3;
            TODO confirm against the observation code.
        uuid : Unique identifier for this sensor's observations.
        """
        self.agent_view_size = agent_view_size
        self.view_channels = view_channels
        # Category counts are (max index + 1) since the *_TO_IDX maps use
        # 0-based integer indices; `abs` defensively handles any negative
        # values in the maps.
        self.num_objects = (
            cast(
                int, max(map(abs, gym_minigrid.minigrid.OBJECT_TO_IDX.values()))  # type: ignore
            )
            + 1
        )
        self.num_colors = (
            cast(int, max(map(abs, gym_minigrid.minigrid.COLOR_TO_IDX.values())))  # type: ignore
            + 1
        )
        self.num_states = (
            cast(int, max(map(abs, gym_minigrid.minigrid.STATE_TO_IDX.values())))  # type: ignore
            + 1
        )

        # Built before the super() call: `prepare_locals_for_super` forwards
        # this frame's locals (presumably minus `self`) as keyword arguments
        # to the superclass constructor, so `observation_space` must exist
        # as a local here.
        observation_space = self._get_observation_space()

        super().__init__(**prepare_locals_for_super(locals()))
Пример #2
0
    def __init__(self,
                 view_radius: int,
                 world_dim: int,
                 degree: int,
                 uuid: str = "corner_fixed_radius_categorical",
                 **kwargs: Any):
        """Initialize a corner sensor whose observation is a row of a
        factorial-design matrix derived from the categorical corner view.

        # Parameters

        view_radius : Radius of the fixed-radius view around the agent.
        world_dim : Dimensionality of the lighthouse world; must be <= 2
            (see the memory-constraint check below).
        degree : Degree passed to the design-matrix formula (interaction
            order of the factorial design).
        uuid : Unique identifier for this sensor's observations.

        # Raises

        NotImplementedError : If `world_dim > 2`.
        """
        self.view_radius = view_radius
        self.world_dim = world_dim
        self.degree = degree

        if self.world_dim > 2:
            # The full design matrix grows combinatorially with world_dim;
            # beyond 2 it no longer fits in memory.
            # NOTE: trailing spaces inside the literals keep the implicitly
            # concatenated message readable.
            raise NotImplementedError(
                "When using the `FactorialDesignCornerSensor`, "
                "`world_dim` must be <= 2 due to memory constraints. "
                "In the current implementation, creating the design "
                "matrix in the `world_dim == 3` case would require "
                "instantiating a matrix of size ~ 3Mx3M (9 trillion entries).")

        # Lazily-populated corner offsets; None until computed elsewhere.
        self.view_corner_offsets: Optional[np.ndarray] = None

        # Underlying sensor producing the raw categorical corner view.
        self.corner_sensor = CornerSensor(self.view_radius, self.world_dim)

        self.variables_and_levels = self._get_variables_and_levels(
            world_dim=self.world_dim)
        self._design_mat_formula = self._create_formula(
            variables_and_levels=self._get_variables_and_levels(
                world_dim=self.world_dim),
            degree=self.degree,
        )
        # Single all-zeros row, reused as a template when encoding a view.
        self.single_row_df = pd.DataFrame(
            data=[[0] * len(self.variables_and_levels)],
            columns=[x[0] for x in self.variables_and_levels],
        )
        # Cache: raw view tuple -> its encoded design-matrix row.
        self._view_tuple_to_design_array: Dict[Tuple[int, ...],
                                               np.ndarray] = {}

        (
            design_matrix,
            tuple_to_ind,
        ) = self._create_full_design_matrix_and_tuple_to_ind_dict(
            variables_and_levels=tuple(self.variables_and_levels),
            degree=self.degree)

        self.design_matrix = design_matrix
        self.tuple_to_ind = tuple_to_ind

        # An observation is one design-matrix row; its length is found by
        # encoding the all-zeros view tuple.
        observation_space = gym.spaces.Box(
            low=min(LightHouseEnvironment.SPACE_LEVELS),
            high=max(LightHouseEnvironment.SPACE_LEVELS),
            shape=(len(
                self.view_tuple_to_design_array(
                    (0, ) * len(self.variables_and_levels))), ),
            dtype=int,
        )

        # Forwards this frame's locals (presumably minus `self`) as kwargs
        # to the superclass constructor.
        super().__init__(**prepare_locals_for_super(locals()))
Пример #3
0
    def __init__(self,
                 view_radius: int,
                 world_dim: int,
                 uuid: str = "corner_fixed_radius",
                 **kwargs: Any):
        """Initialize a fixed-radius corner sensor for the lighthouse task.

        # Parameters

        view_radius : Radius of the view around the agent.
        world_dim : Dimensionality of the lighthouse world.
        uuid : Unique identifier for this sensor's observations.
        """
        self.view_radius = view_radius
        self.world_dim = world_dim
        # Lazily-populated corner offsets; None until computed elsewhere.
        self.view_corner_offsets: Optional[np.ndarray] = None

        # One entry per corner of the world hypercube (2**world_dim) plus
        # two extra entries — NOTE(review): the meaning of the +2 entries is
        # not visible here; confirm against the sensor's observation code.
        observation_space = gym.spaces.Box(
            low=min(LightHouseEnvironment.SPACE_LEVELS),
            high=max(LightHouseEnvironment.SPACE_LEVELS),
            shape=(2**world_dim + 2, ),
            dtype=int,
        )

        # Forwards this frame's locals (presumably minus `self`) as kwargs
        # to the superclass constructor.
        super().__init__(**prepare_locals_for_super(locals()))
Пример #4
0
    def __init__(self, instr_len: int, uuid: str = "minigrid_mission", **kwargs: Any):
        """Initialize a sensor that tokenizes MiniGrid mission instructions.

        # Parameters

        instr_len : Length (in tokens) of the encoded instruction.
        uuid : Unique identifier for this sensor's observations.
        """
        # "TMP_SENSOR" / no vocab file: the vocabulary is built in-process
        # below rather than loaded from disk.
        self.instr_preprocessor = InstructionsPreprocessor(
            model_name="TMP_SENSOR", load_vocab_from=None
        )

        # We initialize the vocabulary with a fixed collection of tokens
        # and then ensure that the size cannot exceed this number. This
        # guarantees that sensors on all processes will produce the same
        # values.
        for token in ALL_VOCAB_TOKENS:
            # Indexing presumably registers the token in the vocab; the
            # looked-up id is intentionally discarded.
            _ = self.instr_preprocessor.vocab[token]
        self.instr_preprocessor.vocab.max_size = len(ALL_VOCAB_TOKENS)

        self.instr_len = instr_len

        # Space is defined by a subclass/sibling helper; built before the
        # super() call, which consumes it from locals().
        observation_space = self._get_observation_space()

        # Forwards this frame's locals (presumably minus `self`) as kwargs
        # to the superclass constructor.
        super().__init__(**prepare_locals_for_super(locals()))