Example #1
0
    def __init__(self, fruit_environment, **kwargs):
        """Adapt a FruitAPI environment to the TensorForce environment interface.

        Args:
            fruit_environment: a FruitAPI ``BaseEnvironment`` instance; must not
                have a state processor attached (TensorForce handles its own
                preprocessing).
            **kwargs: forwarded to ``OpenAIGym.create_level`` when the wrapped
                environment is a ``GymEnvironment``.

        Raises:
            ValueError: if *fruit_environment* is not a FruitAPI environment,
                or if it has a state processor configured.
        """
        super().__init__()

        # Guard clauses: only processor-free FruitAPI environments are accepted.
        if not isinstance(fruit_environment, BaseEnvironment):
            raise ValueError('Environment must be from FruitAPI !')
        self.environment = fruit_environment
        if self.environment.get_processor() is not None:
            raise ValueError('Do not use state processor with TensorForce !')

        # False means "no limit specified" in TensorForce's convention.
        self.__max_episode_timesteps = False

        # Default specs derived from the FruitAPI state/action spaces.
        state_shape = tuple(self.environment.get_state_space().get_shape())
        self.states_spec = {'type': 'float', 'shape': state_shape}

        ranges, _ = self.environment.get_action_space().get_range()
        self.actions_spec = {'type': 'int', 'num_values': len(ranges)}

        self.__timesteps = 0

        # For Gym-backed environments, let TensorForce's own OpenAIGym helpers
        # build the level and derive richer state/action specs directly from
        # the underlying gym spaces.
        if isinstance(fruit_environment, GymEnvironment):
            self.__max_episode_timesteps = None
            _, self.__max_episode_timesteps = OpenAIGym.create_level(
                level=self.environment.env_name,
                max_episode_timesteps=None,
                reward_threshold=None,
                tags=None,
                **kwargs)

            self.states_spec = OpenAIGym.specs_from_gym_space(
                space=self.environment.env.observation_space,
                ignore_value_bounds=True)

            self.actions_spec = OpenAIGym.specs_from_gym_space(
                space=self.environment.env.action_space,
                ignore_value_bounds=False)
Example #2
0
    def __init__(self,
                 level,
                 visualize=False,
                 monitor_directory=None,
                 **kwargs):
        """Wrap a Gym Retro game as a TensorForce environment.

        Args:
            level: Retro game identifier passed to ``retro.make``.
            visualize: whether rendering is requested (stored only; used by
                other methods of this class).
            monitor_directory: if given, record episodes via
                ``gym.wrappers.Monitor`` into this directory.
            **kwargs: forwarded verbatim to ``retro.make``.
        """
        # NOTE(review): unlike the FruitAPI wrapper, this __init__ does not
        # call super().__init__() — confirm the base class does not require it.
        import retro

        # False means "no limit specified" in TensorForce's convention.
        self._max_episode_timesteps = False

        self.level = level
        self.visualize = visualize

        self.environment = retro.make(game=self.level, **kwargs)

        # Optionally wrap with a monitor that records episode statistics/videos.
        if monitor_directory is not None:
            self.environment = gym.wrappers.Monitor(
                env=self.environment,
                directory=monitor_directory)

        # TODO: not ignore value bounds for states?
        self.states_spec = OpenAIGym.specs_from_gym_space(
            space=self.environment.observation_space,
            ignore_value_bounds=True)

        self.actions_spec = OpenAIGym.specs_from_gym_space(
            space=self.environment.action_space,
            ignore_value_bounds=False)