def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
    """Return the action space: an enumerated space over every Action value."""
    action_space = EnumSpace(Action)
    return action_space
Example #2
0
 def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
     """Return one observation space per agent, each over all Move values."""
     return {agent: EnumSpace(Move) for agent in ('player1', 'player2')}
 def decode(val):
     """Decode *val* into an action space.

     NOTE(review): the result is always the full Action enum space — the
     argument is not inspected by this implementation.
     """
     decoded = EnumSpace(Action)
     return decoded
Example #4
0
 def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
     """Return one action space per agent, each over all Move values."""
     return {agent: EnumSpace(Move) for agent in ('player1', 'player2')}
Example #5
0
 def items(self) -> D.T_agent[D.T_concurrency[EnumSpace]]:
     """Return (agent, action-space) pairs, one per agent in ``self._memory``.

     Used by random walk that needs independent agent action spaces.
     It may lead to infeasible actions in which case _state_sample(...)
     returns a large cost.
     """
     # Iterating a dict yields its keys directly; the former .keys() call
     # was redundant. Behavior (order and contents) is unchanged.
     return [(agent, EnumSpace(AgentAction)) for agent in self._memory]
Example #6
0
 def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
     """Return one observation space per agent, each over all Move values."""
     return {agent: EnumSpace(Move) for agent in ("player1", "player2")}
Example #7
0
 def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
     """Return one action space per agent, each over all Move values."""
     return {agent: EnumSpace(Move) for agent in ("player1", "player2")}
Example #8
0
 def _get_action_space_(self) -> Space[D.T_event]:
     """Define the action space: an enumerated space over every Action value."""
     action_space = EnumSpace(Action)
     return action_space