def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
    """Return the domain's action space: every member of the Action enum."""
    return EnumSpace(Action)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
    """Return one Move-valued observation space per agent."""
    # Each agent observes a value drawn from the Move enum.
    return {agent: EnumSpace(Move) for agent in ('player1', 'player2')}
def decode(val):
    """Decode *val* into an action space.

    NOTE(review): the argument is ignored and a fresh ``EnumSpace(Action)``
    is always returned — presumably the action space is constant for this
    domain; confirm against the callers.
    """
    space = EnumSpace(Action)
    return space
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
    """Return one Move-valued action space per agent."""
    spaces = {}
    # Each of the two players picks an action from the Move enum.
    for agent in ('player1', 'player2'):
        spaces[agent] = EnumSpace(Move)
    return spaces
def items(self) -> D.T_agent[D.T_concurrency[EnumSpace]]:
    """Return (agent, action-space) pairs for every agent in memory.

    Used by the random walk, which needs independent agent action
    spaces.  It may lead to infeasible actions, in which case
    _state_sample(...) returns a large cost.

    Returns:
        A list of ``(agent, EnumSpace(AgentAction))`` tuples, one per
        agent key currently held in ``self._memory``.
    """
    # Iterate the dict directly: ``.keys()`` was redundant since a dict
    # already iterates over its keys.
    return [(agent, EnumSpace(AgentAction)) for agent in self._memory]
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
    """Return one Move-valued observation space per agent."""
    observation_spaces = {agent: EnumSpace(Move)
                          for agent in ("player1", "player2")}
    return observation_spaces
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
    """Return one Move-valued action space per agent."""
    # Each of the two players chooses among the Move enum's members.
    return {agent: EnumSpace(Move) for agent in ("player1", "player2")}
def _get_action_space_(self) -> Space[D.T_event]:
    """Define the action space: all members of the Action enum."""
    return EnumSpace(Action)