# UnityInput/UnityOutput and the *Proto classes below come from the
# ML-Agents communicator_objects protobuf module.
def exchange(self, inputs: UnityInput) -> UnityOutput:
    dict_agent_info = {}
    # A discrete space sends a single action index; a continuous space
    # sends one float per action dimension.
    if self.is_discrete:
        vector_action = [1]
    else:
        vector_action = [1, 2]
    # Fabricate three agents with fixed observations; the last agent
    # is marked done.
    list_agent_info = []
    for i in range(3):
        list_agent_info.append(
            AgentInfoProto(stacked_vector_observation=[1, 2, 3, 1, 2, 3],
                           reward=1,
                           stored_vector_actions=vector_action,
                           stored_text_actions="",
                           text_observation="",
                           memories=[],
                           done=(i == 2),
                           max_step_reached=False,
                           id=i))
    dict_agent_info["RealFakeBrain"] = \
        UnityRLOutput.ListAgentInfoProto(value=list_agent_info)
    # A first vector action of -1 signals that the episode is globally done.
    global_done = False
    try:
        global_done = (inputs.rl_input.agent_actions["RealFakeBrain"].
                       value[0].vector_actions[0] == -1)
    except Exception:
        # No actions for this brain yet (e.g. the very first exchange).
        pass
    result = UnityRLOutput(global_done=global_done,
                           agentInfos=dict_agent_info)
    return UnityOutput(rl_output=result)
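For context, here is a minimal sketch of driving exchange from a test, assuming the method lives on an ML-Agents MockCommunicator-style test double; the class name, constructor arguments, and import path are assumptions and may differ between ML-Agents versions:

# Sketch only: MockCommunicator and the import path are assumptions.
from mlagents.envs.communicator_objects import UnityInput, AgentActionProto

comm = MockCommunicator(discrete_action=True, visual_inputs=0)  # hypothetical ctor
step_input = UnityInput()
# Queue a vector action of -1 for the fake brain; exchange() reads
# agent_actions["RealFakeBrain"].value[0].vector_actions[0].
action_list = step_input.rl_input.agent_actions["RealFakeBrain"]
action_list.value.extend([AgentActionProto(vector_actions=[-1])])
output = comm.exchange(step_input)
assert output.rl_output.global_done  # -1 flips the global_done flag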
Example #2
def initialize(self, inputs: UnityInput) -> UnityOutput:
    # One 30x40 color camera per requested visual input.
    resolutions = [
        ResolutionProto(width=30, height=40, gray_scale=False)
        for _ in range(self.visual_inputs)
    ]
    bp = BrainParametersProto(
        vector_observation_size=3,
        num_stacked_vector_observations=self.num_stacks,
        vector_action_size=[2],
        camera_resolutions=resolutions,
        vector_action_descriptions=["", ""],
        # Space type enum: 0 = discrete, 1 = continuous.
        vector_action_space_type=int(not self.is_discrete),
        brain_name="RealFakeBrain",
        brain_type=2)  # 2 = External in the old BrainTypeProto enum
    rl_init = UnityRLInitializationOutput(name="RealFakeAcademy",
                                          version="API-4",
                                          log_path="",
                                          brain_parameters=[bp])
    return UnityOutput(rl_initialization_output=rl_init)
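A similar sketch for the handshake, again assuming a hypothetical MockCommunicator wrapper; the printed values follow from the constants hard-coded above:

# Sketch only: the constructor signature is an assumption.
comm = MockCommunicator(discrete_action=False, visual_inputs=2)
init_out = comm.initialize(UnityInput())
brain = init_out.rl_initialization_output.brain_parameters[0]
print(brain.brain_name)                # "RealFakeBrain"
print(len(brain.camera_resolutions))   # 2, one entry per visual input
print(brain.vector_action_space_type)  # 1, i.e. continuous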