Example #1
 def plan(self, observation):
     envs = [
         preprocess_env(self.true_env, preprocessors)
         for preprocessors in self.config["models"]
     ]
     self.env = JointEnv(envs)
     return super().plan(observation)
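Across these examples, preprocess_env builds a modified copy of an environment from a list of preprocessor configurations, so that planning runs against the copy while the true environment stays untouched. A minimal sketch of such a pipeline, assuming a {"method": ..., "args": ...} config shape (the library's actual helper may differ):

 import copy

 def preprocess_env(env, preprocessor_configs):
     """Apply a chain of preprocessors to a copy of the environment."""
     env = copy.deepcopy(env)  # never mutate the true environment
     for config in preprocessor_configs:
         preprocessor = getattr(env, config["method"])   # e.g. a "simplify" method
         env = preprocessor(*config.get("args", []))     # each step returns a new env
     return env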
Example #2
 @classmethod
 def display(cls, agent, agent_surface, sim_surface):
     import pygame
     horizon = 2
     robust_env = preprocess_env(agent.env,
                                 agent.config["env_preprocessors"])
     for vehicle in robust_env.road.vehicles:
         vehicle.COLLISIONS_ENABLED = False
     plan = agent.get_plan()
     if plan:
         plan = plan[1:]  # First action has already been performed
     plan = plan[:horizon] + (horizon - len(plan)) * [1]  # Truncate/pad to horizon; 1 is presumably the idle action
     for action in plan:
         robust_env.step(action)
     for vehicle in robust_env.road.vehicles:
         if not hasattr(vehicle, 'interval_trajectory'):
             continue
         min_traj = [o.position[0] for o in vehicle.interval_trajectory]
         max_traj = [o.position[1] for o in vehicle.interval_trajectory]
         uncertainty_surface = pygame.Surface(sim_surface.get_size(),
                                              pygame.SRCALPHA, 32)
         cls.display_uncertainty(min_traj, max_traj, uncertainty_surface,
                                 sim_surface, cls.UNCERTAINTY_TIME_COLORMAP)
         cls.display_trajectory(vehicle.trajectory, uncertainty_surface,
                                sim_surface, cls.MODEL_TRAJ_COLOR)
         sim_surface.blit(uncertainty_surface, (0, 0))
     if agent_surface and hasattr(agent, "sub_agent"):
         TreeGraphics.display(agent.sub_agent, agent_surface)
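The padding expression above clips or extends the plan to exactly horizon actions; when the plan is longer than the horizon, the negative multiplier simply yields an empty list. A quick illustration, assuming action 1 is the default (idle) action:

 horizon = 2
 for plan in ([], [0], [0, 3, 4]):
     print(plan[:horizon] + (horizon - len(plan)) * [1])
 # -> [1, 1]
 # -> [0, 1]
 # -> [0, 3]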
Example #3
    def plan(self, observation):
        """
            Plan an optimal sequence of actions.

            Start by updating the previously found tree with the last action performed.

        :param observation: the current state
        :return: the list of actions
        """
        self.planner.step(self.previous_action)
        env = preprocess_env(self.env, self.config["env_preprocessors"])
        actions = self.planner.plan(state=env, observation=observation)

        self.previous_action = actions[0]
        return actions
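For context, such a plan method is typically consumed in a closed loop where only the first planned action is executed before replanning; env and agent below are placeholders for the actual environment/agent pair:

 obs = env.reset()
 done = False
 while not done:
     actions = agent.plan(obs)                        # full planned sequence
     obs, reward, done, info = env.step(actions[0])   # execute only the first action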
Example #4
 @classmethod
 def display(cls, agent, agent_surface, sim_surface):
     import pygame
     horizon = 2
     plan = agent.planner.get_plan()
     for env in [preprocess_env(agent.true_env, preprocessors) for preprocessors in agent.config["models"]]:
         for vehicle in env.road.vehicles:
             vehicle.trajectory = []
         for action in plan[:horizon] + (horizon - len(plan)) * [1]:
             env.step(action)
         for vehicle in env.road.vehicles:
             if vehicle is env.vehicle:
                 continue
             uncertainty_surface = pygame.Surface(sim_surface.get_size(), pygame.SRCALPHA, 32)
             IntervalRobustPlannerGraphics.display_trajectory(vehicle.trajectory, uncertainty_surface, sim_surface,
                                                              IntervalRobustPlannerGraphics.MODEL_TRAJ_COLOR)
             sim_surface.blit(uncertainty_surface, (0, 0))
     TreeGraphics.display(agent, agent_surface)
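Note that each env in the loop above is a preprocessed copy of agent.true_env, so stepping it with the planned actions only rolls the candidate model forward for display purposes; the true environment is never advanced.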
Example #5
 @classmethod
 def display(cls, agent, agent_surface, sim_surface):
     plan = agent.planner.get_plan()
     for env in [preprocess_env(agent.true_env, preprocessors) for preprocessors in agent.config["models"]]:
         IntervalRobustPlannerGraphics.display_uncertainty(env, plan, sim_surface, trajectory=False)
     TreeGraphics.display(agent, agent_surface)
Example #6
    def plan(self, observation):
        """
            Plan an optimal sequence of actions.

            Start by updating the previously found tree with the last action performed.

        :param observation: the current state
        :return: the list of actions
        """
        self.steps += 1
        replanning_required = self.step(self.previous_actions)
        if replanning_required:
            env = preprocess_env(self.env, self.config["env_preprocessors"])
            actions = self.planner.plan(state=env, observation=observation)
        else:
            actions = self.previous_actions[1:]
        self.write_tree()

        self.previous_actions = actions
        return actions
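Here self.step(self.previous_actions) both advances the internal search tree and reports whether a fresh plan is needed. A stand-in consistent with this usage, assuming replanning is triggered once the remaining plan is exhausted (the real criterion may instead depend on a configured step strategy):

 def step(self, actions):
     """Update internal state; return True when a new plan must be computed."""
     return len(actions) <= 1  # nothing left to execute after dropping one action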
Example #7
 def plan(self, observation):
     self.sub_agent.env = preprocess_env(self.env, self.config["env_preprocessors"])
     return self.sub_agent.plan(observation)
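This delegation pattern lets a wrapper agent apply its robustness preprocessors while reusing an existing planner unchanged: the sub-agent plans against the preprocessed copy, and the wrapper keeps ownership of the true environment.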
Example #8
 def plan(self, observation):
     envs = [preprocess_env(self.true_env, preprocessors) for preprocessors in self.config["models"]]
     self.env = JointEnv(envs)
     return super(DiscreteRobustPlannerAgent, self).plan(observation)
Example #9
 @classmethod
 def display(cls, agent, agent_surface, sim_surface):
     robust_env = preprocess_env(agent.env, agent.config["env_preprocessors"])
     cls.display_uncertainty(robust_env, plan=agent.get_plan(), surface=sim_surface)
     if agent_surface and hasattr(agent, "sub_agent"):
         TreeGraphics.display(agent.sub_agent, agent_surface)