Example #1
    def test_lane_change(self):
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorMobil(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorIDMLaneTracking(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        #agent_2d_shape = CarLimousine()
        agent_2d_shape = CarRectangle()
        init_state = np.array([0, 3, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        init_state2 = np.array([0, 15, -1.75, 0, 2])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        # viewer
        viewer = MPViewer(params=params, use_world_bounds=True)

        # World Simulation
        sim_step_time = params["simulation"]["step_time",
                                             "Step-time in simulation", 0.05]
        sim_real_time_factor = params["simulation"][
            "real_time_factor", "execution in real-time or faster", 100]

        # Simulation loop: step the world and render each frame
        for _ in range(0, 10):
            viewer.clear()
            world.Step(sim_step_time)
            viewer.drawWorld(world)
            viewer.show(block=False)
            time.sleep(sim_step_time / sim_real_time_factor)
Example #2
  def test_python_model(self):
    param_server = ParameterServer(
      filename="modules/runtime/tests/data/deterministic_scenario.json")
    scenario_generation = DeterministicScenarioGeneration(num_scenarios=3,
                                                          random_seed=0,
                                                          params=param_server)
    viewer = MPViewer(params=param_server,
                      follow_agent_id=False,
                      use_world_bounds=True)
    env = Runtime(0.2,
                  viewer,
                  scenario_generation,
                  render=True)
    
    single_track_model = SingleTrackModel(param_server)
    behavior_model = DynamicBehaviorModel(single_track_model, param_server)

    env.reset()
    env._world.get_agent(0).behavior_model = behavior_model
    env._world.get_agent(0).behavior_model.clone()


    env.reset()
    env._world.get_agent(0).behavior_model = behavior_model
    env._world.get_agent(0).behavior_model.set_last_action(np.array([1., 2.]))
    print(env._world.get_agent(0).behavior_model.get_last_action())
    env._world.step(0.2)
    print(env._world.get_agent(0).behavior_model.get_last_action())
Example #3
    def test_one_agent_at_goal_state_limits(self):
        param_server = ParameterServer()
        # Model Definition
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition
        agent_2d_shape = CarLimousine()
        init_state = np.array(
            [0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
        agent_params = param_server.AddChild("agent1")
        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon = goal_polygon.Translate(Point2d(-191.789, -50.1725))

        agent = Agent(
            init_state, behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params,
            GoalDefinitionStateLimits(
                goal_polygon,
                (3.14 * 3.0 / 4.0 - 0.08, 3.14 * 3.0 / 4.0 + 0.08)), None)

        world = World(param_server)
        world.AddAgent(agent)
        evaluator = EvaluatorGoalReached(agent.id)
        world.AddEvaluator("success", evaluator)

        info = world.Evaluate()
        self.assertEqual(info["success"], True)
Example #4
    def __init__(self):
        self.carla_server = None
        self.carla_client = None
        self.carla_controller = None
        self.bark_viewer = None
        self.cosimulation_viewer = None
        self.launch_args = ["external/carla/CarlaUE4.sh", "-quality-level=Low"]

        # Bark parameter server
        self.param_server = ParameterServer(
            filename=BARK_PATH +
            "examples/params/od8_const_vel_one_agent.json")

        # World Definition
        self.bark_world = World(self.param_server)

        # Model Definitions
        self.behavior_model = BehaviorIDMClassic(self.param_server)
        self.execution_model = ExecutionModelInterpolate(self.param_server)
        self.dynamic_model = SingleTrackModel(self.param_server)

        # Map Definition
        xodr_parser = XodrParser(BARK_PATH + "modules/runtime/tests/data/" +
                                 BARK_MAP + ".xodr")
        self.map_interface = MapInterface()
        self.map_interface.SetOpenDriveMap(xodr_parser.map)
        self.bark_world.SetMap(self.map_interface)

        # Bark agent definition
        self.agent_2d_shape = CarLimousine()

        # use for converting carla actor id to bark agent id
        self.carla_2_bark_id = dict()
        # store the camera id attached to an agent
        self.carla_agents_cam = dict()
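
The launch_args list collected above is presumably handed to the Carla binary elsewhere in the class. As a hedged illustration only, such a server process could be started with the standard library roughly as follows; the helper name and the startup wait are assumptions, not part of the example:

import subprocess
import time

def launch_carla_server(launch_args, startup_wait_s=5.0):
    """Hypothetical helper: start CarlaUE4.sh with the arguments from __init__."""
    server = subprocess.Popen(launch_args)
    # give the simulator a moment to come up before clients connect
    time.sleep(startup_wait_s)
    return server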
Example #5
  def test_one_agent_at_goal_sequential(self):
    param_server = ParameterServer()
    # Model Definition
    dynamic_model = SingleTrackModel(param_server)
    behavior_model = BehaviorMPContinuousActions(param_server)
    idx = behavior_model.AddMotionPrimitive(np.array([1, 0]))
    behavior_model.ActionToBehavior(idx)
    execution_model = ExecutionModelInterpolate(param_server)


    # Agent Definition
    agent_2d_shape = CarLimousine()
    init_state = np.array([0, 0, 0, 0, 0])
    agent_params = param_server.AddChild("agent1")
    goal_frame = Polygon2d([0, 0, 0],
                           [Point2d(-1, -1),
                            Point2d(-1, 1),
                            Point2d(1, 1),
                            Point2d(1, -1)])

    goal_polygon1 = goal_frame.Translate(Point2d(10, 0))
    goal_polygon2 = goal_frame.Translate(Point2d(20, 0))
    goal_polygon3 = goal_frame.Translate(Point2d(30, 0))

    goal_def1 = GoalDefinitionStateLimits(goal_polygon1, [-0.08, 0.08])
    goal_def2 = GoalDefinitionStateLimits(goal_polygon2, [-0.08, 0.08])
    goal_def3 = GoalDefinitionStateLimits(goal_polygon3, [-0.08, 0.08])

    goal_definition = GoalDefinitionSequential([goal_def1,
                                                goal_def2,
                                                goal_def3])

    self.assertEqual(len(goal_definition.sequential_goals), 3)
    agent = Agent(init_state,
                  behavior_model,
                  dynamic_model,
                  execution_model,
                  agent_2d_shape,
                  agent_params,
                  goal_definition,
                  None)

    world = World(param_server)
    world.AddAgent(agent)
    evaluator = EvaluatorGoalReached(agent.id)
    world.AddEvaluator("success", evaluator)

    # just driving with the single motion primitive should be successful
    for _ in range(0, 1000):
        world.Step(0.2)
        info = world.Evaluate()
        if info["success"]:
            break
    
    self.assertEqual(info["success"], True)
    self.assertAlmostEqual(agent.state[int(StateDefinition.X_POSITION)], 30, delta=0.5)
Example #6
    def Plan(self, delta_time, observed_world):
        # Get state of observer
        ego_agent = observed_world.ego_agent
        ego_agent_state = observed_world.ego_state  # or via ego_agent.state
        ego_velocity = ego_agent_state[int(StateDefinition.VEL_POSITION)]
        ego_x = ego_agent_state[int(StateDefinition.X_POSITION)]
        ego_y = ego_agent_state[int(StateDefinition.Y_POSITION)]
        ego_theta = ego_agent_state[int(StateDefinition.THETA_POSITION)]
        print("Ego x: {}, y: {}, v: {}, theta: {}".format(
            ego_x, ego_y, ego_velocity, ego_theta))

        # get distance and state of observer's leading vehicle
        # 1) get tuple with agent and frenet position
        # (to get the agent behind instead, use GetAgentBehind())
        leading_agent_pair = observed_world.GetAgentInFront()

        # 2) get state of leading agent from tuple
        leading_agent = leading_agent_pair[0]
        leading_agent_state = leading_agent.state
        leading_agent_x = leading_agent_state[int(StateDefinition.X_POSITION)]
        # to get other state values, same procedure as for ego agent
        # ....
        # 3) get frenet longitudinal distance to leading agent from tuple
        leading_frenet = leading_agent_pair[1]
        longitudinal_dist = leading_frenet.lon
        # 4) subtract the vehicles' front and rear shape extents to get the true physical distance
        vehicle_margins = ego_agent.shape.front_dist + leading_agent.shape.rear_dist
        longitudinal_dist_margins = longitudinal_dist - vehicle_margins

        print("Leading x: {}, ... long dist: {}, long dist margins: {}".format(
            leading_agent_x, longitudinal_dist, longitudinal_dist_margins))

        # select an action using a very simple heuristic:
        # decelerate, keep constant velocity, or accelerate
        acceleration = None
        if longitudinal_dist_margins < self._distance_range_constant_velocity[0]:
            acceleration = -4.0
        elif self._distance_range_constant_velocity[0] < longitudinal_dist_margins < \
                self._distance_range_constant_velocity[1]:
            acceleration = 0.0
        else:
            acceleration = 4.0

        # a motion primitive model converts it to trajectory
        single_track_model = SingleTrackModel(self._params)
        behavior = PrimitiveConstAccStayLane(self._params, single_track_model,
                                             acceleration, 0.0)
        traj = behavior.Plan(delta_time, observed_world)

        # store the last trajectory and action in the base behavior model
        super(PythonDistanceBehavior, self).SetLastTrajectory(traj)
        super(PythonDistanceBehavior, self).SetLastAction(acceleration)
        print("Trajectory: {}".format(traj))
        return traj
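
The Plan method above uses two attributes the snippet does not define: self._params and self._distance_range_constant_velocity. A minimal constructor sketch, assuming the class derives from BARK's Python-extendable BehaviorModel and using placeholder values for the distance band:

from bark.models.behavior import BehaviorModel  # import path is an assumption

class PythonDistanceBehavior(BehaviorModel):
    def __init__(self, params=None):
        # base-class constructor call mirrors the usual BARK Python-behavior pattern
        BehaviorModel.__init__(self, params)
        self._params = params
        # distance band [m] in which the ego keeps constant velocity;
        # the concrete bounds are placeholders, not taken from the example
        self._distance_range_constant_velocity = (10.0, 20.0)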
Example #7
 def reset(self, world, agents_to_act):
     super(MotionPrimitives, self).reset(world=world,
                                         agents_to_act=agents_to_act)
     self.behavior_model = BehaviorMotionPrimitives(SingleTrackModel(),
                                                    self.params)
     for control_input in self.control_inputs:
         self.behavior_model.add_motion_primitive(np.array(control_input))
     ego_agent_id = agents_to_act[0]
     world.agents[ego_agent_id].behavior_model = self.behavior_model
     return world
Example #8
 def reset(self, world, agents_to_act):
   """see base class
   """
   super(MotionPrimitives, self).reset(world=world,
                                       agents_to_act=agents_to_act)
   self._behavior_model = BehaviorMotionPrimitives(SingleTrackModel(self._params),
                                                   self._params)
   for control_input in self._control_inputs:
       self._behavior_model.add_motion_primitive(np.array(control_input))
   ego_agent_id = agents_to_act[0]
   if ego_agent_id in world.agents:
       world.agents[ego_agent_id].behavior_model = self._behavior_model
   else:
       raise ValueError("AgentID does not exist in world.")
   return world
Example #9
 def test_write_params_agent(self):
   params = ParameterServer()
   behavior = BehaviorConstantVelocity(params)
   execution = ExecutionModelInterpolate(params)
   dynamic = SingleTrackModel(params)
   shape = Polygon2d([1.25, 1, 0], [
       Point2d(0, 0),
       Point2d(0, 2),
       Point2d(4, 2),
       Point2d(4, 0),
       Point2d(0, 0)
   ])
   init_state = np.zeros(4)
   agent = Agent(init_state, behavior, dynamic, execution, shape,
                 params.AddChild("agent"))
   params.Save("written_agents_param_test.json")
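
To verify what Save() produced, the written file can be opened like any other JSON document. A short sketch, assuming the output is plain JSON as the .json filename suggests:

import json

# hypothetical follow-up check; the filename matches the Save() call above
with open("written_agents_param_test.json") as f:
    written_params = json.load(f)
print(sorted(written_params.keys()))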
Example #10
 def test_bark_agent(self):
     params = ParameterServer(
         filename="configurations/sac_highway/config.json")
     configuration = SACHighwayConfiguration(params)
     scenario_generator = configuration._scenario_generator
     scenario, idx = scenario_generator.get_next_scenario()
     world = scenario.get_world_state()
     dynamic_model = SingleTrackModel(params)
     bark_agent = BARKMLBehaviorModel(configuration, dynamic_model,
                                      scenario._eval_agent_ids)
     bark_agent.plan(0.2, world)
     bark_agent.plan(0.2, world)
     bark_agent.plan(0.2, world)
     new_agent = bark_agent.clone()
     new_agent.plan(0.2, world)
     new_agent.plan(0.2, world)
     new_agent.plan(0.2, world)
Example #11
 def test_python_model(self):
     param_server = ParameterServer(
         filename="modules/runtime/tests/data/deterministic_scenario.json")
     scenario_generation = DeterministicScenarioGeneration(
         num_scenarios=3, random_seed=0, params=param_server)
     viewer = MPViewer(params=param_server,
                       follow_agent_id=False,
                       use_world_bounds=True)
     scenario, idx = scenario_generation.get_next_scenario()
     world = scenario.get_world_state()
     single_track_model = SingleTrackModel(param_server)
     behavior_model = PythonBehaviorModelWrapper(single_track_model,
                                                 param_server)
     world.GetAgent(0).behavior_model = behavior_model
     world.GetAgent(0).behavior_model.SetLastAction(
         np.array([1., 1.], dtype=np.float32))
     world.Step(0.2)
Example #12
  def test_draw_agents(self):
    params = ParameterServer()
    behavior = BehaviorConstantVelocity(params)
    execution = ExecutionModelInterpolate(params)
    dynamic = SingleTrackModel(params)
    shape = Polygon2d([1.25, 1, 0], [
        Point2d(0, 0),
        Point2d(0, 2),
        Point2d(4, 2),
        Point2d(4, 0),
        Point2d(0, 0)
    ])
    shape2 = CarLimousine()

    init_state = [0, 3, 2, 1]
    init_state2 = [0, 0, 5, 4]

    agent = Agent(init_state, behavior, dynamic, execution, shape,
                  params.AddChild("agent"))
    agent2 = Agent(init_state2, behavior, dynamic, execution, shape2,
                   params.AddChild("agent"))
Example #13
    def test_world(self):
        # create agent
        params = ParameterServer()
        behavior = BehaviorConstantVelocity(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = Polygon2d([1.25, 1, 0], [
            Point2d(0, 0),
            Point2d(0, 2),
            Point2d(4, 2),
            Point2d(4, 0),
            Point2d(0, 0)
        ])
        init_state = np.array([0, 0, 0, 0, 5])
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"))
        road_map = OpenDriveMap()
        newXodrRoad = XodrRoad()
        newXodrRoad.id = 1
        newXodrRoad.name = "Autobahn A9"
        newPlanView = PlanView()
        newPlanView.AddLine(Point2d(0, 0), 1.57079632679, 10)
        newXodrRoad.plan_view = newPlanView
        line = newXodrRoad.plan_view.GetReferenceLine().ToArray()
        p = Point2d(line[-1][0], line[-1][1])
        newXodrRoad.plan_view.AddSpiral(p, 1.57079632679, 50.0, 0.0, 0.3, 0.4)
        line = newXodrRoad.plan_view.GetReferenceLine()
        lane_section = XodrLaneSection(0)
        lane = XodrLane()
        lane.line = line
        lane_section.AddLane(lane)
        newXodrRoad.AddLaneSection(lane_section)
        road_map.AddRoad(newXodrRoad)

        r = Roadgraph()
        map_interface = MapInterface()
        map_interface.SetOpenDriveMap(road_map)
        map_interface.SetRoadgraph(r)
        world = World(params)
        world.AddAgent(agent)
Example #14
    def test_evaluator_drivable_area(self):
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorConstantVelocity(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)
        #open_drive_map = world.map.GetOpenDriveMap()

        #agent_2d_shape = CarLimousine()
        agent_2d_shape = Polygon2d(
            [1.25, 1, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(3, 1),
             Point2d(3, -1)])
        init_state = np.array([0, 3, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(
            init_state,
            behavior_model,
            dynamic_model,
            execution_model,
            agent_2d_shape,
            agent_params,
            GoalDefinitionPolygon(goal_polygon),  # goal_lane_id
            map_interface)
        world.AddAgent(agent)

        evaluator = EvaluatorDrivableArea()
        world.AddEvaluator("drivable_area", evaluator)

        info = world.Evaluate()
        self.assertFalse(info["drivable_area"])

        viewer = MPViewer(params=params, use_world_bounds=True)

        # Draw goal definition, world, and road corridor
        viewer.drawGoalDefinition(goal_polygon,
                                  color=(1, 0, 0),
                                  alpha=0.5,
                                  facecolor=(1, 0, 0))
        viewer.drawWorld(world)
        viewer.drawRoadCorridor(agent.road_corridor)
        viewer.show(block=False)
Example #15
    def test_uct_single_agent(self):
        try:
            from bark.models.behavior import BehaviorUCTSingleAgentMacroActions
        except ImportError:
            print("Rerun with --define planner_uct=true")
            return
        # World Definition
        scenario_param_file = "macro_actions_test.json"  # must be located in modules/world/tests/params
        params = ParameterServer(filename=os.path.join(
            "modules/world/tests/params/", scenario_param_file))

        world = World(params)

        # Model Definitions
        behavior_model = BehaviorUCTSingleAgentMacroActions(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantVelocity(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        # agent_2d_shape = CarLimousine()
        agent_2d_shape = CarRectangle()
        init_state = np.array([0, 3, -5.25, 0, 20])
        agent_params = params.AddChild("agent1")

        # goal_polygon = Polygon2d(
        #     [1, 1, 0], [Point2d(0, 0), Point2d(0, 2), Point2d(2, 2), Point2d(2, 0)])
        # goal_definition = GoalDefinitionPolygon(goal_polygon)
        # goal_polygon = goal_polygon.Translate(Point2d(90, -2))

        center_line = Line2d()
        center_line.AddPoint(Point2d(0.0, -1.75))
        center_line.AddPoint(Point2d(100.0, -1.75))

        max_lateral_dist = (0.4, 0.5)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (5.0, 20.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      goal_definition, map_interface)
        world.AddAgent(agent)

        init_state2 = np.array([0, 25, -5.25, 0, 0])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       goal_definition, map_interface)
        world.AddAgent(agent2)

        # viewer
        viewer = MPViewer(params=params, use_world_bounds=True)

        # World Simulation
        sim_step_time = params["simulation"]["step_time",
                                             "Step-time in simulation", 0.2]
        sim_real_time_factor = params["simulation"][
            "real_time_factor", "execution in real-time or faster", 1]

        # Video rendering
        video_renderer = VideoRenderer(renderer=viewer,
                                       world_step_time=sim_step_time)

        for _ in range(0, 5):
            world.Step(sim_step_time)
            viewer.clear()
            video_renderer.drawWorld(world)
            video_renderer.drawGoalDefinition(goal_definition)
            time.sleep(sim_step_time / sim_real_time_factor)

        video_renderer.export_video(filename="./test_video_intermediate",
                                    remove_image_dir=True)
Example #16
    def test_python_behavior_model(self):
        # World Definition
        scenario_param_file = "macro_actions_test.json"  # must be located in modules/world/tests/params
        params = ParameterServer(filename=os.path.join(
            "modules/world/tests/params/", scenario_param_file))

        world = World(params)

        # Define two behavior models: one Python model and one standard C++ model
        behavior_model = PythonDistanceBehavior(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantVelocity(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Define the map interface and load a testing map
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        # Define the agent shapes
        agent_2d_shape = CarRectangle()
        init_state = np.array([0, 3, -5.25, 0, 20])

        # Define the goal definition for agents
        center_line = Line2d()
        center_line.AddPoint(Point2d(0.0, -1.75))
        center_line.AddPoint(Point2d(100.0, -1.75))

        max_lateral_dist = (0.4, 0.5)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (5.0, 20.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        # define two agents with the different behavior models
        agent_params = params.AddChild("agent1")
        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      goal_definition, map_interface)
        world.AddAgent(agent)

        init_state2 = np.array([0, 25, -5.25, 0, 15])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       goal_definition, map_interface)
        world.AddAgent(agent2)

        # viewer
        viewer = MPViewer(params=params, use_world_bounds=True)

        # World Simulation
        sim_step_time = params["simulation"]["step_time",
                                             "Step-time in simulation", 0.2]
        sim_real_time_factor = params["simulation"][
            "real_time_factor", "execution in real-time or faster", 1]

        # Video rendering
        video_renderer = VideoRenderer(renderer=viewer,
                                       world_step_time=sim_step_time)

        for _ in range(0, 20):
            world.Step(sim_step_time)
            viewer.clear()
            video_renderer.drawWorld(world)
            video_renderer.drawGoalDefinition(goal_definition, "red", 0.5,
                                              "red")
            time.sleep(sim_step_time / sim_real_time_factor)

        video_renderer.export_video(filename="./test_video_intermediate",
                                    remove_image_dir=True)
Example #17
    def test_one_agent_at_goal_state_limits_frenet(self):
        param_server = ParameterServer()
        # Model Definition
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition
        agent_2d_shape = CarLimousine()
        agent_params = param_server.AddChild("agent1")

        center_line = Line2d()
        center_line.AddPoint(Point2d(5.0, 5.0))
        center_line.AddPoint(Point2d(10.0, 10.0))
        center_line.AddPoint(Point2d(20.0, 10.0))

        max_lateral_dist = (0.4, 1)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (20.0, 25.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        # x, y not at goal; orientation and velocity within limits
        agent1 = Agent(np.array([0, 6, 8, 3.14 / 4.0, velocity_range[0]]),
                       behavior_model, dynamic_model, execution_model,
                       agent_2d_shape, agent_params, goal_definition, None)

        # x, y at goal; orientation and velocity within limits
        agent2 = Agent(np.array([0, 5.0, 5.5, 3.14 / 4.0, velocity_range[1]]),
                       behavior_model, dynamic_model, execution_model,
                       agent_2d_shape, agent_params, goal_definition, None)

        # x, y, v at goal, but orientation outside the limits
        agent3 = Agent(
            np.array(
                [0, 5, 5.5, 3.14 / 4.0 + max_orientation_diff[1] + 0.001,
                 20]), behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params, goal_definition, None)

        # x, y and orientation at goal, but velocity below the range
        agent4 = Agent(
            np.array([
                0, 5, 4.5, 3.14 / 4 - max_orientation_diff[0],
                velocity_range[0] - 0.01
            ]), behavior_model, dynamic_model, execution_model, agent_2d_shape,
            agent_params, goal_definition, None)

        # x, y at goal, just inside the lateral limit
        agent5 = Agent(
            np.array([
                0, 15, 10 - max_lateral_dist[0] + 0.05, 0, velocity_range[1]
            ]), behavior_model, dynamic_model, execution_model, agent_2d_shape,
            agent_params, goal_definition, None)

        # x, y slightly outside the lateral limit
        agent6 = Agent(
            np.array([
                0, 15, 10 + max_lateral_dist[0] + 0.05,
                3.14 / 4 + max_orientation_diff[0], velocity_range[0]
            ]), behavior_model, dynamic_model, execution_model, agent_2d_shape,
            agent_params, goal_definition, None)

        # x, y, v at goal, but orientation just outside the lower limit
        agent7 = Agent(
            np.array(
                [0, 5, 5.5, 3.14 / 4.0 - max_orientation_diff[0] - 0.001,
                 20]), behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params, goal_definition, None)

        world = World(param_server)
        world.AddAgent(agent1)
        world.AddAgent(agent2)
        world.AddAgent(agent3)
        world.AddAgent(agent4)
        world.AddAgent(agent5)
        world.AddAgent(agent6)
        world.AddAgent(agent7)

        evaluator1 = EvaluatorGoalReached(agent1.id)
        evaluator2 = EvaluatorGoalReached(agent2.id)
        evaluator3 = EvaluatorGoalReached(agent3.id)
        evaluator4 = EvaluatorGoalReached(agent4.id)
        evaluator5 = EvaluatorGoalReached(agent5.id)
        evaluator6 = EvaluatorGoalReached(agent6.id)
        evaluator7 = EvaluatorGoalReached(agent7.id)
        world.AddEvaluator("success1", evaluator1)
        world.AddEvaluator("success2", evaluator2)
        world.AddEvaluator("success3", evaluator3)
        world.AddEvaluator("success4", evaluator4)
        world.AddEvaluator("success5", evaluator5)
        world.AddEvaluator("success6", evaluator6)
        world.AddEvaluator("success7", evaluator7)

        info = world.Evaluate()
        self.assertEqual(info["success1"], False)
        self.assertEqual(info["success2"], True)
        self.assertEqual(info["success3"], False)
        self.assertEqual(info["success4"], False)
        self.assertEqual(info["success5"], True)
        self.assertEqual(info["success6"], False)
        self.assertEqual(info["success7"], False)
Example #18
from bark.geometry.standard_shapes import CarLimousine
from bark.geometry import Point2d, Polygon2d

# Parameters Definitions
param_server = ParameterServer(
    filename="examples/params/od8_const_vel_one_agent.json")
# set a parameter that is accessible in Python as well as in C++
# param_server.setReal("wheel_base", 0.8)

# World Definition
world = World(param_server)

# Model Definitions
behavior_model = BehaviorConstantVelocity(param_server)
execution_model = ExecutionModelInterpolate(param_server)
dynamic_model = SingleTrackModel(param_server)

# Map Definition
xodr_parser = XodrParser("modules/runtime/tests/data/Crossing8Course.xodr")
map_interface = MapInterface()
map_interface.SetOpenDriveMap(xodr_parser.map)
world.SetMap(map_interface)

# Agent Definition
agent_2d_shape = CarLimousine()
init_state = np.array([0, -15, -13, 3.14 * 5.0 / 4.0, 10 / 3.6])
agent_params = param_server.addChild("agent1")
goal_polygon = Polygon2d(
    [0, 0, 0],
    [Point2d(-1, -1),
     Point2d(-1, 1),