def create_source_config_agents(self, agent_states, agent_geometries,
                                 behavior_models, execution_models,
                                 dynamic_models, goal_definitions,
                                 controlled_agent_ids, world, agent_params,
                                 **kwargs):
    """Build one BARK agent per entry of the per-agent configuration lists.

    Every list argument must have the same length as ``agent_states``;
    otherwise a ValueError is raised.  Agent ids are taken from
    ``kwargs["agent_ids"]`` when given, else the list index is used.

    Returns:
        Tuple ``(agents, controlled_ids)`` where ``controlled_ids`` holds
        the ids of agents flagged in ``controlled_agent_ids``.
    """
    expected_len = len(agent_states)
    per_agent_lists = [agent_geometries, behavior_models, execution_models,
                       dynamic_models, goal_definitions, controlled_agent_ids]
    if any(len(lst) != expected_len for lst in per_agent_lists):
        raise ValueError(
            "Config readers did not return equal sized of lists")

    agents = []
    controlled_ids = []
    for idx, agent_state in enumerate(agent_states):
        bark_agent = Agent(np.array(agent_state), behavior_models[idx],
                           dynamic_models[idx], execution_models[idx],
                           agent_geometries[idx], agent_params,
                           goal_definitions[idx], world.map)
        # explicit ids take precedence over the positional index
        if "agent_ids" in kwargs:
            agent_id = kwargs["agent_ids"][idx]
        else:
            agent_id = idx
        bark_agent.SetAgentId(agent_id)
        if controlled_agent_ids[idx]:
            controlled_ids.append(agent_id)
        agents.append(bark_agent)
    return agents, controlled_ids
    def test_number_of_agents(self):
        """EvaluatorNumberOfAgents must keep reporting two agents even after
        one agent is removed from the world, i.e. the count reflects the
        world state at evaluator construction, not the live agent list.
        """
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarLimousine()
        init_state = np.array([0, 13, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        init_state2 = np.array([0, 16, -1.75, 0, 5])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        evaluator = EvaluatorNumberOfAgents(agent.id)
        world.AddEvaluator("num_agents", evaluator)

        # With both agents present the evaluator matches the live count.
        info = world.Evaluate()
        self.assertEqual(info["num_agents"], len(world.agents))
        # do it once more
        self.assertEqual(info["num_agents"], len(world.agents))

        world.RemoveAgentById(agent2.id)
        info = world.Evaluate()
        # evaluator should still hold two
        self.assertNotEqual(info["num_agents"], len(world.agents))
        self.assertEqual(info["num_agents"], 2)

        world.Step(0.1)
        info = world.Evaluate()
        # evaluator should still hold two
        self.assertEqual(info["num_agents"], 2)
    def AgentFromTrackfile(self, track_params, param_server,
                           scenario_track_info, agent_id):
        """Create a BARK agent from one track of a recorded scenario.

        Resolves the track info belonging to ``agent_id`` (ego or one of the
        other agents), loads the trajectory from the track file and
        assembles an ``Agent`` with behavior, dynamic and execution models.

        Args:
            track_params: parameter set providing "behavior_model",
                "dynamic_model", "execution_model" and "map_interface".
            param_server: root parameter server; each agent receives its own
                child "agent<id>".
            scenario_track_info: scenario description holding ego/other
                track infos plus the scenario start/end timestamps.
            agent_id: track id of the agent to build.

        Returns:
            The assembled bark ``Agent`` with its agent id set to the
            track id.

        Raises:
            ValueError: if ``agent_id`` is unknown, or if the initial state,
                dynamic model, execution model or vehicle shape cannot be
                created (the original exception is chained as the cause).
        """
        if scenario_track_info.GetEgoTrackInfo().GetTrackId() == agent_id:
            agent_track_info = scenario_track_info.GetEgoTrackInfo()
        elif agent_id in scenario_track_info.GetOtherTrackInfos().keys():
            agent_track_info = scenario_track_info.GetOtherTrackInfos(
            )[agent_id]
        else:
            raise ValueError("unknown agent id {}".format(agent_id))
        fname = agent_track_info.GetFileName()  # track_params["filename"]
        track_id = agent_track_info.GetTrackId()  # track_params["track_id"]
        agent_id = agent_track_info.GetTrackId()
        track = self.TrackFromTrackfile(fname, track_id)
        start_time = scenario_track_info.GetStartTs()
        end_time = scenario_track_info.GetEndTs()

        behavior_model = track_params["behavior_model"]
        model_converter = ModelJsonConversion()
        if behavior_model is None:
            # each agent needs its own param server child
            behavior = BehaviorFromTrack(
                track, param_server.AddChild("agent{}".format(agent_id)),
                start_time, end_time)
        else:
            behavior = behavior_model

        # Bare "except:" narrowed to Exception and chained via "from err":
        # a bare except would also swallow KeyboardInterrupt/SystemExit and
        # hide the root cause of the failure.
        try:
            initial_state = InitStateFromTrack(track, start_time)
        except Exception as err:
            raise ValueError(
                "Could not retrieve initial state of agent {} at t={}.".format(
                    agent_id, start_time)) from err

        try:
            dynamic_model = model_converter.convert_model(
                track_params["dynamic_model"], param_server)
        except Exception as err:
            raise ValueError("Could not create dynamic_model") from err

        try:
            execution_model = model_converter.convert_model(
                track_params["execution_model"], param_server)
        except Exception as err:
            raise ValueError("Could not retrieve execution_model") from err

        try:
            vehicle_shape = ShapeFromTrack(
                track, param_server["DynamicModel"]["wheel_base", "Wheel base",
                                                    2.7])
        except Exception as err:
            raise ValueError("Could not create vehicle_shape") from err

        bark_agent = Agent(initial_state, behavior, dynamic_model,
                           execution_model, vehicle_shape,
                           param_server.AddChild("agent{}".format(agent_id)),
                           GoalDefinitionFromTrack(track, end_time),
                           track_params["map_interface"])
        # set agent id from track
        bark_agent.SetAgentId(track_id)
        return bark_agent
Exemple #4
0
    def test_lane_change(self):
        """Smoke test: step and visualize a MOBIL agent behind a slower
        lane-tracking agent for ten simulation steps (no assertions).
        """
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorMobil(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorIDMLaneTracking(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        #agent_2d_shape = CarLimousine()
        agent_2d_shape = CarRectangle()
        # MOBIL agent starts at x=3 with v=5
        init_state = np.array([0, 3, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        # slower agent (v=2) ahead in the same lane — presumably provokes
        # the MOBIL lane change; TODO confirm against BehaviorMobil params
        init_state2 = np.array([0, 15, -1.75, 0, 2])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        # viewer
        viewer = MPViewer(params=params, use_world_bounds=True)

        # World Simulation
        sim_step_time = params["simulation"]["step_time",
                                             "Step-time in simulation", 0.05]
        sim_real_time_factor = params["simulation"][
            "real_time_factor", "execution in real-time or faster", 100]

        # Step the world and redraw it each iteration.
        for _ in range(0, 10):
            viewer.clear()
            world.Step(sim_step_time)
            viewer.drawWorld(world)
            viewer.show(block=False)
            time.sleep(sim_step_time / sim_real_time_factor)
Exemple #5
0
def make_initial_world(primitives):
    """Build a two-agent world on a straight two-lane test road.

    The first agent drives with a motion-primitive behavior seeded with the
    given ``primitives`` (the first primitive is selected as the initial
    action); the second agent keeps a constant velocity.  Both agents share
    a rectangular shape and a Frenet state-limit goal along the lane's
    center line.

    Returns:
        The populated ``World``.
    """
    params = ParameterServer()
    world = World(params)

    # Ego models: one motion-primitive action per given primitive.
    ego_behavior = BehaviorMPContinuousActions(params)
    for primitive in primitives:
        ego_behavior.AddMotionPrimitive(np.array(primitive))
    ego_behavior.ActionToBehavior(0)  # start with the first action
    ego_execution = ExecutionModelInterpolate(params)
    ego_dynamic = SingleTrackModel(params)

    # Other agent's models: plain constant velocity.
    other_behavior = BehaviorConstantVelocity(params)
    other_execution = ExecutionModelInterpolate(params)
    other_dynamic = SingleTrackModel(params)

    # Straight two-lane testing map.
    map_interface = MapInterface()
    map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
    world.SetMap(map_interface)

    vehicle_shape = CarRectangle()
    ego_state = np.array([0, 3, -5.25, 0, 20])

    # Goal: follow the lane center line within lateral, orientation and
    # velocity limits.
    center_line = Line2d()
    center_line.AddPoint(Point2d(0.0, -1.75))
    center_line.AddPoint(Point2d(100.0, -1.75))
    goal_definition = GoalDefinitionStateLimitsFrenet(
        center_line,
        (0.4, 0.5),    # max lateral distance
        (0.08, 0.1),   # max orientation difference
        (5.0, 20.0))   # allowed velocity range

    # Both agents share one params child and the same goal definition.
    agent_params = params.AddChild("agent1")
    ego_agent = Agent(ego_state, ego_behavior, ego_dynamic, ego_execution,
                      vehicle_shape, agent_params, goal_definition,
                      map_interface)
    world.AddAgent(ego_agent)

    other_state = np.array([0, 25, -5.25, 0, 15])
    other_agent = Agent(other_state, other_behavior, other_dynamic,
                        other_execution, vehicle_shape, agent_params,
                        goal_definition, map_interface)
    world.AddAgent(other_agent)

    return world
    def test_gap_distance_front(self):
        """EvaluatorGapDistanceFront must report the bumper-to-bumper gap:
        the 10 m distance between the agents' state positions minus the
        front and rear extents of the vehicle shape.
        """
        # World Definition
        params = ParameterServer()
        world = World(params)

        # center-to-center longitudinal distance between the two agents
        gap = 10

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarLimousine()
        init_state = np.array([0, 13, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        # second agent exactly `gap` meters ahead, same lane, same speed
        init_state2 = np.array([0, 13 + gap, -1.75, 0, 5])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        world.Step(0.1)

        evaluator = EvaluatorGapDistanceFront(agent.id)
        world.AddEvaluator("gap", evaluator)

        # equal speeds keep the gap constant; subtract vehicle extents
        info = world.Evaluate()
        self.assertAlmostEqual(info["gap"],
                               gap - agent_2d_shape.front_dist -
                               agent_2d_shape.rear_dist,
                               places=4)
Exemple #7
0
    def test_lateral_same_direction(self):
        """Two agents driving the same direction in adjacent lanes of a
        straight highway: the RSS pairwise directional evaluation must keep
        reporting the lateral situation (index 1) as safe over seven steps.
        """
        map = "bark/runtime/tests/data/city_highway_straight.xodr"
        map_interface = EvaluatorRSSTests.load_map(map)
        world = self.defaults["world"].Copy()
        world.SetMap(map_interface)

        goal_polygon_1 = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon_1 = goal_polygon_1.Translate(Point2d(5.5, 120))

        goal_polygon_2 = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon_2 = goal_polygon_2.Translate(Point2d(1.8, 120))

        # Hard coded
        # x=5.5 vs x=1.8 puts the agents in adjacent lanes
        ego_state = np.array([0, 5.5, 10, 0, 10])
        other_state = np.array([0, 1.8, -10, 0, 15])

        ego = Agent(ego_state, self.defaults["ego_behavior"],
                    self.defaults["ego_dynamic"],
                    self.defaults["ego_execution"], self.defaults["ego_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon_1), map_interface)
        other = Agent(other_state, self.defaults["other_behavior"],
                      self.defaults["other_dynamic"],
                      self.defaults["other_execution"],
                      self.defaults["other_shape"],
                      self.defaults["agent_params"],
                      GoalDefinitionPolygon(goal_polygon_2), map_interface)

        world.AddAgent(ego)
        world.AddAgent(other)

        evaluator_rss = EvaluatorRss(ego.id, map,
                                     self.defaults["default_vehicle_dynamics"])

        # index 1 of the per-agent response is the lateral verdict
        for _ in range(7):
            world.Step(1)
            self.assertEqual(
                True,
                evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][1])
Exemple #8
0
    def test_one_agent_at_goal_state_limits(self):
        """An agent placed exactly inside a state-limit goal (position and
        orientation within bounds) must be reported as successful on the
        very first evaluation."""
        param_server = ParameterServer()

        # Model Definition
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition: start pose coincides with the goal pose.
        agent_2d_shape = CarLimousine()
        heading = 3.14 * 3.0 / 4.0
        init_state = np.array([0, -191.789, -50.1725, heading, 150 / 3.6])
        agent_params = param_server.AddChild("agent1")
        unit_square = Polygon2d([0, 0, 0],
                                [Point2d(-1, -1),
                                 Point2d(-1, 1),
                                 Point2d(1, 1),
                                 Point2d(1, -1)])
        goal_polygon = unit_square.Translate(Point2d(-191.789, -50.1725))

        # Goal allows +-0.08 rad around the agent's heading.
        goal_definition = GoalDefinitionStateLimits(
            goal_polygon, (heading - 0.08, heading + 0.08))
        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      goal_definition, None)

        world = World(param_server)
        world.AddAgent(agent)
        world.AddEvaluator("success", EvaluatorGoalReached(agent.id))

        info = world.Evaluate()
        self.assertEqual(info["success"], True)
Exemple #9
0
  def test_planning_time(self):
    """EvaluatorPlanningTime must report 0.0 before any world step."""
    param_server = ParameterServer()

    # Model Definition
    behavior_model = BehaviorConstantAcceleration(param_server)
    execution_model = ExecutionModelInterpolate(param_server)
    dynamic_model = SingleTrackModel(param_server)

    # Agent Definition: goal is an 8x8 square centered on the start pose.
    agent_2d_shape = CarLimousine()
    init_state = np.array(
        [0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
    agent_params = param_server.AddChild("agent1")
    square = Polygon2d([0, 0, 0],
                       [Point2d(-4, -4),
                        Point2d(-4, 4),
                        Point2d(4, 4),
                        Point2d(4, -4)])
    goal_polygon = square.Translate(Point2d(-191.789, -50.1725))

    agent = Agent(init_state, behavior_model, dynamic_model, execution_model,
                  agent_2d_shape, agent_params,
                  GoalDefinitionPolygon(goal_polygon), None)

    world = World(param_server)
    world.AddAgent(agent)
    world.AddEvaluator("time", EvaluatorPlanningTime(agent.id))

    # No step has been executed yet, so no planning time was accumulated.
    info = world.Evaluate()
    self.assertEqual(info["time"], 0.0)
Exemple #10
0
    def test_longitude_ego_follow_other(self):
        """Ego (v=10) follows a slower agent (v=7) in the same lane: the RSS
        longitudinal verdict (index 0) is still safe after the first small
        step but becomes unsafe after the second.
        """
        map = "bark/runtime/tests/data/city_highway_straight.xodr"
        map_interface = EvaluatorRSSTests.load_map(map)
        world = self.defaults["world"].Copy()
        world.SetMap(map_interface)

        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon = goal_polygon.Translate(Point2d(1.8, 120))

        # The safety distance seems more conservative than in the paper
        # Hard coded
        # same lane (x=1.8); other agent roughly 42 m ahead of the ego
        ego_state = np.array([0, 1.8, -114.9, 0, 10])
        other_state = np.array([0, 1.8, -72.95, 0, 7])

        ego = Agent(ego_state, self.defaults["ego_behavior"],
                    self.defaults["ego_dynamic"],
                    self.defaults["ego_execution"], self.defaults["ego_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon), map_interface)
        other = Agent(other_state, self.defaults["other_behavior"],
                      self.defaults["other_dynamic"],
                      self.defaults["other_execution"],
                      self.defaults["other_shape"],
                      self.defaults["agent_params"],
                      GoalDefinitionPolygon(goal_polygon), map_interface)

        world.AddAgent(ego)
        world.AddAgent(other)

        evaluator_rss = EvaluatorRss(ego.id,
                                     map,
                                     self.defaults["default_vehicle_dynamics"],
                                     checking_relevent_range=1.5)
        world.Step(0.01)
        # first step: longitudinal distance still safe
        self.assertEqual(
            True,
            evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][0])
        world.Step(0.01)
        # second step: safety distance violated
        self.assertEqual(
            False,
            evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][0])
Exemple #11
0
    def test_lateral_merging(self):
        """Two agents on a merging map: the RSS lateral verdict (index 1) is
        safe after the first 1 s step and violated after the second.
        """
        map = "bark/runtime/tests/data/DR_DEU_Merging_MT_v01_centered.xodr"
        map_interface = EvaluatorRSSTests.load_map(map)
        world = self.defaults["world"].Copy()
        world.SetMap(map_interface)

        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon = goal_polygon.Translate(Point2d(-16, 108))

        # Hard coded
        ego_state = np.array([0, 68, 108, 0, 5])
        other_state = np.array([0, 64, 104, 0, 5])

        ego = Agent(ego_state, self.defaults["ego_behavior"],
                    self.defaults["ego_dynamic"],
                    self.defaults["ego_execution"], self.defaults["ego_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon), map_interface)
        other = Agent(other_state, self.defaults["other_behavior"],
                      self.defaults["other_dynamic"],
                      self.defaults["other_execution"],
                      self.defaults["other_shape"],
                      self.defaults["agent_params"],
                      GoalDefinitionPolygon(goal_polygon), map_interface)

        world.AddAgent(ego)
        world.AddAgent(other)

        evaluator_rss = EvaluatorRss(ego.id, map,
                                     self.defaults["default_vehicle_dynamics"])
        world.Step(1)
        # after one step the lateral situation is still safe
        self.assertEqual(
            True,
            evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][1])

        world.Step(1)
        # after two steps the lateral safety distance is violated
        self.assertEqual(
            False,
            evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][1])
Exemple #12
0
  def test_one_agent_at_goal_sequential(self):
    """An agent driving straight with a single motion primitive must reach
    three sequential state-limit goals at x = 10, 20 and 30 in order and
    stop close to the last one."""
    param_server = ParameterServer()

    # Motion-primitive behavior with one constant action.
    dynamic_model = SingleTrackModel(param_server)
    behavior_model = BehaviorMPContinuousActions(param_server)
    action_idx = behavior_model.AddMotionPrimitive(np.array([1, 0]))
    behavior_model.ActionToBehavior(action_idx)
    execution_model = ExecutionModelInterpolate(param_server)

    # Agent starts at the origin, at rest.
    agent_2d_shape = CarLimousine()
    init_state = np.array([0, 0, 0, 0, 0])
    agent_params = param_server.AddChild("agent1")

    # Three identical square goal regions along the x-axis.
    goal_frame = Polygon2d([0, 0, 0],
                           [Point2d(-1, -1),
                            Point2d(-1, 1),
                            Point2d(1, 1),
                            Point2d(1, -1)])
    sub_goals = [
        GoalDefinitionStateLimits(goal_frame.Translate(Point2d(x, 0)),
                                  [-0.08, 0.08])
        for x in (10, 20, 30)
    ]
    goal_definition = GoalDefinitionSequential(sub_goals)
    self.assertEqual(len(goal_definition.sequential_goals), 3)

    agent = Agent(init_state, behavior_model, dynamic_model, execution_model,
                  agent_2d_shape, agent_params, goal_definition, None)

    world = World(param_server)
    world.AddAgent(agent)
    world.AddEvaluator("success", EvaluatorGoalReached(agent.id))

    # Driving with the single motion primitive should eventually pass all
    # goals; stop stepping as soon as the evaluator reports success.
    for _ in range(0, 1000):
        world.Step(0.2)
        info = world.Evaluate()
        if info["success"]:
            break

    self.assertEqual(info["success"], True)
    # the last goal sits at x = 30, so the agent must end up close to it
    self.assertAlmostEqual(agent.state[int(StateDefinition.X_POSITION)],
                           30, delta=0.5)
Exemple #13
0
    def test_draw_agents(self):
        """Construct two agents with different shapes (hand-built polygon vs.
        limousine model) for drawing; smoke test without assertions."""
        params = ParameterServer()
        behavior = BehaviorConstantAcceleration(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)

        # 4 m x 2 m rectangle, explicitly closed by repeating the first point.
        polygon_shape = Polygon2d([1.25, 1, 0], [
            Point2d(0, 0),
            Point2d(0, 2),
            Point2d(4, 2),
            Point2d(4, 0),
            Point2d(0, 0)
        ])
        limousine_shape = CarLimousine()

        first_state = [0, 3, 2, 1]
        second_state = [0, 0, 5, 4]

        agent = Agent(first_state, behavior, dynamic, execution,
                      polygon_shape, params.AddChild("agent"))
        agent2 = Agent(second_state, behavior, dynamic, execution,
                       limousine_shape, params.AddChild("agent"))
Exemple #14
0
 def test_write_params_agent(self):
     """Create one agent and persist the parameter tree to a JSON file."""
     params = ParameterServer()
     behavior = BehaviorConstantAcceleration(params)
     execution = ExecutionModelInterpolate(params)
     dynamic = SingleTrackModel(params)
     # Closed rectangular footprint, 4 m x 2 m.
     shape = Polygon2d([1.25, 1, 0], [
         Point2d(0, 0),
         Point2d(0, 2),
         Point2d(4, 2),
         Point2d(4, 0),
         Point2d(0, 0)
     ])
     agent = Agent(np.zeros(4), behavior, dynamic, execution, shape,
                   params.AddChild("agent"))
     # creating the agent registers its params; write everything out
     params.Save("written_agents_param_test.json")
Exemple #15
0
    def test_world(self):
        """Build a world with a hand-constructed OpenDRIVE map (a straight
        segment continued by a spiral) and add one agent; smoke test
        without assertions.
        """
        # create agent
        params = ParameterServer()
        behavior = BehaviorConstantAcceleration(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = Polygon2d([1.25, 1, 0], [
            Point2d(0, 0),
            Point2d(0, 2),
            Point2d(4, 2),
            Point2d(4, 0),
            Point2d(0, 0)
        ])
        init_state = np.array([0, 0, 0, 0, 5])
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"))
        # hand-built map: one road whose plan view starts with a 10 m line
        road_map = OpenDriveMap()
        newXodrRoad = XodrRoad()
        newXodrRoad.id = 1
        newXodrRoad.name = "Autobahn A9"
        newPlanView = PlanView()
        newPlanView.AddLine(Point2d(0, 0), 1.57079632679, 10)
        newXodrRoad.plan_view = newPlanView
        # continue the reference line with a spiral starting at its end point
        line = newXodrRoad.plan_view.GetReferenceLine().ToArray()
        p = Point2d(line[-1][0], line[-1][1])
        newXodrRoad.plan_view.AddSpiral(p, 1.57079632679, 50.0, 0.0, 0.3, 0.4)
        line = newXodrRoad.plan_view.GetReferenceLine()
        # single lane section using the reference line as lane geometry
        lane_section = XodrLaneSection(0)
        lane = XodrLane()
        lane.line = line
        lane_section.AddLane(lane)
        newXodrRoad.AddLaneSection(lane_section)
        road_map.AddRoad(newXodrRoad)

        r = Roadgraph()
        map_interface = MapInterface()
        map_interface.SetOpenDriveMap(road_map)
        map_interface.SetRoadgraph(r)
        world = World(params)
        world.AddAgent(agent)
Exemple #16
0
    def AgentFromTrackfile(self, track_params, param_server,
                           scenario_track_info, agent_id, goal_def):
        """Create a BARK agent from one track of a recorded scenario.

        Variant with xy-offset handling, configurable wheel base / collision
        radius (from the track or from this reader's defaults) and an
        optional externally supplied goal definition.

        Args:
            track_params: parameter set providing "behavior_model",
                "dynamic_model", "execution_model" and "map_interface".
            param_server: root parameter server; each agent receives its own
                child "agent<id>".
            scenario_track_info: scenario description with ego/other track
                infos, xy offset and start/end times in milliseconds.
            agent_id: track id of the agent to build.
            goal_def: goal definition to use; when None, the goal is derived
                from the track's end state.

        Returns:
            The assembled bark ``Agent`` with its agent id set to the
            track id.

        Raises:
            ValueError: if ``agent_id`` is unknown, or if the initial state,
                dynamic model, execution model or vehicle shape cannot be
                created (the original exception is chained as the cause).
        """
        if scenario_track_info.GetEgoTrackInfo().GetTrackId() == agent_id:
            agent_track_info = scenario_track_info.GetEgoTrackInfo()
        elif agent_id in scenario_track_info.GetOtherTrackInfos().keys():
            agent_track_info = scenario_track_info.GetOtherTrackInfos(
            )[agent_id]
        else:
            raise ValueError("unknown agent id {}".format(agent_id))
        fname = agent_track_info.GetFileName()  # track_params["filename"]
        track_id = agent_track_info.GetTrackId()  # track_params["track_id"]
        agent_id = agent_track_info.GetTrackId()
        track = self.TrackFromTrackfile(fname, track_id)

        xy_offset = scenario_track_info.GetXYOffset()

        # create behavior model from track, we use start time of scenario here
        start_time = scenario_track_info.GetStartTimeMs()
        end_time = scenario_track_info.GetEndTimeMs()
        behavior_model = track_params["behavior_model"]
        model_converter = ModelJsonConversion()
        if behavior_model is None:
            # each agent needs its own param server child
            behavior = BehaviorFromTrack(
                track, param_server.AddChild("agent{}".format(agent_id)),
                xy_offset, start_time, end_time)
        else:
            behavior = behavior_model

        # retrieve initial state of valid agent: other agents may enter the
        # scenario later than the scenario start time
        if agent_id in scenario_track_info._other_agents_track_infos:
            start_time_init_state = max(
                scenario_track_info._other_agents_track_infos[agent_id].
                GetStartTimeMs(), start_time)
        else:
            start_time_init_state = start_time
        # Bare "except:" narrowed to Exception and chained via "from err":
        # a bare except would also swallow KeyboardInterrupt/SystemExit and
        # hide the root cause of the failure.
        try:
            initial_state = InitStateFromTrack(track, xy_offset,
                                               start_time_init_state)
        except Exception as err:
            raise ValueError(
                "Could not retrieve initial state of agent {} at t={}.".format(
                    agent_id, start_time_init_state)) from err

        # wheel base / collision radius either measured from the track or
        # taken from this reader's configured defaults
        if self._use_shape_from_track:
            wb = WheelbaseFromTrack(track)
            crad = ColRadiusFromTrack(track)
        else:
            wb = self._wb
            crad = self._crad

        param_server["DynamicModel"]["wheel_base"] = wb
        try:
            dynamic_model = model_converter.convert_model(
                track_params["dynamic_model"], param_server)
        except Exception as err:
            raise ValueError("Could not create dynamic_model") from err

        try:
            execution_model = model_converter.convert_model(
                track_params["execution_model"], param_server)
        except Exception as err:
            raise ValueError("Could not retrieve execution_model") from err

        try:
            if self._use_rectangle_shape:
                vehicle_shape = GenerateCarRectangle(wb, crad)
            else:
                vehicle_shape = GenerateCarLimousine(wb, crad)
        except Exception as err:
            raise ValueError("Could not create vehicle_shape") from err

        if goal_def is None:
            goal_def = GoalDefinitionFromTrack(track,
                                               end_time,
                                               xy_offset=xy_offset)

        bark_agent = Agent(initial_state, behavior, dynamic_model,
                           execution_model, vehicle_shape,
                           param_server.AddChild("agent{}".format(agent_id)),
                           goal_def, track_params["map_interface"])
        # set agent id from track
        bark_agent.SetAgentId(track_id)
        return bark_agent
Exemple #17
0
    def test_relevent_agents(self):
        """EvaluatorRss must only evaluate agents in the relevant range: a
        third agent starting 130 m behind the ego must not appear in the
        pairwise responses.
        """
        map = "bark/runtime/tests/data/city_highway_straight.xodr"
        map_interface = EvaluatorRSSTests.load_map(map)
        world = self.defaults["world"].Copy()
        world.SetMap(map_interface)

        goal_polygon_1 = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon_1 = goal_polygon_1.Translate(Point2d(5.5, 120))

        goal_polygon_2 = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon_2 = goal_polygon_2.Translate(Point2d(1.8, 120))

        # Hard coded
        # other_2 starts far behind the ego (y=-120 vs y=10)
        ego_state = np.array([0, 5.5, 10, 0, 10])
        other_1_state = np.array([0, 1.8, -10, 0, 15])
        other_2_state = np.array([0, 1.8, -120, 0, 10])

        ego = Agent(ego_state, self.defaults["ego_behavior"],
                    self.defaults["ego_dynamic"],
                    self.defaults["ego_execution"], self.defaults["ego_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon_1), map_interface)
        other_1 = Agent(other_1_state, self.defaults["other_behavior"],
                        self.defaults["other_dynamic"],
                        self.defaults["other_execution"],
                        self.defaults["other_shape"],
                        self.defaults["agent_params"],
                        GoalDefinitionPolygon(goal_polygon_2), map_interface)

        # other_2 gets its own models instead of the shared defaults
        other_2_behavior = BehaviorConstantAcceleration(
            self.defaults["agent_params"])
        other_2_dynamic = SingleTrackModel(self.defaults["agent_params"])
        other_2_execution = ExecutionModelInterpolate(
            self.defaults["agent_params"])

        other_2 = Agent(other_2_state, other_2_behavior, other_2_dynamic,
                        other_2_execution, self.defaults["other_shape"],
                        self.defaults["agent_params"],
                        GoalDefinitionPolygon(goal_polygon_2), map_interface)

        world.AddAgent(ego)
        world.AddAgent(other_1)
        world.AddAgent(other_2)

        evaluator_rss = EvaluatorRss(ego.id, map,
                                     self.defaults["default_vehicle_dynamics"])
        world.Step(1)
        responses = evaluator_rss.PairwiseEvaluate(world)

        # only the nearby agent is evaluated; the distant one is filtered
        self.assertEqual(1, len(responses))  # Test GetRelevantAgents
        self.assertTrue(responses[other_1.id])
        self.assertFalse(other_2.id in responses)
Example #18
0
    def test_evaluator_drivable_area(self):
        """Agent footprint leaving the lane must fail the drivable-area check."""
        params = ParameterServer()
        world = World(params)

        # Models for the single agent in this scenario.
        behavior = BehaviorConstantAcceleration(params)
        execution = ExecutionModelInterpolate(params)
        dynamics = SingleTrackModel(params)

        # Straight two-lane test road.
        map_interface = MapInterface()
        map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
        world.SetMap(map_interface)

        # Rectangular footprint, 4 m long and 2 m wide.
        footprint = Polygon2d(
            [1.25, 1, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(3, 1),
             Point2d(3, -1)])
        start_state = np.array([0, 3, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")

        # Goal region translated down the road.
        target_area = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        target_area = target_area.Translate(Point2d(50, -2))

        agent = Agent(start_state, behavior, dynamics, execution, footprint,
                      agent_params, GoalDefinitionPolygon(target_area),
                      map_interface)
        world.AddAgent(agent)

        world.AddEvaluator("drivable_area", EvaluatorDrivableArea())

        # The footprint sticks out of the lane, so the evaluation must fail.
        info = world.Evaluate()
        self.assertFalse(info["drivable_area"])

        # Visual debugging output (non-blocking).
        viewer = MPViewer(params=params, use_world_bounds=True)
        viewer.drawGoalDefinition(target_area,
                                  color=(1, 0, 0),
                                  alpha=0.5,
                                  facecolor=(1, 0, 0))
        viewer.drawWorld(world)
        viewer.drawRoadCorridor(agent.road_corridor)
        viewer.show(block=False)
Example #19
0
    def test_one_agent_at_goal_state_limits_frenet(self):
        """Check GoalDefinitionStateLimitsFrenet over a matrix of agent states.

        The goal counts as reached only when lateral offset to the center
        line, orientation difference and velocity all lie within their
        configured limits. The original test spelled out seven agents and
        seven evaluators by hand; this version drives the same scenarios
        from a (state, expected) table.
        """
        param_server = ParameterServer()
        # Model Definition (shared by all agents)
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition
        agent_2d_shape = CarLimousine()
        agent_params = param_server.AddChild("agent1")

        # Reference line: diagonal segment followed by a horizontal one.
        center_line = Line2d()
        center_line.AddPoint(Point2d(5.0, 5.0))
        center_line.AddPoint(Point2d(10.0, 10.0))
        center_line.AddPoint(Point2d(20.0, 10.0))

        max_lateral_dist = (0.4, 1)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (20.0, 25.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        # One (initial state, expected goal-reached) entry per scenario; the
        # comments carry over the rationale of the hand-written agents 1-7.
        cases = [
            # not at goal x,y, others yes
            (np.array([0, 6, 8, 3.14 / 4.0, velocity_range[0]]), False),
            # at goal x,y and others
            (np.array([0, 5.0, 5.5, 3.14 / 4.0, velocity_range[1]]), True),
            # not at goal x,y,v yes but not orientation
            (np.array([
                0, 5, 5.5, 3.14 / 4.0 + max_orientation_diff[1] + 0.001, 20
            ]), False),
            # not at goal x,y, orientation but not v
            (np.array([
                0, 5, 4.5, 3.14 / 4 - max_orientation_diff[0],
                velocity_range[0] - 0.01
            ]), False),
            # at goal x,y, at lateral limit
            (np.array([
                0, 15, 10 - max_lateral_dist[0] + 0.05, 0, velocity_range[1]
            ]), True),
            # not at goal x,y slightly out of lateral limit
            (np.array([
                0, 15, 10 + max_lateral_dist[0] + 0.05,
                3.14 / 4 + max_orientation_diff[0], velocity_range[0]
            ]), False),
            # not at goal x,y,v yes but not orientation
            (np.array([
                0, 5, 5.5, 3.14 / 4.0 - max_orientation_diff[0] - 0.001, 20
            ]), False),
        ]

        # Agents are built without a map (None), as in the original test.
        agents = [
            Agent(initial_state, behavior_model, dynamic_model,
                  execution_model, agent_2d_shape, agent_params,
                  goal_definition, None) for initial_state, _ in cases
        ]

        world = World(param_server)
        for agent in agents:
            world.AddAgent(agent)

        # Evaluator keys "success1".."success7" match the original test.
        for idx, agent in enumerate(agents, start=1):
            world.AddEvaluator("success%d" % idx,
                               EvaluatorGoalReached(agent.id))

        info = world.Evaluate()
        for idx, (_, expected) in enumerate(cases, start=1):
            self.assertEqual(info["success%d" % idx], expected)
Example #20
0
    def test_python_behavior_model(self):
        """Simulate a Python-defined behavior model next to a C++ one."""
        scenario_param_file = "macro_actions_test.json"  # must be within examples params folder
        params = ParameterServer(filename=os.path.join(
            os.path.dirname(__file__), "params/", scenario_param_file))

        world = World(params)

        # First agent: Python-implemented distance-keeping behavior.
        py_behavior = PythonDistanceBehavior(params)
        py_execution = ExecutionModelInterpolate(params)
        py_dynamic = SingleTrackModel(params)

        # Second agent: standard C++ constant-acceleration behavior.
        cpp_behavior = BehaviorConstantAcceleration(params)
        cpp_execution = ExecutionModelInterpolate(params)
        cpp_dynamic = SingleTrackModel(params)

        # Straight two-lane test map.
        map_interface = MapInterface()
        map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
        world.SetMap(map_interface)

        # Both agents share the same rectangular shape.
        shape = CarRectangle()

        # Frenet goal along the lane center line.
        center_line = Line2d()
        center_line.AddPoint(Point2d(0.0, -1.75))
        center_line.AddPoint(Point2d(100.0, -1.75))

        max_lateral_dist = (0.4, 0.5)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (5.0, 20.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        agent_params = params.AddChild("agent1")

        # Rear agent (x=3) runs the Python model.
        state_rear = np.array([0, 3, -5.25, 0, 20])
        world.AddAgent(
            Agent(state_rear, py_behavior, py_dynamic, py_execution, shape,
                  agent_params, goal_definition, map_interface))

        # Leading agent (x=25) runs the C++ model.
        state_lead = np.array([0, 25, -5.25, 0, 15])
        world.AddAgent(
            Agent(state_lead, cpp_behavior, cpp_dynamic, cpp_execution, shape,
                  agent_params, goal_definition, map_interface))

        viewer = MPViewer(params=params, use_world_bounds=True)

        # Simulation timing parameters.
        sim_step_time = params["simulation"]["step_time",
                                             "Step-time in simulation", 0.2]
        sim_real_time_factor = params["simulation"][
            "real_time_factor", "execution in real-time or faster", 1]

        # Render every step into a video.
        video_renderer = VideoRenderer(renderer=viewer,
                                       world_step_time=sim_step_time)

        for _ in range(0, 20):
            world.Step(sim_step_time)
            viewer.clear()
            video_renderer.drawWorld(world)
            video_renderer.drawGoalDefinition(goal_definition, "red", 0.5,
                                              "red")
            time.sleep(sim_step_time / sim_real_time_factor)

        video_renderer.export_video(filename="./test_video_intermediate",
                                    remove_image_dir=True)
Example #21
0
    def test_uct_single_agent(self):
        """Plan macro actions with the UCT single-agent planner.

        The planner is an optional build target; the test is skipped when
        its module cannot be imported.
        """
        try:
            from bark.core.models.behavior import BehaviorUCTSingleAgentMacroActions
        except ImportError:
            # Only skip on the missing optional module. The previous bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit and any
            # unrelated error raised during import.
            print("Rerun with --define planner_uct=true")
            return
        # World Definition
        scenario_param_file = "macro_actions_test.json"  # must be within examples params folder
        params = ParameterServer(filename=os.path.join(
            os.path.dirname(__file__), "params/", scenario_param_file))

        world = World(params)

        # Model Definitions: ego agent plans with UCT macro actions, the
        # other agent drives with constant acceleration.
        behavior_model = BehaviorUCTSingleAgentMacroActions(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition: straight two-lane road.
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarRectangle()
        init_state = np.array([0, 3, -5.25, 0, 20])
        agent_params = params.AddChild("agent1")

        # Goal: stay within Frenet state limits around the lane center line.
        center_line = Line2d()
        center_line.AddPoint(Point2d(0.0, -1.75))
        center_line.AddPoint(Point2d(100.0, -1.75))

        max_lateral_dist = (0.4, 0.5)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (5.0, 20.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      goal_definition, map_interface)
        world.AddAgent(agent)

        # Standing agent (v = 0) ahead of the ego vehicle.
        init_state2 = np.array([0, 25, -5.25, 0, 0])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       goal_definition, map_interface)
        world.AddAgent(agent2)

        # viewer
        viewer = MPViewer(params=params, use_world_bounds=True)

        # World Simulation timing parameters.
        sim_step_time = params["simulation"]["step_time",
                                             "Step-time in simulation", 0.2]
        sim_real_time_factor = params["simulation"][
            "real_time_factor", "execution in real-time or faster", 1]

        # Render each simulation step into a video.
        video_renderer = VideoRenderer(renderer=viewer,
                                       world_step_time=sim_step_time)

        for _ in range(0, 5):
            world.Step(sim_step_time)
            viewer.clear()
            video_renderer.drawWorld(world)
            video_renderer.drawGoalDefinition(goal_definition)
            time.sleep(sim_step_time / sim_real_time_factor)

        video_renderer.export_video(filename="./test_video_intermediate",
                                    remove_image_dir=True)