def test_number_of_agents(self):
    """EvaluatorNumberOfAgents must keep reporting the agent count captured
    when it first evaluated, even after agents are removed or the world steps."""
    # World definition
    params = ParameterServer()
    world = World(params)

    # Models for the first agent
    behavior_model = BehaviorConstantAcceleration(params)
    execution_model = ExecutionModelInterpolate(params)
    dynamic_model = SingleTrackModel(params)

    # Models for the second agent
    behavior_model2 = BehaviorConstantAcceleration(params)
    execution_model2 = ExecutionModelInterpolate(params)
    dynamic_model2 = SingleTrackModel(params)

    # Two-lane straight road
    map_interface = MapInterface()
    map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
    world.SetMap(map_interface)

    shape = CarLimousine()
    agent_params = params.AddChild("agent1")
    goal_polygon = Polygon2d(
        [1, 1, 0],
        [Point2d(0, 0), Point2d(0, 2), Point2d(2, 2), Point2d(2, 0)])
    goal_polygon = goal_polygon.Translate(Point2d(50, -2))

    first_state = np.array([0, 13, -1.75, 0, 5])
    agent = Agent(first_state, behavior_model, dynamic_model,
                  execution_model, shape, agent_params,
                  GoalDefinitionPolygon(goal_polygon), map_interface)
    world.AddAgent(agent)

    second_state = np.array([0, 16, -1.75, 0, 5])
    agent2 = Agent(second_state, behavior_model2, dynamic_model2,
                   execution_model2, shape, agent_params,
                   GoalDefinitionPolygon(goal_polygon), map_interface)
    world.AddAgent(agent2)

    world.AddEvaluator("num_agents", EvaluatorNumberOfAgents(agent.id))

    info = world.Evaluate()
    self.assertEqual(info["num_agents"], len(world.agents))
    # Check the same result a second time.
    self.assertEqual(info["num_agents"], len(world.agents))

    world.RemoveAgentById(agent2.id)
    info = world.Evaluate()
    # The evaluator should still hold two agents and hence disagree with
    # the world's current agent count.
    self.assertNotEqual(info["num_agents"], len(world.agents))
    self.assertEqual(info["num_agents"], 2)

    world.Step(0.1)
    info = world.Evaluate()
    # Stepping the world must not change the stored count either.
    self.assertEqual(info["num_agents"], 2)
def test_lane_change(self):
    """Simulate a MOBIL agent approaching a slower IDM lane-tracking agent
    on a two-lane road and render the scene for visual inspection."""
    params = ParameterServer()
    world = World(params)

    # Rear vehicle: MOBIL lane-change behavior.
    behavior_model = BehaviorMobil(params)
    execution_model = ExecutionModelInterpolate(params)
    dynamic_model = SingleTrackModel(params)
    # Front vehicle: slower IDM lane tracking.
    behavior_model2 = BehaviorIDMLaneTracking(params)
    execution_model2 = ExecutionModelInterpolate(params)
    dynamic_model2 = SingleTrackModel(params)

    # Two-lane straight road
    map_interface = MapInterface()
    map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
    world.SetMap(map_interface)

    shape = CarRectangle()
    agent_params = params.AddChild("agent1")
    goal_polygon = Polygon2d(
        [1, 1, 0],
        [Point2d(0, 0), Point2d(0, 2), Point2d(2, 2), Point2d(2, 0)])
    goal_polygon = goal_polygon.Translate(Point2d(50, -2))

    rear_state = np.array([0, 3, -1.75, 0, 5])
    rear_agent = Agent(rear_state, behavior_model, dynamic_model,
                       execution_model, shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
    world.AddAgent(rear_agent)

    front_state = np.array([0, 15, -1.75, 0, 2])
    front_agent = Agent(front_state, behavior_model2, dynamic_model2,
                        execution_model2, shape, agent_params,
                        GoalDefinitionPolygon(goal_polygon), map_interface)
    world.AddAgent(front_agent)

    # Viewer for drawing the world
    viewer = MPViewer(params=params, use_world_bounds=True)

    # Simulation parameters
    sim_step_time = params["simulation"]["step_time",
                                         "Step-time in simulation", 0.05]
    sim_real_time_factor = params["simulation"][
        "real_time_factor", "execution in real-time or faster", 100]

    # Step and redraw ten times.
    for _ in range(10):
        viewer.clear()
        world.Step(sim_step_time)
        viewer.drawWorld(world)
        viewer.show(block=False)
        time.sleep(sim_step_time / sim_real_time_factor)
def test_gap_distance_front(self):
    """EvaluatorGapDistanceFront should report the bumper-to-bumper distance:
    center gap minus the two vehicle overhangs."""
    params = ParameterServer()
    world = World(params)
    gap = 10

    # Models for the rear agent
    behavior_model = BehaviorConstantAcceleration(params)
    execution_model = ExecutionModelInterpolate(params)
    dynamic_model = SingleTrackModel(params)
    # Models for the front agent
    behavior_model2 = BehaviorConstantAcceleration(params)
    execution_model2 = ExecutionModelInterpolate(params)
    dynamic_model2 = SingleTrackModel(params)

    # Two-lane straight road
    map_interface = MapInterface()
    map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
    world.SetMap(map_interface)

    shape = CarLimousine()
    agent_params = params.AddChild("agent1")
    goal_polygon = Polygon2d(
        [1, 1, 0],
        [Point2d(0, 0), Point2d(0, 2), Point2d(2, 2), Point2d(2, 0)])
    goal_polygon = goal_polygon.Translate(Point2d(50, -2))

    # Rear vehicle at x=13, front vehicle `gap` meters ahead (center-to-center).
    rear_state = np.array([0, 13, -1.75, 0, 5])
    rear_agent = Agent(rear_state, behavior_model, dynamic_model,
                       execution_model, shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
    world.AddAgent(rear_agent)

    front_state = np.array([0, 13 + gap, -1.75, 0, 5])
    front_agent = Agent(front_state, behavior_model2, dynamic_model2,
                        execution_model2, shape, agent_params,
                        GoalDefinitionPolygon(goal_polygon), map_interface)
    world.AddAgent(front_agent)

    world.Step(0.1)
    world.AddEvaluator("gap", EvaluatorGapDistanceFront(rear_agent.id))
    info = world.Evaluate()
    # Free gap = center distance - front overhang of rear car
    #                            - rear overhang of front car.
    self.assertAlmostEqual(info["gap"],
                           gap - shape.front_dist - shape.rear_dist,
                           places=4)
def test_lateral_same_direction(self):
    """Two agents driving the same direction in adjacent lanes: after seven
    1 s steps, the pairwise directional RSS response at index [1] must be True.

    Fix: the local variable `map` shadowed the builtin `map`; renamed to
    `map_filename` (same string is still passed to load_map and EvaluatorRss).
    """
    map_filename = "bark/runtime/tests/data/city_highway_straight.xodr"
    map_interface = EvaluatorRSSTests.load_map(map_filename)
    world = self.defaults["world"].Copy()
    world.SetMap(map_interface)

    goal_polygon_1 = Polygon2d(
        [0, 0, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
    goal_polygon_1 = goal_polygon_1.Translate(Point2d(5.5, 120))
    goal_polygon_2 = Polygon2d(
        [0, 0, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
    goal_polygon_2 = goal_polygon_2.Translate(Point2d(1.8, 120))

    # Hard coded start states: ego at x=5.5, other in the neighboring lane.
    ego_state = np.array([0, 5.5, 10, 0, 10])
    other_state = np.array([0, 1.8, -10, 0, 15])

    ego = Agent(ego_state, self.defaults["ego_behavior"],
                self.defaults["ego_dynamic"], self.defaults["ego_execution"],
                self.defaults["ego_shape"], self.defaults["agent_params"],
                GoalDefinitionPolygon(goal_polygon_1), map_interface)
    other = Agent(other_state, self.defaults["other_behavior"],
                  self.defaults["other_dynamic"],
                  self.defaults["other_execution"],
                  self.defaults["other_shape"], self.defaults["agent_params"],
                  GoalDefinitionPolygon(goal_polygon_2), map_interface)

    world.AddAgent(ego)
    world.AddAgent(other)

    evaluator_rss = EvaluatorRss(ego.id, map_filename,
                                 self.defaults["default_vehicle_dynamics"])
    for _ in range(7):
        world.Step(1)

    # Index [1] of the pairwise response — presumably the lateral direction
    # (the longitudinal tests check index [0]) — TODO confirm against
    # EvaluatorRss documentation.
    self.assertEqual(
        True,
        evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][1])
def controlled_goal(self, world):
    """Return a polygon goal on the lane segment with the largest s-value of
    the controlled agent's road corridor.

    NOTE(review): the result of GetRoadCorridor() is discarded and
    `self._road_corridor` is read instead — presumably the call populates
    map-internal state and the attribute is set elsewhere; confirm.
    """
    world.map.GetRoadCorridor(self._road_ids, XodrDrivingDirection.forward)
    lane_corr = self._road_corridor.lane_corridors[0]
    # lanes are sorted by their s-value
    lanes = lane_corr.lanes
    # Pick the lane farthest along the corridor (largest s-value).
    s_val = max(lanes.keys())
    return GoalDefinitionPolygon(lanes[s_val].polygon)
def test_one_agent_at_goal_polygon(self):
    """An agent spawned at the center of its goal polygon must be evaluated
    as having reached the goal."""
    param_server = ParameterServer()

    # Model definition
    behavior_model = BehaviorConstantVelocity(param_server)
    execution_model = ExecutionModelInterpolate(param_server)
    dynamic_model = SingleTrackModel(param_server)

    # Agent definition — pose coincides with the goal polygon center below.
    shape = CarLimousine()
    init_state = np.array(
        [0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
    agent_params = param_server.AddChild("agent1")
    goal_polygon = Polygon2d(
        [0, 0, 0],
        [Point2d(-4, -4), Point2d(-4, 4), Point2d(4, 4), Point2d(4, -4)])
    goal_polygon = goal_polygon.Translate(Point2d(-191.789, -50.1725))
    agent = Agent(init_state, behavior_model, dynamic_model, execution_model,
                  shape, agent_params, GoalDefinitionPolygon(goal_polygon),
                  None)

    world = World(param_server)
    world.AddAgent(agent)
    world.AddEvaluator("success", EvaluatorGoalReached(agent.id))
    info = world.Evaluate()
    self.assertEqual(info["success"], True)
def goal(self, world):
    """Return a dummy goal: a 2x2 square polygon centered at (-1000, -1000)."""
    corners = [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)]
    return GoalDefinitionPolygon(Polygon2d([-1000, -1000, -1000], corners))
def test_longitude_ego_follow_other(self):
    """Ego following a slower car in the same lane: the longitudinal RSS
    response (index [0]) is True after the first tiny step and False after
    the second.

    Fix: the local variable `map` shadowed the builtin `map`; renamed to
    `map_filename`. The kwarg `checking_relevent_range` is the library's own
    (misspelled) parameter name and must not be changed here.
    """
    map_filename = "bark/runtime/tests/data/city_highway_straight.xodr"
    map_interface = EvaluatorRSSTests.load_map(map_filename)
    world = self.defaults["world"].Copy()
    world.SetMap(map_interface)

    goal_polygon = Polygon2d(
        [0, 0, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
    goal_polygon = goal_polygon.Translate(Point2d(1.8, 120))

    # The safety distance seems more conservative than in the paper.
    # Hard coded: ego behind and faster than the leading vehicle.
    ego_state = np.array([0, 1.8, -114.9, 0, 10])
    other_state = np.array([0, 1.8, -72.95, 0, 7])

    ego = Agent(ego_state, self.defaults["ego_behavior"],
                self.defaults["ego_dynamic"], self.defaults["ego_execution"],
                self.defaults["ego_shape"], self.defaults["agent_params"],
                GoalDefinitionPolygon(goal_polygon), map_interface)
    other = Agent(other_state, self.defaults["other_behavior"],
                  self.defaults["other_dynamic"],
                  self.defaults["other_execution"],
                  self.defaults["other_shape"], self.defaults["agent_params"],
                  GoalDefinitionPolygon(goal_polygon), map_interface)

    world.AddAgent(ego)
    world.AddAgent(other)

    evaluator_rss = EvaluatorRss(ego.id, map_filename,
                                 self.defaults["default_vehicle_dynamics"],
                                 checking_relevent_range=1.5)

    world.Step(0.01)
    # Still within the longitudinal safety distance.
    self.assertEqual(
        True,
        evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][0])

    world.Step(0.01)
    # After another step the evaluator reports the pair as unsafe.
    self.assertEqual(
        False,
        evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][0])
def test_lateral_merging(self):
    """Two agents merging on the DEU_Merging map: the directional RSS
    response at index [1] flips from True to False between two 1 s steps.

    Fix: the local variable `map` shadowed the builtin `map`; renamed to
    `map_filename`.
    """
    map_filename = "bark/runtime/tests/data/DR_DEU_Merging_MT_v01_centered.xodr"
    map_interface = EvaluatorRSSTests.load_map(map_filename)
    world = self.defaults["world"].Copy()
    world.SetMap(map_interface)

    goal_polygon = Polygon2d(
        [0, 0, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
    goal_polygon = goal_polygon.Translate(Point2d(-16, 108))

    # Hard coded start states close to the merge point.
    ego_state = np.array([0, 68, 108, 0, 5])
    other_state = np.array([0, 64, 104, 0, 5])

    ego = Agent(ego_state, self.defaults["ego_behavior"],
                self.defaults["ego_dynamic"], self.defaults["ego_execution"],
                self.defaults["ego_shape"], self.defaults["agent_params"],
                GoalDefinitionPolygon(goal_polygon), map_interface)
    other = Agent(other_state, self.defaults["other_behavior"],
                  self.defaults["other_dynamic"],
                  self.defaults["other_execution"],
                  self.defaults["other_shape"], self.defaults["agent_params"],
                  GoalDefinitionPolygon(goal_polygon), map_interface)

    world.AddAgent(ego)
    world.AddAgent(other)

    evaluator_rss = EvaluatorRss(ego.id, map_filename,
                                 self.defaults["default_vehicle_dynamics"])

    world.Step(1)
    self.assertEqual(
        True,
        evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][1])

    world.Step(1)
    # One step later the evaluator reports the pair as unsafe.
    self.assertEqual(
        False,
        evaluator_rss.PairwiseDirectionalEvaluate(world)[other.id][1])
def _build_sequential_goal_definition(self):
    """Build a GoalDefinitionSequential with one polygon goal per frame pose,
    each the goal-frame polygon transformed by that pose."""
    goals = [
        GoalDefinitionPolygon(
            Polygon2d(self.goal_frame_center,
                      np.array(self.goal_frame_points)).Transform(pose))
        for pose in self.goal_frame_poses
    ]
    return GoalDefinitionSequential(goals)
def __init__(self, init_state, goal_polygon, map_interface, params):
    """Build a test agent with default models: constant-acceleration
    behavior, interpolating execution, single-track dynamics, and a
    CarLimousine shape."""
    behavior = BehaviorConstantAcceleration(params)
    execution = ExecutionModelInterpolate(params)
    dynamic = SingleTrackModel(params)
    shape = CarLimousine()
    agent_params = params.AddChild("agent")
    super(TestAgent, self).__init__(
        init_state, behavior, dynamic, execution, shape, agent_params,
        GoalDefinitionPolygon(goal_polygon), map_interface)
def GoalDefinitionFromTrack(track, end):
    """Create a polygon goal at the position of the track's last motion state.

    Note: `end` is accepted for interface compatibility but is not used here.
    """
    _, last_motion_state = list(
        dict_utils.get_item_iterator(track.motion_states))[-1]
    bark_state = BarkStateFromMotionState(last_motion_state)
    polygon = Polygon2d(
        np.array([0.0, 0.0, 0.0]),
        [Point2d(-1.5, 0), Point2d(-1.5, 8), Point2d(1.5, 8), Point2d(1.5, 0)])
    # Move the polygon to the final (x, y) position of the track.
    polygon = polygon.Translate(
        Point2d(bark_state[0, int(StateDefinition.X_POSITION)],
                bark_state[0, int(StateDefinition.Y_POSITION)]))
    return GoalDefinitionPolygon(polygon)
def create_single_scenario(self):
    """Create one Scenario from the configured agent JSON descriptions.

    Each agent gets a sequential goal (the same goal repeated `num_reached`
    times); a 2D `state` spec is interpreted as per-dimension [low, high]
    sampling ranges.

    Fixes: removed the unused local `agent_list` and a commented-out line.
    """
    scenario = Scenario(map_file_name=self._map_file_name,
                        json_params=self._params.ConvertToDict())
    world = scenario.GetWorldState()
    scenario._agent_list = []
    for agent_json_ in self._local_params["Agents"]:
        agent_json = agent_json_["VehicleModel"].clone()
        agent_json["map_interface"] = world.map
        # Goal polygon translated to the configured center pose.
        goal_polygon = Polygon2d(
            [0, 0, 0], np.array(agent_json["goal"]["polygon_points"]))
        goal_polygon = goal_polygon.Translate(
            Point2d(agent_json["goal"]["center_pose"][0],
                    agent_json["goal"]["center_pose"][1]))
        goal = GoalDefinitionPolygon(goal_polygon)
        if "goal_type" in agent_json["goal"]:
            goal_type = agent_json["goal"]["goal_type"]
            if goal_type == "GoalDefinitionStateLimits":
                goal = GoalDefinitionStateLimits(goal_polygon, (1.49, 1.65))
        # The same goal must be reached `num_reached` times in sequence.
        sequential_goals = []
        for _ in range(self._local_params["goal"]["num_reached", "num", 5]):
            sequential_goals.append(goal)
        agent_json["goal_definition"] = GoalDefinitionSequential(
            sequential_goals)
        # A 2D state spec means sample uniformly between column 0 (low)
        # and column 1 (high).
        agent_state = np.array(agent_json["state"])
        if len(np.shape(agent_state)) > 1:
            agent_state = np.random.uniform(low=agent_state[:, 0],
                                            high=agent_state[:, 1])
        agent_json["state"] = agent_state.tolist()
        agent = self._json_converter.agent_from_json(
            agent_json, param_server=self._params)
        agent.SetAgentId(agent_json["id"])
        scenario._agent_list.append(agent)
    # TODO(@hart): this could be mult. agents
    scenario._eval_agent_ids = self._local_params[
        "controlled_ids", "IDs of agents to be controlled. ", [0]]
    return scenario
def GoalDefinitionFromTrack(track, end, xy_offset):
    """Build a square polygon goal (side `goal_size`) centered on the track's
    final position, shifted by `xy_offset`.

    Note: `end` is accepted for interface compatibility but is not used here.
    """
    goal_size = 12.0
    motion_states = list(dict_utils.get_item_iterator(track.motion_states))
    # Goal position is the spatial position of the last state.
    final_state = motion_states[-1][1]
    bark_state = BarkStateFromMotionState(final_state, xy_offset=xy_offset)
    corners = [
        Point2d(0.0, 0.0),
        Point2d(goal_size, 0.0),
        Point2d(goal_size, goal_size),
        Point2d(0.0, goal_size),
        Point2d(0.0, 0.0)
    ]
    polygon = Polygon2d(np.array([0.5 * goal_size, 0.5 * goal_size, 0.0]),
                        corners)
    # Shift so the square is centered on the final (x, y) position.
    center_shift = Point2d(
        bark_state[0, int(StateDefinition.X_POSITION)] - 0.5 * goal_size,
        bark_state[0, int(StateDefinition.Y_POSITION)] - 0.5 * goal_size)
    return GoalDefinitionPolygon(polygon.Translate(center_shift))
def test_relevent_agents(self):
    """Only agents within the RSS relevance range are evaluated: the nearby
    agent appears in the pairwise responses, the far-away one does not.

    Fix: the local variable `map` shadowed the builtin `map`; renamed to
    `map_filename`. (The method name's spelling is the test's public
    interface and is kept.)
    """
    map_filename = "bark/runtime/tests/data/city_highway_straight.xodr"
    map_interface = EvaluatorRSSTests.load_map(map_filename)
    world = self.defaults["world"].Copy()
    world.SetMap(map_interface)

    goal_polygon_1 = Polygon2d(
        [0, 0, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
    goal_polygon_1 = goal_polygon_1.Translate(Point2d(5.5, 120))
    goal_polygon_2 = Polygon2d(
        [0, 0, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
    goal_polygon_2 = goal_polygon_2.Translate(Point2d(1.8, 120))

    # Hard coded: other_1 close to ego, other_2 far behind.
    ego_state = np.array([0, 5.5, 10, 0, 10])
    other_1_state = np.array([0, 1.8, -10, 0, 15])
    other_2_state = np.array([0, 1.8, -120, 0, 10])

    ego = Agent(ego_state, self.defaults["ego_behavior"],
                self.defaults["ego_dynamic"], self.defaults["ego_execution"],
                self.defaults["ego_shape"], self.defaults["agent_params"],
                GoalDefinitionPolygon(goal_polygon_1), map_interface)
    other_1 = Agent(other_1_state, self.defaults["other_behavior"],
                    self.defaults["other_dynamic"],
                    self.defaults["other_execution"],
                    self.defaults["other_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon_2), map_interface)

    # The far agent gets its own freshly constructed models.
    other_2_behavior = BehaviorConstantAcceleration(
        self.defaults["agent_params"])
    other_2_dynamic = SingleTrackModel(self.defaults["agent_params"])
    other_2_execution = ExecutionModelInterpolate(
        self.defaults["agent_params"])
    other_2 = Agent(other_2_state, other_2_behavior, other_2_dynamic,
                    other_2_execution, self.defaults["other_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon_2), map_interface)

    world.AddAgent(ego)
    world.AddAgent(other_1)
    world.AddAgent(other_2)

    evaluator_rss = EvaluatorRss(ego.id, map_filename,
                                 self.defaults["default_vehicle_dynamics"])
    world.Step(1)
    responses = evaluator_rss.PairwiseEvaluate(world)

    # Test GetRelevantAgents: only the nearby agent is evaluated.
    self.assertEqual(1, len(responses))
    self.assertTrue(responses[other_1.id])
    self.assertFalse(other_2.id in responses)
def goal(self, world):
    """Return a polygon goal covering the first lane corridor of
    `self._road_corridor`.

    Fix: the result of GetRoadCorridor() was bound to an unused local
    (`road_corr`); the dead binding is removed. The call itself is kept —
    presumably it generates/caches the corridor on the map, which
    `self._road_corridor` then reflects (TODO confirm).
    """
    world.map.GetRoadCorridor(self._road_ids, XodrDrivingDirection.forward)
    lane_corr = self._road_corridor.lane_corridors[0]
    return GoalDefinitionPolygon(lane_corr.polygon)
def test_evaluator_drivable_area(self):
    """EvaluatorDrivableArea on an agent with an oversized rectangular
    footprint: the evaluation result must be False. The scene is also
    rendered for visual inspection."""
    params = ParameterServer()
    world = World(params)

    # Model definitions
    behavior_model = BehaviorConstantAcceleration(params)
    execution_model = ExecutionModelInterpolate(params)
    dynamic_model = SingleTrackModel(params)

    # Two-lane straight road
    map_interface = MapInterface()
    map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
    world.SetMap(map_interface)

    # Oversized rectangular footprint instead of a CarLimousine shape.
    shape = Polygon2d(
        [1.25, 1, 0],
        [Point2d(-1, -1), Point2d(-1, 1), Point2d(3, 1), Point2d(3, -1)])
    init_state = np.array([0, 3, -1.75, 0, 5])
    agent_params = params.AddChild("agent1")
    goal_polygon = Polygon2d(
        [1, 1, 0],
        [Point2d(0, 0), Point2d(0, 2), Point2d(2, 2), Point2d(2, 0)])
    goal_polygon = goal_polygon.Translate(Point2d(50, -2))
    agent = Agent(init_state, behavior_model, dynamic_model, execution_model,
                  shape, agent_params, GoalDefinitionPolygon(goal_polygon),
                  map_interface)
    world.AddAgent(agent)

    world.AddEvaluator("drivable_area", EvaluatorDrivableArea())
    info = world.Evaluate()
    self.assertFalse(info["drivable_area"])

    # Draw the world, goal and road corridor.
    viewer = MPViewer(params=params, use_world_bounds=True)
    viewer.drawGoalDefinition(goal_polygon, color=(1, 0, 0), alpha=0.5,
                              facecolor=(1, 0, 0))
    viewer.drawWorld(world)
    viewer.drawRoadCorridor(agent.road_corridor)
    viewer.show(block=False)
def create_single_scenario(self):
    """Create one Scenario: other agents placed uniformly along source/sink
    center lines, plus one ego agent (either picked from the placed agents or
    sampled along its own route), and finally the ego goal definition.

    NOTE(review): `description` is built but not used in this function —
    presumably consumed elsewhere or dead; confirm.
    """
    scenario = Scenario(map_file_name=self._map_file_name,
                        json_params=self._params.ConvertToDict())
    world = scenario.GetWorldState()
    agent_list = []
    # OTHER AGENTS: for each source/sink pair, place agents along the
    # connecting center line with a polygon goal at the sink.
    for idx, source in enumerate(self._others_source):
        connecting_center_line, s_start, s_end = \
            self.center_line_between_source_and_sink(world.map, source,
                                                     self._others_sink[idx])
        goal_polygon = Polygon2d([0, 0, 0], [
            Point2d(-1.5, 0),
            Point2d(-1.5, 8),
            Point2d(1.5, 8),
            Point2d(1.5, 0)
        ])
        goal_polygon = goal_polygon.Translate(
            Point2d(self._others_sink[idx][0], self._others_sink[idx][1]))
        goal_definition = GoalDefinitionPolygon(goal_polygon)
        agent_list.extend(
            self.place_agents_along_linestring(world, connecting_center_line,
                                               s_start, s_end,
                                               self._agent_params,
                                               goal_definition))

    description = self._params.ConvertToDict()
    description["ScenarioGenerator"] = "UniformVehicleDistribution"

    # EGO AGENT
    ego_agent = None
    if len(self._ego_route) == 0:
        # take agent in the middle of list
        # NOTE(review): floor(num_agents / 4) selects the first quarter,
        # not the middle — confirm intent vs. the comment above.
        num_agents = len(agent_list)
        ego_agent = agent_list[math.floor(num_agents / 4)]
    else:
        # Sample an ego pose and velocity along the configured ego route.
        connecting_center_line, s_start, s_end = \
            self.center_line_between_source_and_sink(world.map,
                                                     self._ego_route[0],
                                                     self._ego_route[1])
        sego = self.sample_srange_uniform([s_start, s_end])
        xy_point = GetPointAtS(connecting_center_line, sego)
        angle = GetTangentAngleAtS(connecting_center_line, sego)
        velocity = self.sample_velocity_uniform(self._ego_velocity_range)
        agent_state = np.array(
            [0, xy_point.x(), xy_point.y(), angle, velocity])
        agent_params = self._agent_params.copy()
        agent_params["state"] = agent_state
        # goal for driving corridor generation
        goal_polygon = Polygon2d([0, 0, 0], [
            Point2d(-1.5, 0),
            Point2d(-1.5, 8),
            Point2d(1.5, 8),
            Point2d(1.5, 0)
        ])
        goal_polygon = goal_polygon.Translate(
            Point2d(self._ego_route[1][0], self._ego_route[1][1]))
        goal_definition = GoalDefinitionPolygon(goal_polygon)
        agent_params["goal_definition"] = goal_definition
        agent_params["map_interface"] = world.map
        converter = ModelJsonConversion()
        ego_agent = converter.agent_from_json(agent_params,
                                              self._params["Agent"])
        # TODO(@bernhard): ensure that ego agent not collides with others
        agent_list.append(ego_agent)

    # EGO Agent Goal Definition
    if len(self._ego_goal_start) == 0:
        if len(self._ego_route) == 0:
            # ego agent is one of the random agents, so the goal definition is
            # already set
            pass
        else:
            # Polygon goal at the configured ego goal end point.
            goal_polygon = Polygon2d([0, 0, 0], [
                Point2d(-1.5, 0),
                Point2d(-1.5, 8),
                Point2d(1.5, 8),
                Point2d(1.5, 0)
            ])
            goal_polygon = goal_polygon.Translate(
                Point2d(self._ego_goal_end[0], self._ego_goal_end[1]))
            ego_agent.goal_definition = GoalDefinitionPolygon(goal_polygon)
    else:
        # State-limits goal: a corridor polygon around the center line
        # between the configured goal start and end.
        connecting_center_line, s_start, s_end = \
            self.center_line_between_source_and_sink(world.map,
                                                     self._ego_goal_start,
                                                     self._ego_goal_end)
        goal_center_line = GetLineFromSInterval(connecting_center_line,
                                                s_start, s_end)
        # build polygon representing state limits
        lims = self._ego_goal_state_limits
        goal_limits_left = goal_center_line.Translate(
            Point2d(-lims[0], -lims[1]))
        goal_limits_right = goal_center_line.Translate(
            Point2d(lims[0], lims[1]))
        goal_limits_right.Reverse()
        goal_limits_left.AppendLinestring(goal_limits_right)
        polygon = Polygon2d([0, 0, 0], goal_limits_left)
        ego_agent.goal_definition = GoalDefinitionStateLimits(
            polygon, (1.57 - 0.08, 1.57 + 0.08))

    # only one agent is ego in the middle of all other agents
    scenario._agent_list = agent_list
    scenario._eval_agent_ids = [ego_agent.id]
    return scenario