Code example #1
File: py_evaluation_tests.py Project: weigaofei/bark
    def test_number_of_agents(self):
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarLimousine()
        init_state = np.array([0, 13, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        init_state2 = np.array([0, 16, -1.75, 0, 5])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        evaluator = EvaluatorNumberOfAgents(agent.id)
        world.AddEvaluator("num_agents", evaluator)

        info = world.Evaluate()
        self.assertEqual(info["num_agents"], len(world.agents))
        # do it once more
        self.assertEqual(info["num_agents"], len(world.agents))

        world.RemoveAgentById(agent2.id)
        info = world.Evaluate()
        # evaluator should still hold two
        self.assertNotEqual(info["num_agents"], len(world.agents))
        self.assertEqual(info["num_agents"], 2)

        world.Step(0.1)
        info = world.Evaluate()
        # evaluator should still hold two
        self.assertEqual(info["num_agents"], 2)
Code example #2
File: py_evaluation_tests.py Project: weigaofei/bark
    def test_gap_distance_front(self):
        # World Definition
        params = ParameterServer()
        world = World(params)

        gap = 10

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarLimousine()
        init_state = np.array([0, 13, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        init_state2 = np.array([0, 13 + gap, -1.75, 0, 5])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        world.Step(0.1)

        evaluator = EvaluatorGapDistanceFront(agent.id)
        world.AddEvaluator("gap", evaluator)

        info = world.Evaluate()
        self.assertAlmostEqual(info["gap"],
                               gap - agent_2d_shape.front_dist -
                               agent_2d_shape.rear_dist,
                               places=4)
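
The expected value in the final assertion is bumper-to-bumper rather than center-to-center: the two init states place the agents' centers gap apart, and the evaluator apparently subtracts the ego's front overhang and the leader's rear overhang. A tiny sketch with made-up overhang values (the real CarLimousine dimensions differ):

    gap = 10                           # center-to-center, from the init states
    front_dist, rear_dist = 2.3, 1.4   # hypothetical center-to-bumper distances
    expected = gap - front_dist - rear_dist
    print(expected)                    # -> 6.3, free space between the bumpers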
Code example #3
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 2*2*2) # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets

        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
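
The terminal_when dictionary maps an evaluator name to a predicate over that evaluator's current result; a scenario run presumably stops as soon as any predicate fires. A standalone sketch of that contract (illustrative only, not the BenchmarkRunner internals):

    def is_terminal(eval_results, terminal_when):
        # Stop the scenario once any registered predicate fires on the
        # result of the evaluator it is attached to.
        return any(predicate(eval_results[name])
                   for name, predicate in terminal_when.items())

    # With the terminal_when from the test above:
    is_terminal({"collision": False, "max_steps": 1}, terminal_when)  # False
    is_terminal({"collision": False, "max_steps": 3}, terminal_when)  # True: 3 > 2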
Code example #4
    def test_database_multiprocessing_history(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10)
        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 2*2*2)

        rst = benchmark_runner.run_benchmark_config(3, viewer=None, maintain_history=True)
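        # Histories are keyed by benchmark-config index, so entry 3 holds
        # the world-state sequence of the config that was just re-run.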
        scenario_history = rst.get_histories()[3]
        print(scenario_history)
        params = ParameterServer()
        viewer = MPViewer(
              params=params,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[1].GetWorldState(),
                          eval_agent_ids=scenario_history[1].eval_agent_ids)

        viewer.show(block=True)
Code example #5
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10)

        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 20) # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets

        params2 = ParameterServer()
        viewer = MPViewer(
              params=params2,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
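        # Re-run a single benchmark config (index 10) with rendering attached,
        # e.g. to visually inspect one scenario.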
        rst = benchmark_runner.run_benchmark_config(10, viewer=viewer)
Code example #6
  def test_planning_time(self):
    param_server = ParameterServer()
    # Model Definition
    behavior_model = BehaviorConstantAcceleration(param_server)
    execution_model = ExecutionModelInterpolate(param_server)
    dynamic_model = SingleTrackModel(param_server)

    # Agent Definition
    agent_2d_shape = CarLimousine()
    init_state = np.array([0, -191.789,-50.1725, 3.14*3.0/4.0, 150/3.6])
    agent_params = param_server.AddChild("agent1")
    goal_polygon = Polygon2d([0, 0, 0],
                             [Point2d(-4,-4),
                              Point2d(-4,4),
                              Point2d(4,4),
                              Point2d(4,-4)])
    goal_polygon = goal_polygon.Translate(Point2d(-191.789,-50.1725))

    agent = Agent(init_state,
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                GoalDefinitionPolygon(goal_polygon),
                  None)

    world = World(param_server)
    world.AddAgent(agent)
    evaluator = EvaluatorPlanningTime(agent.id)
    world.AddEvaluator("time", evaluator)


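    # No world.Step() has happened yet, so the recorded planning time is
    # still zero.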
    info = world.Evaluate()
    self.assertEqual(info["time"], 0.0)
Code example #7
File: py_evaluation_tests.py Project: valaxkong/bark
  def test_one_agent_at_goal_state_limits(self):
    param_server = ParameterServer()
    # Model Definition
    behavior_model = BehaviorConstantAcceleration(param_server)
    execution_model = ExecutionModelInterpolate(param_server)
    dynamic_model = SingleTrackModel(param_server)

    # Agent Definition
    agent_2d_shape = CarLimousine()
    init_state = np.array([0, -191.789,-50.1725, 3.14*3.0/4.0, 150/3.6])
    agent_params = param_server.AddChild("agent1")
    goal_polygon = Polygon2d([0, 0, 0],
                             [Point2d(-1,-1),
                              Point2d(-1,1),
                              Point2d(1,1),
                              Point2d(1,-1)])
    goal_polygon = goal_polygon.Translate(Point2d(-191.789,-50.1725))

    agent = Agent(init_state,
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                GoalDefinitionStateLimits(goal_polygon, (3.14*3.0/4.0-0.08, 3.14*3.0/4.0+0.08)),
                  None)

    world = World(param_server)
    world.AddAgent(agent)
    evaluator = EvaluatorGoalReached(agent.id)
    world.AddEvaluator("success", evaluator)


    info = world.Evaluate()
    self.assertEqual(info["success"], True)
Code example #8
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            },
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            },
            "test_evaluator_serializable": TestPythonEvaluatorSerializable()
        }
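        # Three ways of specifying an evaluator are mixed above: a registered
        # class name (string), a type name plus a ParameterServer, and a
        # ready-made evaluator instance.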
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           log_eval_avg_every=1,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Code example #9
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")

        # one run; a benchmark checkpoint is dumped after every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check twice: first, merge from the checkpoints
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)
        # second, load the already merged results
        self.assertTrue(os.path.exists(os.path.join("checkpoints1/merged_results.ckpnt")))
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)

        configs_to_run = BenchmarkRunner.get_configs_to_run(benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 10)

        benchmark_runner2 = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=1,
                                           checkpoint_dir="checkpoints1/",
                                           merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=7)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that results are maintained in the existing result dump: 30 from the previous run + 7 after the new checkpoint
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 37)
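
The resume logic asserted above filters the runner's full config list against what the merged checkpoint already contains: 40 configs in total, 30 already benchmarked, 10 left to run. A sketch of that filtering, assuming BenchmarkConfig exposes a config_idx field for get_configs_to_run to key on:

    def configs_still_to_run(all_configs, merged_result):
        # Drop every config whose index already appears in the merged result.
        done = {bc.config_idx for bc in merged_result.get_benchmark_configs()}
        return [c for c in all_configs if c.config_idx not in done]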
Code example #10
    def test_database_multiprocessing_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=1, test_world_steps=2, num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10,
                                           num_cpus=4,
                                           checkpoint_dir="checkpoints2/",
                                           merge_existing=False)
        benchmark_runner.clear_checkpoint_dir()
        # one run; a benchmark checkpoint is dumped after every 3 configs
        result = benchmark_runner.run(checkpoint_every=3)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
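        # Presumably 4 worker processes x 10 configs each, checkpointed every
        # 3 configs: 3 dumps x 3 configs = 9 persisted per worker.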
        self.assertEqual(len(df.index), 4*9)
        # self.assertEqual(len(merged_result.get_histories()), 4*9)
        self.assertEqual(len(merged_result.get_benchmark_configs()), 4*9)

        configs_to_run = BenchmarkRunner.get_configs_to_run(benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 4)
        ray.shutdown()
        benchmark_runner2 = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=1,
                                           checkpoint_dir="checkpoints2/",
                                           merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=1)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that the existing result dump is incorporated when merging results
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Code example #11
    def __init__(self, init_state, goal_polygon, map_interface, params):

        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)
        agent_2d_shape = CarLimousine()

        agent_params = params.AddChild("agent")
        super(TestAgent,
              self).__init__(init_state, behavior_model, dynamic_model,
                             execution_model, agent_2d_shape, agent_params,
                             GoalDefinitionPolygon(goal_polygon),
                             map_interface)
Code example #12
    def setUp(self):
        param_server = ParameterServer()
        world = World(param_server)

        self.defaults = dict()
        self.defaults["world"] = world
        self.defaults["ego_behavior"] = BehaviorConstantAcceleration(
            param_server)
        self.defaults["ego_dynamic"] = SingleTrackModel(param_server)
        self.defaults["ego_execution"] = ExecutionModelInterpolate(
            param_server)
        self.defaults["ego_shape"] = CarLimousine()
        self.defaults["other_behavior"] = BehaviorConstantAcceleration(
            param_server)
        self.defaults["other_dynamic"] = SingleTrackModel(param_server)
        self.defaults["other_execution"] = ExecutionModelInterpolate(
            param_server)
        self.defaults["other_shape"] = CarLimousine()
        self.defaults["agent_params"] = param_server.addChild("agent")
        self.defaults["default_vehicle_dynamics"] = [
            1.7, -1.7, -1.69, -1.67, 0.2, -0.8, 0.1, 1.
        ]
Code example #13
File: py_agent_tests.py Project: weigaofei/bark
 def test_write_params_agent(self):
     params = ParameterServer()
     behavior = BehaviorConstantAcceleration(params)
     execution = ExecutionModelInterpolate(params)
     dynamic = SingleTrackModel(params)
     shape = Polygon2d([1.25, 1, 0], [
         Point2d(0, 0),
         Point2d(0, 2),
         Point2d(4, 2),
         Point2d(4, 0),
         Point2d(0, 0)
     ])
     init_state = np.zeros(4)
     agent = Agent(init_state, behavior, dynamic, execution, shape,
                   params.AddChild("agent"))
     params.Save("written_agents_param_test.json")
Code example #14
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            }
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        benchmark_runner.clear_checkpoint_dir()
        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            20)  # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets
Code example #15
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")
        benchmark_runner.clear_checkpoint_dir()
        # one run; a benchmark checkpoint is dumped after every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check twice first, merging from checkpoints
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Code example #16
File: py_agent_tests.py Project: weigaofei/bark
    def test_draw_agents(self):
        params = ParameterServer()
        behavior = BehaviorConstantAcceleration(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = Polygon2d([1.25, 1, 0], [
            Point2d(0, 0),
            Point2d(0, 2),
            Point2d(4, 2),
            Point2d(4, 0),
            Point2d(0, 0)
        ])
        shape2 = CarLimousine()

        init_state = [0, 3, 2, 1]
        init_state2 = [0, 0, 5, 4]

        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"))
        agent2 = Agent(init_state2, behavior, dynamic, execution, shape2,
                       params.AddChild("agent"))
Code example #17
File: py_world_tests.py Project: mansoorcheema/bark
    def test_world(self):
        # create agent
        params = ParameterServer()
        behavior = BehaviorConstantAcceleration(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = Polygon2d([1.25, 1, 0], [
            Point2d(0, 0),
            Point2d(0, 2),
            Point2d(4, 2),
            Point2d(4, 0),
            Point2d(0, 0)
        ])
        init_state = np.array([0, 0, 0, 0, 5])
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"))
        road_map = OpenDriveMap()
        newXodrRoad = XodrRoad()
        newXodrRoad.id = 1
        newXodrRoad.name = "Autobahn A9"
        newPlanView = PlanView()
        newPlanView.AddLine(Point2d(0, 0), 1.57079632679, 10)
        newXodrRoad.plan_view = newPlanView
        line = newXodrRoad.plan_view.GetReferenceLine().ToArray()
        p = Point2d(line[-1][0], line[-1][1])
        newXodrRoad.plan_view.AddSpiral(p, 1.57079632679, 50.0, 0.0, 0.3, 0.4)
        line = newXodrRoad.plan_view.GetReferenceLine()
        lane_section = XodrLaneSection(0)
        lane = XodrLane()
        lane.line = line
        lane_section.AddLane(lane)
        newXodrRoad.AddLaneSection(lane_section)
        road_map.AddRoad(newXodrRoad)

        r = Roadgraph()
        map_interface = MapInterface()
        map_interface.SetOpenDriveMap(road_map)
        map_interface.SetRoadgraph(r)
        world = World(params)
        world.AddAgent(agent)
Code example #18
    def test_relevent_agents(self):
        map = "bark/runtime/tests/data/city_highway_straight.xodr"
        map_interface = EvaluatorRSSTests.load_map(map)
        world = self.defaults["world"].Copy()
        world.SetMap(map_interface)

        goal_polygon_1 = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon_1 = goal_polygon_1.Translate(Point2d(5.5, 120))

        goal_polygon_2 = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon_2 = goal_polygon_2.Translate(Point2d(1.8, 120))

        # Hard coded
        ego_state = np.array([0, 5.5, 10, 0, 10])
        other_1_state = np.array([0, 1.8, -10, 0, 15])
        other_2_state = np.array([0, 1.8, -120, 0, 10])

        ego = Agent(ego_state, self.defaults["ego_behavior"],
                    self.defaults["ego_dynamic"],
                    self.defaults["ego_execution"], self.defaults["ego_shape"],
                    self.defaults["agent_params"],
                    GoalDefinitionPolygon(goal_polygon_1), map_interface)
        other_1 = Agent(other_1_state, self.defaults["other_behavior"],
                        self.defaults["other_dynamic"],
                        self.defaults["other_execution"],
                        self.defaults["other_shape"],
                        self.defaults["agent_params"],
                        GoalDefinitionPolygon(goal_polygon_2), map_interface)

        other_2_behavior = BehaviorConstantAcceleration(
            self.defaults["agent_params"])
        other_2_dynamic = SingleTrackModel(self.defaults["agent_params"])
        other_2_execution = ExecutionModelInterpolate(
            self.defaults["agent_params"])

        other_2 = Agent(other_2_state, other_2_behavior, other_2_dynamic,
                        other_2_execution, self.defaults["other_shape"],
                        self.defaults["agent_params"],
                        GoalDefinitionPolygon(goal_polygon_2), map_interface)

        world.AddAgent(ego)
        world.AddAgent(other_1)
        world.AddAgent(other_2)

        evaluator_rss = EvaluatorRss(ego.id, map,
                                     self.defaults["default_vehicle_dynamics"])
        world.Step(1)
        responses = evaluator_rss.PairwiseEvaluate(world)

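        # other_2 starts roughly 130 m behind the ego, so it is presumably
        # outside the RSS relevance range and gets no pairwise response.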
        self.assertEqual(1, len(responses))  # Test GetRelevantAgents
        self.assertTrue(responses[other_1.id])
        self.assertFalse(other_2.id in responses)
Code example #19
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=3, num_serialize_scenarios=2)
        # to find database files
        cwd = os.getcwd()
        if not debug:
          os.chdir("../benchmark_database/")
        else:
          os.chdir("bazel-bin/bark/benchmark/tests/py_benchmark_process_tests.runfiles/benchmark_database")
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=2)

        result = benchmark_runner.run(maintain_history=True)

        result.dump(os.path.join("./benchmark_results"), dump_configs=True,
                    dump_histories=True, max_mb_per_file=1)
        result_loaded = BenchmarkResult.load(os.path.join("./benchmark_results"))
        result_loaded.load_histories()
        result_loaded.load_benchmark_configs()

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(
              params=params2,
              center=[5112, 5165],
              y_length = 120,
              enforce_y_length=True,
              axis = fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
        configs = analyzer.find_configs(criteria={"behavior": lambda x: x == "IDM", "success": lambda x: not x})
        configs_const = analyzer.find_configs(criteria={"behavior": lambda x: x == "Const", "success": lambda x: not x})

        os.chdir(cwd)
        # analyzer.visualize(configs_idx_list=configs,
        #                    viewer=viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(
              params=params2,
              center=[5112, 5165],
              y_length = 120,
              enforce_y_length=True,
              axis = ax1)
        viewer2 = MPViewer(
              params=params2,
              center=[5112, 5165],
              y_length = 120,
              enforce_y_length=True,
              axis = ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2],
                           viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=10, fontsize=12)
Code example #20
def run_configuration(argv):
  params = ParameterServer()
  # NOTE: Modify these paths to specify your preferred path for checkpoints and summaries
  # params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = "/Users/hart/Development/bark-ml/checkpoints/"
  # params["ML"]["TFARunner"]["SummaryPath"] = "/Users/hart/Development/bark-ml/checkpoints/"
  params["Visualization"]["Agents"]["Alpha"]["Other"] = 0.2
  params["Visualization"]["Agents"]["Alpha"]["Controlled"] = 0.2
  params["Visualization"]["Agents"]["Alpha"]["Controlled"] = 0.2
  params["ML"]["VisualizeCfWorlds"] = False
  params["ML"]["VisualizeCfHeatmap"] = False
  # params["ML"]["ResultsFolder"] = "/Users/hart/Development/bark-ml/results/data/"

  # viewer = MPViewer(
  #   params=params,
  #   x_range=[-35, 35],
  #   y_range=[-35, 35],
  #   follow_agent_id=True)


  # create environment
  bp = ContinuousMergingBlueprint(params,
                                  num_scenarios=2500,
                                  random_seed=0)

  observer = GraphObserver(params=params)

  behavior_model_pool = []
  for count, a in enumerate([-5., 0., 5.]):
    local_params = params.AddChild("local_"+str(count))
    local_params["BehaviorConstantAcceleration"]["ConstAcceleration"] = a
    behavior = BehaviorConstantAcceleration(local_params)
    behavior_model_pool.append(behavior)
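  # The pool spans a braking (-5 m/s^2), a constant-velocity (0) and an
  # accelerating (+5 m/s^2) variant for the counterfactual worlds below.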

  env = CounterfactualRuntime(
    blueprint=bp,
    observer=observer,
    render=False,
    params=params,
    behavior_model_pool=behavior_model_pool)
  sac_agent = BehaviorGraphSACAgent(environment=env,
                                    observer=observer,
                                    params=params)
  env.ml_behavior = sac_agent
  runner = SACRunner(params=params,
                     environment=env,
                     agent=sac_agent)

  if FLAGS.mode == "train":
    runner.SetupSummaryWriter()
    runner.Train()
  elif FLAGS.mode == "visualize":
    runner._environment._max_col_rate = 0.
    runner.Run(num_episodes=1, render=True)
  elif FLAGS.mode == "evaluate":
    for cr in np.arange(0, 1, 0.1):
      runner._environment._max_col_rate = cr
      runner.Run(num_episodes=250, render=False, max_col_rate=cr)
    runner._environment._tracer.Save(
      params["ML"]["ResultsFolder"] + "evaluation_results_runtime.pckl")
    goal_reached = runner._tracer.success_rate
    runner._tracer.Save(
      params["ML"]["ResultsFolder"] + "evaluation_results_runner.pckl")
Code example #21
File: py_evaluation_tests.py Project: valaxkong/bark
  def test_one_agent_at_goal_state_limits_frenet(self):
    param_server = ParameterServer()
    # Model Definition
    behavior_model = BehaviorConstantAcceleration(param_server)
    execution_model = ExecutionModelInterpolate(param_server)
    dynamic_model = SingleTrackModel(param_server)

    # Agent Definition
    agent_2d_shape = CarLimousine()
    agent_params = param_server.AddChild("agent1")

    center_line = Line2d()
    center_line.AddPoint(Point2d(5.0, 5.0))
    center_line.AddPoint(Point2d(10.0, 10.0))
    center_line.AddPoint(Point2d(20.0, 10.0))

    max_lateral_dist = (0.4,1)
    max_orientation_diff = (0.08, 0.1)
    velocity_range = (20.0, 25.0)
    goal_definition = GoalDefinitionStateLimitsFrenet(center_line,
                    max_lateral_dist, max_orientation_diff,
                    velocity_range)

    # not at goal: x, y off the center line; orientation and velocity ok
    agent1 = Agent(np.array([0, 6, 8, 3.14/4.0 , velocity_range[0]]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)

    # at goal: x, y, orientation and velocity all within the limits
    agent2 = Agent(np.array([0, 5.0, 5.5, 3.14/4.0 , velocity_range[1]]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)

    # not at goal: x, y and v ok, but orientation just above the upper bound
    agent3 = Agent(np.array([0, 5, 5.5, 3.14/4.0+max_orientation_diff[1]+0.001 , 20]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)


    # not at goal: x, y and orientation ok, but velocity below the range
    agent4 = Agent(np.array([0, 5, 4.5, 3.14/4-max_orientation_diff[0], velocity_range[0]-0.01]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)

    # at goal: x, y just inside the lateral limit
    agent5 = Agent(np.array([0, 15, 10-max_lateral_dist[0]+0.05, 0, velocity_range[1]]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)

    # not at goal: x, y slightly outside the lateral limit
    agent6 = Agent(np.array([0, 15, 10+max_lateral_dist[0]+0.05, 3.14/4+max_orientation_diff[0], velocity_range[0]]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)


    # not at goal: x, y and v ok, but orientation just below the lower bound
    agent7 = Agent(np.array([0, 5, 5.5, 3.14/4.0-max_orientation_diff[0]-0.001 , 20]),
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)


    world = World(param_server)
    world.AddAgent(agent1)
    world.AddAgent(agent2)
    world.AddAgent(agent3)
    world.AddAgent(agent4)
    world.AddAgent(agent5)
    world.AddAgent(agent6)
    world.AddAgent(agent7)

    evaluator1 = EvaluatorGoalReached(agent1.id)
    evaluator2 = EvaluatorGoalReached(agent2.id)
    evaluator3 = EvaluatorGoalReached(agent3.id)
    evaluator4 = EvaluatorGoalReached(agent4.id)
    evaluator5 = EvaluatorGoalReached(agent5.id)
    evaluator6 = EvaluatorGoalReached(agent6.id)
    evaluator7 = EvaluatorGoalReached(agent7.id)
    world.AddEvaluator("success1", evaluator1)
    world.AddEvaluator("success2", evaluator2)
    world.AddEvaluator("success3", evaluator3)
    world.AddEvaluator("success4", evaluator4)
    world.AddEvaluator("success5", evaluator5)
    world.AddEvaluator("success6", evaluator6)
    world.AddEvaluator("success7", evaluator7)


    info = world.Evaluate()
    self.assertEqual(info["success1"], False)
    self.assertEqual(info["success2"], True)
    self.assertEqual(info["success3"], False)
    self.assertEqual(info["success4"], False)
    self.assertEqual(info["success5"], True)
    self.assertEqual(info["success6"], False)
    self.assertEqual(info["success7"], False)
Code example #22
File: py_world_tests.py Project: mansoorcheema/bark
    def test_evaluator_drivable_area(self):
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)
        #open_drive_map = world.map.GetOpenDriveMap()

        #agent_2d_shape = CarLimousine()
        agent_2d_shape = Polygon2d(
            [1.25, 1, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(3, 1),
             Point2d(3, -1)])
        init_state = np.array([0, 3, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(
            init_state,
            behavior_model,
            dynamic_model,
            execution_model,
            agent_2d_shape,
            agent_params,
            GoalDefinitionPolygon(goal_polygon),  # goal_lane_id
            map_interface)
        world.AddAgent(agent)

        evaluator = EvaluatorDrivableArea()
        world.AddEvaluator("drivable_area", evaluator)

        info = world.Evaluate()
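        # False means no violation here: the 4 m x 2 m agent polygon placed
        # at x = 3 m presumably lies fully inside the lane.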
        self.assertFalse(info["drivable_area"])

        viewer = MPViewer(params=params, use_world_bounds=True)

        # Draw map
        viewer.drawGoalDefinition(goal_polygon,
                                  color=(1, 0, 0),
                                  alpha=0.5,
                                  facecolor=(1, 0, 0))
        viewer.drawWorld(world)
        viewer.drawRoadCorridor(agent.road_corridor)
        viewer.show(block=False)