Code Example #1
    def test_one_agent_at_goal_state_limits(self):
        param_server = ParameterServer()
        # Model Definition
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition
        agent_2d_shape = CarLimousine()
        # state layout: [t, x, y, theta, v]; 150 / 3.6 converts km/h to m/s
        init_state = np.array(
            [0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
        agent_params = param_server.AddChild("agent1")
        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_polygon = goal_polygon.Translate(Point2d(-191.789, -50.1725))

        agent = Agent(
            init_state, behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params,
            GoalDefinitionStateLimits(
                goal_polygon,
                (3.14 * 3.0 / 4.0 - 0.08, 3.14 * 3.0 / 4.0 + 0.08)), None)

        world = World(param_server)
        world.AddAgent(agent)
        evaluator = EvaluatorGoalReached(agent.id)
        world.AddEvaluator("success", evaluator)

        info = world.Evaluate()
        self.assertEqual(info["success"], True)
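The listings on this page drop their import blocks. Below is a minimal sketch of what Code Example #1 would need to run standalone; the module paths are assumptions based on older BARK layouts (newer releases moved these under bark.core.*), so treat them as a starting point rather than the fork's actual header:

# Hedged sketch only: import paths follow an older BARK layout and are not
# guaranteed for the zeta1999/bark fork referenced below.
import numpy as np
from modules.runtime.commons.parameters import ParameterServer
from bark.models.behavior import BehaviorConstantVelocity
from bark.models.execution import ExecutionModelInterpolate
from bark.models.dynamic import SingleTrackModel
from bark.geometry import Point2d, Line2d, Polygon2d
from bark.geometry.standard_shapes import CarLimousine
from bark.world import World
from bark.world.agent import Agent
from bark.world.evaluation import EvaluatorGoalReached
from bark.world.goal_definition import GoalDefinitionStateLimits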
Code Example #2
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run()

        params2 = ParameterServer()
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        rst, _ = benchmark_runner.run_benchmark_config(10, viewer=viewer)

        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 40)

        rst, scenario_history = benchmark_runner.run_benchmark_config(
            11, viewer=None, maintain_history=True)
        print(scenario_history)
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[5].GetWorldState(),
                         eval_agent_ids=scenario_history[5].eval_agent_ids)

        viewer.show(block=True)

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
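The benchmark examples additionally pull in the separate benchmark_database project and the benchmark runner. Again a hedged sketch; these import paths are assumptions and moved between releases:

# Hedged sketch only: import paths are assumptions and differ across releases.
from serialization.database_serializer import DatabaseSerializer  # benchmark_database project
from load.benchmark_database import BenchmarkDatabase  # benchmark_database project
from bark.benchmark.benchmark_runner import BenchmarkRunner, BenchmarkResult
from bark.benchmark.benchmark_runner_mp import BenchmarkRunnerMP
from modules.runtime.viewer.matplotlib_viewer import MPViewer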
Code Example #3
File: py_agent_tests.py  Project: zeta1999/bark
 def test_write_params_agent(self):
   params = ParameterServer()
   behavior = BehaviorConstantVelocity(params)
   execution = ExecutionModelInterpolate(params)
   dynamic = SingleTrackModel(params)
   shape = Polygon2d([1.25, 1, 0], [
       Point2d(0, 0),
       Point2d(0, 2),
       Point2d(4, 2),
       Point2d(4, 0),
       Point2d(0, 0)
   ])
   init_state = np.zeros(4)
   agent = Agent(init_state, behavior, dynamic, execution, shape,
                 params.AddChild("agent"))
   params.Save("written_agents_param_test.json")
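params.Save dumps the parameter tree, which should include the "agent" child created above, as JSON. A quick standard-library check of the dump (nothing BARK-specific assumed):

import json

# Inspect the parameter dump written by params.Save(...) above; the "agent"
# subtree added via params.AddChild("agent") should appear in the tree.
with open("written_agents_param_test.json") as f:
    dumped = json.load(f)
print(list(dumped.keys()))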
Code Example #4
File: py_agent_tests.py  Project: zeta1999/bark
  def test_draw_agents(self):
    params = ParameterServer()
    behavior = BehaviorConstantVelocity(params)
    execution = ExecutionModelInterpolate(params)
    dynamic = SingleTrackModel(params)
    shape = Polygon2d([1.25, 1, 0], [
        Point2d(0, 0),
        Point2d(0, 2),
        Point2d(4, 2),
        Point2d(4, 0),
        Point2d(0, 0)
    ])
    shape2 = CarLimousine()

    init_state = [0, 3, 2, 1]
    init_state2 = [0, 0, 5, 4]

    agent = Agent(init_state, behavior, dynamic, execution, shape,
                  params.AddChild("agent"))
    agent2 = Agent(init_state2, behavior, dynamic, execution, shape2,
                   params.AddChild("agent"))
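The excerpt stops before anything is drawn. A hypothetical continuation, not part of the original test, built only from calls that appear in the other examples on this page (imports as in the sketches above):

# Hypothetical continuation, not from the original test: put both agents
# into a world and render them with the MPViewer API used elsewhere here.
world = World(params)
world.AddAgent(agent)
world.AddAgent(agent2)
viewer = MPViewer(params=params, use_world_bounds=True)
viewer.drawWorld(world)
viewer.show(block=False)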
Code Example #5
File: py_world_tests.py  Project: zeta1999/bark
    def test_world(self):
        # create agent
        params = ParameterServer()
        behavior = BehaviorConstantVelocity(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = Polygon2d([1.25, 1, 0], [
            Point2d(0, 0),
            Point2d(0, 2),
            Point2d(4, 2),
            Point2d(4, 0),
            Point2d(0, 0)
        ])
        init_state = np.array([0, 0, 0, 0, 5])
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"))
        road_map = OpenDriveMap()
        newXodrRoad = XodrRoad()
        newXodrRoad.id = 1
        newXodrRoad.name = "Autobahn A9"
        newPlanView = PlanView()
        newPlanView.AddLine(Point2d(0, 0), 1.57079632679, 10)
        newXodrRoad.plan_view = newPlanView
        line = newXodrRoad.plan_view.GetReferenceLine().ToArray()
        p = Point2d(line[-1][0], line[-1][1])
        newXodrRoad.plan_view.AddSpiral(p, 1.57079632679, 50.0, 0.0, 0.3, 0.4)
        line = newXodrRoad.plan_view.GetReferenceLine()
        lane_section = XodrLaneSection(0)
        lane = XodrLane()
        lane.line = line
        lane_section.AddLane(lane)
        newXodrRoad.AddLaneSection(lane_section)
        road_map.AddRoad(newXodrRoad)

        r = Roadgraph()
        map_interface = MapInterface()
        map_interface.SetOpenDriveMap(road_map)
        map_interface.SetRoadgraph(r)
        world = World(params)
        world.AddAgent(agent)
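The excerpt ends once the map and the agent are in place. To actually simulate, the world has to be stepped; a hedged continuation assuming BARK's World.Step(delta_time) Python binding:

# Hypothetical continuation, not from the original test: advance the
# simulation in fixed steps, assuming World.Step(delta_time) exists.
for _ in range(10):
    world.Step(0.2)  # advance the world by 0.2 s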
Code Example #6
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 behaviors * 2 serialized scenarios * 2 scenario sets

        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
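Because the result carries the evaluation groups behavior and scen_set, per-group rates fall out of a plain pandas aggregation on the data frame returned by get_data_frame() (a sketch; it assumes the evaluator keys show up as columns, as the criteria in Code Example #11 suggest):

# Sketch: aggregate the benchmark data frame by its evaluation groups;
# boolean evaluator columns average into success/collision rates.
rates = df.groupby(["behavior", "scen_set"]).mean(numeric_only=True)
print(rates[["success", "collision"]])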
Code Example #7
    def test_one_agent_at_goal_state_limits_frenet(self):
        param_server = ParameterServer()
        # Model Definition
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition
        agent_2d_shape = CarLimousine()
        agent_params = param_server.AddChild("agent1")

        center_line = Line2d()
        center_line.AddPoint(Point2d(5.0, 5.0))
        center_line.AddPoint(Point2d(10.0, 10.0))
        center_line.AddPoint(Point2d(20.0, 10.0))

        max_lateral_dist = (0.4, 1)
        max_orientation_diff = (0.08, 0.1)
        velocity_range = (20.0, 25.0)
        goal_definition = GoalDefinitionStateLimitsFrenet(
            center_line, max_lateral_dist, max_orientation_diff,
            velocity_range)

        # x, y not at goal; all other state limits satisfied
        agent1 = Agent(np.array([0, 6, 8, 3.14 / 4.0, velocity_range[0]]),
                       behavior_model, dynamic_model, execution_model,
                       agent_2d_shape, agent_params, goal_definition, None)

        # x, y at goal; all other state limits satisfied
        agent2 = Agent(np.array([0, 5.0, 5.5, 3.14 / 4.0, velocity_range[1]]),
                       behavior_model, dynamic_model, execution_model,
                       agent_2d_shape, agent_params, goal_definition, None)

        # not at goal: x, y, v within limits, but orientation is not
        agent3 = Agent(
            np.array(
                [0, 5, 5.5, 3.14 / 4.0 + max_orientation_diff[1] + 0.001,
                 20]), behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params, goal_definition, None)

        # not at goal: x, y and orientation within limits, but v is not
        agent4 = Agent(
            np.array([
                0, 5, 4.5, 3.14 / 4 - max_orientation_diff[0],
                velocity_range[0] - 0.01
            ]), behavior_model, dynamic_model, execution_model, agent_2d_shape,
            agent_params, goal_definition, None)

        # at goal: x, y just inside the lateral limit
        agent5 = Agent(
            np.array([
                0, 15, 10 - max_lateral_dist[0] + 0.05, 0, velocity_range[1]
            ]), behavior_model, dynamic_model, execution_model, agent_2d_shape,
            agent_params, goal_definition, None)

        # not at goal: x, y slightly outside the lateral limit
        agent6 = Agent(
            np.array([
                0, 15, 10 + max_lateral_dist[0] + 0.05,
                3.14 / 4 + max_orientation_diff[0], velocity_range[0]
            ]), behavior_model, dynamic_model, execution_model, agent_2d_shape,
            agent_params, goal_definition, None)

        # not at goal: x, y, v within limits, but orientation is not
        agent7 = Agent(
            np.array(
                [0, 5, 5.5, 3.14 / 4.0 - max_orientation_diff[0] - 0.001,
                 20]), behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params, goal_definition, None)

        world = World(param_server)
        world.AddAgent(agent1)
        world.AddAgent(agent2)
        world.AddAgent(agent3)
        world.AddAgent(agent4)
        world.AddAgent(agent5)
        world.AddAgent(agent6)
        world.AddAgent(agent7)

        evaluator1 = EvaluatorGoalReached(agent1.id)
        evaluator2 = EvaluatorGoalReached(agent2.id)
        evaluator3 = EvaluatorGoalReached(agent3.id)
        evaluator4 = EvaluatorGoalReached(agent4.id)
        evaluator5 = EvaluatorGoalReached(agent5.id)
        evaluator6 = EvaluatorGoalReached(agent6.id)
        evaluator7 = EvaluatorGoalReached(agent7.id)
        world.AddEvaluator("success1", evaluator1)
        world.AddEvaluator("success2", evaluator2)
        world.AddEvaluator("success3", evaluator3)
        world.AddEvaluator("success4", evaluator4)
        world.AddEvaluator("success5", evaluator5)
        world.AddEvaluator("success6", evaluator6)
        world.AddEvaluator("success7", evaluator7)

        info = world.Evaluate()
        self.assertEqual(info["success1"], False)
        self.assertEqual(info["success2"], True)
        self.assertEqual(info["success3"], False)
        self.assertEqual(info["success4"], False)
        self.assertEqual(info["success5"], True)
        self.assertEqual(info["success6"], False)
        self.assertEqual(info["success7"], False)
Code Example #8
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")

        # one run; a checkpoint is dumped after every 30 benchmark configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check twice: first, merge the results from the checkpoint files
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)
        # second, load the already merged results
        self.assertTrue(
            os.path.exists(os.path.join("checkpoints1/merged_results.ckpnt")))
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)

        configs_to_run = BenchmarkRunner.get_configs_to_run(
            benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 10)

        benchmark_runner2 = BenchmarkRunner(benchmark_database=db,
                                            evaluators=evaluators,
                                            terminal_when=terminal_when,
                                            behaviors=behaviors_tested,
                                            log_eval_avg_every=1,
                                            checkpoint_dir="checkpoints1/",
                                            merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=7)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that results are maintained in the existing result dump: 30 from the previous run + 7 after the new checkpoint
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 37)
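        # Checkpoint arithmetic, spelled out from the asserts above: 40
        # configs in total, checkpoint_every=30 dumps the first 30 and
        # leaves 10 to run; the second runner merges those 30 and dumps
        # every 7, so its first checkpoint brings the merged dump to 37.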
Code Example #9
    def test_database_multiprocessing_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=1,
                                 test_world_steps=2,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10,
                                             num_cpus=4,
                                             checkpoint_dir="checkpoints2/",
                                             merge_existing=False)
        benchmark_runner.clear_checkpoint_dir()
        # one run; a checkpoint is dumped after every 3 benchmark configs
        result = benchmark_runner.run(checkpoint_every=3)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 4 * 9)
        # self.assertEqual(len(merged_result.get_histories()), 4*9)
        self.assertEqual(len(merged_result.get_benchmark_configs()), 4 * 9)

        configs_to_run = BenchmarkRunner.get_configs_to_run(
            benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 4)
        ray.shutdown()
        benchmark_runner2 = BenchmarkRunnerMP(benchmark_database=db,
                                              evaluators=evaluators,
                                              terminal_when=terminal_when,
                                              behaviors=behaviors_tested,
                                              log_eval_avg_every=1,
                                              checkpoint_dir="checkpoints2/",
                                              merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=1)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that the existing result is incorporated when merging results
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
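        # Checkpoint arithmetic, spelled out from the asserts above: 40
        # configs over num_cpus=4 workers is 10 each; checkpoint_every=3
        # yields 3 full checkpoints (9 configs) per worker, so 4 * 9 = 36
        # rows merge and 40 - 36 = 4 configs remain for the second run.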
Code Example #10
File: py_world_tests.py  Project: zeta1999/bark
    def test_evaluator_drivable_area(self):
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorConstantVelocity(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)
        #open_drive_map = world.map.GetOpenDriveMap()

        #agent_2d_shape = CarLimousine()
        agent_2d_shape = Polygon2d(
            [1.25, 1, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(3, 1),
             Point2d(3, -1)])
        init_state = np.array([0, 3, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(
            init_state,
            behavior_model,
            dynamic_model,
            execution_model,
            agent_2d_shape,
            agent_params,
            GoalDefinitionPolygon(goal_polygon),  # goal_lane_id
            map_interface)
        world.AddAgent(agent)

        evaluator = EvaluatorDrivableArea()
        world.AddEvaluator("drivable_area", evaluator)

        info = world.Evaluate()
        self.assertFalse(info["drivable_area"])

        viewer = MPViewer(params=params, use_world_bounds=True)

        # Draw map
        viewer.drawGoalDefinition(goal_polygon,
                                  color=(1, 0, 0),
                                  alpha=0.5,
                                  facecolor=(1, 0, 0))
        viewer.drawWorld(world)
        viewer.drawRoadCorridor(agent.road_corridor)
        viewer.show(block=False)
Code Example #11
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=2,
                                 test_world_steps=3,
                                 num_serialize_scenarios=2)
        # change into the benchmark_database repository so the data files are found
        cwd = os.getcwd()
        os.chdir("../benchmark_database/")
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=2)

        result = benchmark_runner.run(maintain_history=True)

        result.dump(os.path.join("./benchmark_results"), dump_configs=True, \
                         dump_histories=True, max_mb_per_file=1)
        result_loaded = BenchmarkResult.load(
            os.path.join("./benchmark_results"))
        result_loaded.load_histories()
        result_loaded.load_benchmark_configs()

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(params=params2,
                          center=[5112, 5165],
                          y_length=120,
                          enforce_y_length=True,
                          axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
        configs = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "IDM",
            "success": lambda x: not x
        })
        configs_const = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "Const",
            "success": lambda x: not x
        })

        os.chdir(cwd)
        #analyzer.visualize(configs_idx_list = configs,
        # viewer = viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax1)
        viewer2 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2],
                           viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=10,
                           fontsize=12)