Example #1
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
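        # the evaluators define the result columns of the benchmark; a scenario run is
        # terminated as soon as one of the terminal_when conditions evaluates to True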
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5)

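        # run all behavior/scenario combinations and collect the evaluation results in a pandas DataFrame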
        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 2*2*2) # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets

        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Example #2
    def test_database_multiprocessing_history(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

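        # the multiprocessing runner distributes the benchmark configs over several worker processes;
        # maintain_history=True additionally stores the serialized world states of every run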
        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 2*2*2)

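        # re-run a single benchmark configuration (index 3) and inspect its stored world state history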
        rst = benchmark_runner.run_benchmark_config(3, viewer=None, maintain_history=True)
        scenario_history = rst.get_histories()[3]
        print(scenario_history)
        params = ParameterServer()
        viewer = MPViewer(
              params=params,
              x_range=[5060, 5160],
              y_range=[5070, 5150],
              use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[1].GetWorldState(),
                          eval_agent_ids=scenario_history[1].eval_agent_ids)

        viewer.show(block=True)
Example #3
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 20) # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets

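        # re-run benchmark config 10 with a matplotlib viewer attached to visualize the scenario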
        params2 = ParameterServer()
        viewer = MPViewer(
              params=params2,
              x_range=[5060, 5160],
              y_range=[5070, 5150],
              use_world_bounds=True)
        rst = benchmark_runner.run_benchmark_config(10, viewer=viewer)
Example #4
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            },
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            },
            "test_evaluator_serializable": TestPythonEvaluatorSerializable()
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

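        # register the module that provides the custom Python evaluators referenced above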
        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           log_eval_avg_every=1,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Example #5
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")

        # first run: intermediate results are dumped to the checkpoint dir every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check merging twice: first, merge the results from the checkpoint files
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)
        # second, the merged result file already exists and is loaded when merging again
        self.assertTrue(os.path.exists(os.path.join("checkpoints1/merged_results.ckpnt")))
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)

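        # 30 of the 40 configs are already covered by the merged result, so 10 configs remain to be run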
        configs_to_run = BenchmarkRunner.get_configs_to_run(benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 10)

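        # merge_existing=True lets the second runner reuse the results already stored in the checkpoint dir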
        benchmark_runner2 = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=1,
                                           checkpoint_dir="checkpoints1/",
                                           merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every = 7)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that results are maintained in the existing result dump: 30 from the previous run + 7 after the new checkpoint
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 37)
Example #6
    def test_pickle(self):
        params = ParameterServer()
        params_child = params["test_child"]
        del params
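        # the child parameter server must remain usable after the root server has been deleted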
        value_float = params_child["Child1"]["Child2"]["ValueFloat", "Desc",
                                                       2.0]
        value_bool_false = params_child["Child1"]["Child2"]["ValueBoolFalse",
                                                            "Desc", False]
        value_bool_true = params_child["Child3"]["Child2"]["ValueBoolTrue",
                                                           "Desc", True]
        value_int = params_child["Child1"]["Child4"]["ValueInt", "Desc", 2041]
        value_list_list_float = params_child["Child1"]["Child4"][
            "ValueListListFloat", "Desc",
            [[1.0, 2.0, float(5.4545234)], [float(1.1266135), 2.0],
             [float(123.234234)]]]
        value_list_float = params_child["Child1"]["Child5"][
            "ValueListFloat", "Desc", [
                1.0, 2.0,
                float(5.4545234),
                float(1.1266135), 2.0,
                float(123.234234)
            ]]

        behavior = BehaviorIDMClassic(params_child)
        cpp_object = CppParamServerTestObject(params_child)
        cpp_unpickled = pickle_unpickle(cpp_object)
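        # round-trip the C++-backed parameter object through pickling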

        self.assertEqual(cpp_object.GetRealValue(), 2.0)
        self.assertEqual(cpp_object.GetBoolValueFalse(), False)
        self.assertEqual(cpp_object.GetBoolValueTrue(), True)
        self.assertEqual(cpp_object.GetIntValue(), 2041)
        list1 = cpp_object.GetListListFloatValue()
        list2 = [[1.0, 2.0, float(5.4545234)], [float(1.1266135), 2.0],
                 [float(123.234234)]]

        list3 = cpp_object.GetListFloatValue()
        list4 = [
            1.0, 2.0,
            float(5.4545234),
            float(1.1266135), 2.0,
            float(123.234234)
        ]

        self.assertEqual(len(list1), len(list2))
        for idx, _ in enumerate(list1):
            self.assertEqual(len(list1[idx]), len(list2[idx]))
            for idx2, _ in enumerate(list1[idx]):
                self.assertAlmostEqual(list1[idx][idx2],
                                       list2[idx][idx2],
                                       places=5)

        self.assertEqual(len(list3), len(list4))
        for idx, _ in enumerate(list3):
            self.assertAlmostEqual(list3[idx], list4[idx], places=5)
Example #7
    def test_database_multiprocessing_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=1, test_world_steps=2, num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10,
                                             num_cpus=4,
                                             checkpoint_dir="checkpoints2/",
                                             merge_existing=False)
        benchmark_runner.clear_checkpoint_dir()
        # first run: intermediate results are dumped to the checkpoint dir every 3 configs
        result = benchmark_runner.run(checkpoint_every=3)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

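        # with 4 worker processes and checkpoint_every=3, 4 * 9 of the 40 configs end up in the checkpoint files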
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 4*9)
        # self.assertEqual(len(merged_result.get_histories()), 4*9)
        self.assertEqual(len(merged_result.get_benchmark_configs()), 4*9)

        configs_to_run = BenchmarkRunner.get_configs_to_run(benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 4)
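        # shut down ray before creating a second multiprocessing runner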
        ray.shutdown()
        benchmark_runner2 = BenchmarkRunnerMP(benchmark_database=db,
                                              evaluators=evaluators,
                                              terminal_when=terminal_when,
                                              behaviors=behaviors_tested,
                                              log_eval_avg_every=1,
                                              checkpoint_dir="checkpoints2/",
                                              merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every = 1)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that the previously merged result is incorporated when merging the checkpoints again
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Example #8
    def __init__(self,
                 params=None,
                 number_of_senarios=250,
                 random_seed=0,
                 ml_behavior=None,
                 viewer=True):
        params["BehaviorIDMClassic"]["BrakeForLaneEnd"] = True
        params["BehaviorIDMClassic"]["BrakeForLaneEndEnabledDistance"] = 100.
        params["BehaviorIDMClassic"]["BrakeForLaneEndDistanceOffset"] = 25.
        params["BehaviorIDMClassic"]["DesiredVelocity"] = 12.5
        params["World"]["remove_agents_out_of_map"] = False
        left_lane = MergingLaneCorridorConfig(params=params,
                                              road_ids=[0, 1],
                                              min_vel=10.,
                                              max_vel=15.,
                                              s_min=5.,
                                              s_max=25.,
                                              lane_corridor_id=0,
                                              controlled_ids=None)
        right_lane = MergingLaneCorridorConfig(
            params=params,
            road_ids=[0, 1],
            lane_corridor_id=1,
            s_min=5.,
            s_max=25.,
            min_vel=8.,
            max_vel=12.,
            behavior_model=BehaviorIDMClassic(params),
            controlled_ids=True)
        scenario_generation = \
          ConfigWithEase(
            num_scenarios=number_of_senarios,
            map_file_name=os.path.join(os.path.dirname(__file__), "../../../environments/blueprints/merging/DR_DEU_Merging_MT_v01_shifted.xodr"),  # NOLINT
            random_seed=random_seed,
            params=params,
            lane_corridor_configs=[left_lane, right_lane])
        if viewer:
            viewer = MPViewer(params=params,
                              x_range=[-35, 35],
                              y_range=[-35, 35],
                              follow_agent_id=True)
        dt = 0.2
        evaluator = GoalReachedGuiding(params)
        # evaluator = GoalReached(params)
        observer = NearestObserver(params)
        ml_behavior = ml_behavior

        super().__init__(scenario_generation=scenario_generation,
                         viewer=viewer,
                         dt=dt,
                         evaluator=evaluator,
                         observer=observer,
                         ml_behavior=ml_behavior)
Example #9
    def test_generate_ego_trajectory_with_IDM(self):
        params = ParameterServer()
        env = self.create_runtime_and_setup_empty_world(params)
        env.ml_behavior = BehaviorIDMClassic(params)

        # add ego
        state = np.array([0, 0, 0, 0, 0])
        goal_line = Line2d(np.array([[0., 0.], [1., 1.]]))
        env.addEgoAgent(state, goal_line)

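        # roll out the IDM behavior for N steps with a step time of 0.2 s and draw the resulting trajectory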
        N = 10
        state_traj, action_traj = env.generateTrajectory(0.2, N)
        env._viewer.drawTrajectory(state_traj)
        env.render()
        self.assertEqual(len(state_traj), N)
Example #10
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            }
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        benchmark_runner.clear_checkpoint_dir()
        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            20)  # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets
Example #11
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")
        benchmark_runner.clear_checkpoint_dir()
        # first run: intermediate results are dumped to the checkpoint dir every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # merge the results dumped to the checkpoint directory
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Example #12
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=3, num_serialize_scenarios=2)
        # to find database files
        cwd = os.getcwd()
        if not debug:
            os.chdir("../benchmark_database/")
        else:
            os.chdir("bazel-bin/bark/benchmark/tests/py_benchmark_process_tests.runfiles/benchmark_database")
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=2)

        result = benchmark_runner.run(maintain_history=True)

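        # dump the result including benchmark configs and histories, split into files of at most 1 MB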
        result.dump(os.path.join("./benchmark_results"), dump_configs=True, \
                         dump_histories=True, max_mb_per_file=1)
        result_loaded = BenchmarkResult.load(os.path.join("./benchmark_results"))
        result_loaded.load_histories()
        result_loaded.load_benchmark_configs()

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(
              params=params2,
              center=[5112, 5165],
              y_length=120,
              enforce_y_length=True,
              axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
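        # find all configs where the respective behavior did not reach the goal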
        configs = analyzer.find_configs(criteria={"behavior": lambda x: x == "IDM", "success": lambda x: not x})
        configs_const = analyzer.find_configs(criteria={"behavior": lambda x: x == "Const", "success": lambda x: not x})

        os.chdir(cwd)
        # analyzer.visualize(configs_idx_list=configs,
        #                    viewer=viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(
              params=params2,
              center=[5112, 5165],
              y_length=120,
              enforce_y_length=True,
              axis=ax1)
        viewer2 = MPViewer(
              params=params2,
              center=[5112, 5165],
              y_length=120,
              enforce_y_length=True,
              axis=ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2], viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=10, fontsize=12)