def __init__(self):
        self.carla_server = None
        self.carla_client = None
        self.carla_controller = None
        self.bark_viewer = None
        self.cosimulation_viewer = None
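        # command used to launch the CARLA server binary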
        self.launch_args = ["external/carla/CarlaUE4.sh", "-quality-level=Low"]

        # Bark parameter server
        self.param_server = ParameterServer(
            filename=BARK_PATH +
            "examples/params/od8_const_vel_one_agent.json")

        # World Definition
        self.bark_world = World(self.param_server)

        # Model Definitions
        self.behavior_model = BehaviorIDMClassic(self.param_server)
        self.execution_model = ExecutionModelInterpolate(self.param_server)
        self.dynamic_model = SingleTrackModel(self.param_server)

        # Map Definition
        xodr_parser = XodrParser(BARK_PATH + "modules/runtime/tests/data/" +
                                 BARK_MAP + ".xodr")
        self.map_interface = MapInterface()
        self.map_interface.SetOpenDriveMap(xodr_parser.map)
        self.bark_world.SetMap(self.map_interface)

        # Bark agent definition
        self.agent_2d_shape = CarLimousine()

        # maps CARLA actor ids to BARK agent ids
        self.carla_2_bark_id = dict()
        # stores the camera id attached to each agent
        self.carla_agents_cam = dict()

    def test_database_from_github_release(self):
        db = BenchmarkDatabase(
            database_root="external/benchmark_database_release")
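        # map each result column name to the evaluator class that computes it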
        evaluators = {
            "success": EvaluatorGoalReached,
            "collision": EvaluatorCollisionEgoAgent,
            "max_steps": EvaluatorStepCount
        }
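        # a scenario run terminates as soon as one of these conditions evaluates to True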
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 20
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

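        # run the benchmark; aggregated results end up in benchmark_runner.dataframe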
        benchmark_runner.run(2)

        print(benchmark_runner.dataframe.to_string())
Example #3
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
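        # pack a small test release of the scenario database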
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run()

        params2 = ParameterServer()
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
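        # rerun a single benchmark configuration and visualize it in the viewer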
        rst, _ = benchmark_runner.run_benchmark_config(10, viewer=viewer)

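        # rerun the whole benchmark, this time keeping the world state histories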
        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 40)

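        # rerun benchmark config 11 and retrieve its world state history for drawing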
        rst, scenario_history = benchmark_runner.run_benchmark_config(
            11, viewer=None, maintain_history=True)
        print(scenario_history)
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[5].GetWorldState(),
                         eval_agent_ids=scenario_history[5].eval_agent_ids)

        viewer.show(block=True)

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
Example #4
    def test_pickle(self):
        params = ParameterServer()
        params_child = params["test_child"]
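        # deleting the parent must not invalidate the child parameter server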
        del params
        value_float = params_child["Child1"]["Child2"]["ValueFloat", "Desc",
                                                       2.0]
        value_bool_false = params_child["Child1"]["Child2"]["ValueBoolFalse",
                                                            "Desc", False]
        value_bool_true = params_child["Child3"]["Child2"]["ValueBoolTrue",
                                                           "Desc", True]
        value_int = params_child["Child1"]["Child4"]["ValueInt", "Desc", 2041]
        value_list_list_float = params_child["Child1"]["Child4"][
            "ValueListListFloat", "Desc",
            [[1.0, 2.0, float(5.4545234)], [float(1.1266135), 2.0],
             [float(123.234234)]]]
        value_list_float = params_child["Child1"]["Child5"][
            "ValueListFloat", "Desc", [
                1.0, 2.0,
                float(5.4545234),
                float(1.1266135), 2.0,
                float(123.234234)
            ]]

        behavior = BehaviorIDMClassic(params_child)
        cpp_object = CppParamServerTestObject(params_child)
        cpp_unpickled = pickle_unpickle(cpp_object)

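        # the round-tripped object must report the same parameter values as the original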
        self.assertEqual(cpp_unpickled.GetRealValue(), 2.0)
        self.assertEqual(cpp_unpickled.GetBoolValueFalse(), False)
        self.assertEqual(cpp_unpickled.GetBoolValueTrue(), True)
        self.assertEqual(cpp_unpickled.GetIntValue(), 2041)
        list1 = cpp_unpickled.GetListListFloatValue()
        list2 = [[1.0, 2.0, float(5.4545234)], [float(1.1266135), 2.0],
                 [float(123.234234)]]

        list3 = cpp_unpickled.GetListFloatValue()
        list4 = [
            1.0, 2.0,
            float(5.4545234),
            float(1.1266135), 2.0,
            float(123.234234)
        ]

        self.assertEqual(len(list1), len(list2))
        for idx, _ in enumerate(list1):
            self.assertEqual(len(list1[idx]), len(list2[idx]))
            for idx2, _ in enumerate(list1[idx]):
                self.assertAlmostEqual(list1[idx][idx2],
                                       list2[idx][idx2],
                                       places=5)

        self.assertEqual(len(list3), len(list4))
        for idx, _ in enumerate(list3):
            self.assertAlmostEqual(list3[idx], list4[idx], places=5)
Example #5
    def test_database_runner(self):
      dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=2, num_serialize_scenarios=5)
      cwd = os.getcwd()
      dbs.process("data/database1")
      local_release_filename = dbs.release(version="test")

      db = BenchmarkDatabase(database_root=local_release_filename)
      evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                    "max_steps": "EvaluatorStepCount"}
      terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>5}
      params = ParameterServer() # only for evaluated agents not passed to scenario!
      behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantVelocity(params)}

      benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                         evaluators=evaluators,
                                         terminal_when=terminal_when,
                                         behaviors=behaviors_tested,
                                         log_eval_avg_every=1)

      result = benchmark_runner.run()
      df = result.get_data_frame()
      print(df)
      self.assertEqual(len(df.index), 20) # 2 behaviors * 5 serialized scenarios * 2 scenario sets
Example #6
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("database")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run(maintain_history=True)

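        # persist the benchmark result and load it back for offline analysis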
        result.dump("./benchmark_results.pickle")
        result_loaded = BenchmarkResult.load("./benchmark_results.pickle")

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(params=params2,
                          center=[5112, 5165],
                          y_length=120,
                          enforce_y_length=True,
                          axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
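        # collect the configs in which each behavior failed to reach the goal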
        configs = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "IDM",
            "success": lambda x: not x
        })
        configs_const = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "Const",
            "success": lambda x: not x
        })

        # analyzer.visualize(configs_idx_list=configs, viewer=viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

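        # replay the failed IDM and ConstantVelocity configs side by side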
        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax1)
        viewer2 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2],
                           viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=1,
                           fontsize=12)