def test_database_from_github_release(self):
        db = BenchmarkDatabase(
            database_root="external/benchmark_database_release")
        evaluators = {
            "success": EvaluatorGoalReached,
            "collision": EvaluatorCollisionEgoAgent,
            "max_steps": EvaluatorStepCount
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 20
        }
        params = ParameterServer()  # only for the evaluated agents; not passed to the scenario
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        benchmark_runner.run(2)

        print(benchmark_runner.dataframe.to_string())
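
The snippet above assumes that BenchmarkDatabase, BenchmarkRunner, ParameterServer, the evaluator classes and the behavior models are already imported. A minimal import block could look like the sketch below; the module paths are assumptions and differ between BARK releases, so adapt them to your checkout:

# Sketch only: module paths are assumptions and vary across BARK versions.
from load.benchmark_database import BenchmarkDatabase            # benchmark_database repo
from modules.benchmark.benchmark_runner import BenchmarkRunner
from modules.runtime.commons.parameters import ParameterServer
from bark.models.behavior import BehaviorIDMClassic, BehaviorConstantVelocity
from bark.world.evaluation import EvaluatorGoalReached, \
    EvaluatorCollisionEgoAgent, EvaluatorStepCount
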
Example #2
    def test_database_runner(self):
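      # Serialize a reduced set of scenarios from the database and pack them
      # into a local release file that BenchmarkDatabase can load via
      # database_root.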
      dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=2, num_serialize_scenarios=5)
      cwd = os.getcwd()
      dbs.process("data/database1")
      local_release_filename = dbs.release(version="test")

      db = BenchmarkDatabase(database_root=local_release_filename)
      evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                    "max_steps": "EvaluatorStepCount"}
      terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>5}
      params = ParameterServer() # only for evaluated agents not passed to scenario!
      behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantVelocity(params)}

      benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                         evaluators=evaluators,
                                         terminal_when=terminal_when,
                                         behaviors=behaviors_tested,
                                         log_eval_avg_every=1)

      result = benchmark_runner.run()
      df = result.get_data_frame()
      print(df)
      self.assertEqual(len(df.index), 20)  # 2 behaviors * 5 serialized scenarios * 2 scenario sets
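
A per-behavior sanity check on df could look like the following sketch; the grouping column name "behavior" is an assumption, so check df.columns for the actual label:

# Hypothetical grouping column "behavior": expect 10 rows per behavior
# (5 serialized scenarios x 2 scenario sets).
print(df.groupby("behavior").size())
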
Example #3
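# ml_behavior and env are assumed to be defined earlier in the full example
# (a bark-ml behavior model and the runtime environment it is stepped in).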
print(ml_behavior)
for _ in range(0, 50):
    env.step()

# to find database files
os.chdir("../benchmark_database/")
dbs = DatabaseSerializer(test_scenarios=4,
                         test_world_steps=5,
                         num_serialize_scenarios=10)
dbs.process("database")
local_release_filename = dbs.release(version="test")
db = BenchmarkDatabase(database_root=local_release_filename)

evaluators = {
    "success": "EvaluatorGoalReached",
    "collision": "EvaluatorCollisionEgoAgent",
    "max_steps": "EvaluatorStepCount"
}
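# terminal_when maps an evaluator name to a predicate on that evaluator's
# result; a scenario run ends as soon as any predicate returns True.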
terminal_when = {"collision": lambda x: x, "max_steps": lambda x: x > 31}
behaviors_tested = {"bark_ml": ml_behavior}

benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                   evaluators=evaluators,
                                   terminal_when=terminal_when,
                                   behaviors=behaviors_tested,
                                   log_eval_avg_every=1)

result = benchmark_runner.run()
print(result)
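
As in Example #2, the result can also be converted to a pandas frame for aggregation; a small sketch, assuming a "behavior" grouping column next to the evaluator columns defined above:

df = result.get_data_frame()
# Collision outcomes per tested behavior (column names assumed).
print(df.groupby("behavior")["collision"].value_counts())
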
Example #4
behavior_used = None
try:
    from bark.models.behavior import BehaviorUCTSingleAgent
    behavior_used = BehaviorUCTSingleAgent
except ImportError:
    print(
        "BehaviorUCTSingleAgent not available, rerun example with "
        "`bazel run //examples:uct_planner --define planner_uct=true`"
    )
    exit()

db = BenchmarkDatabase(database_root="external/benchmark_database_release")
evaluators = {
    "success": EvaluatorGoalReached,
    "collision": EvaluatorCollisionEgoAgent,
    "max_steps": EvaluatorStepCount
}
terminal_when = {"collision": lambda x: x, "max_steps": lambda x: x > 2}
scenario_param_file = "uct_planner.json"  # must be in the examples/params folder
params = ParameterServer(
    filename=os.path.join("examples/params/", scenario_param_file))
behaviors_tested = {"search5s": behavior_used(params)}

benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                   evaluators=evaluators,
                                   terminal_when=terminal_when,
                                   behaviors=behaviors_tested)

benchmark_runner.run(1)

benchmark_runner.dataframe.to_pickle("uct_planner_results.pickle")
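
The pickled frame can be reloaded later for offline analysis, for example:

import pandas as pd

# Reload the benchmark results; the evaluator columns follow the keys of the
# evaluators dict above (the exact frame layout may differ between BARK versions).
df = pd.read_pickle("uct_planner_results.pickle")
print(df.columns.tolist())
print(df.head())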