Example #1
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix="hy-iqn-lfd-full-exp.runfiles/hythe/"
    logging.info(f"Executing job: {args.jobname}")
    logging.info(f"Experiment server at: {os.getcwd()}")
    params = ParameterServer(filename=os.path.join(dir_prefix, params_file),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"], "params_{}.json".format(experiment_id))

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs1 = DatabaseSerializer(test_scenarios=1, test_world_steps=2,
                              num_serialize_scenarios=num_demo_scenarios)
    dbs1.process(os.path.join(dir_prefix, "configuration/database"),
                 filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs1.release(version="lfd_offline")
    db1 = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator1, _, _ = db1.get_scenario_generator(0)
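    # Note: get_scenario_generator(0) returns a 3-tuple; only the generator is
    # used here (the remaining elements appear to be set info and parameters).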

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator1,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    scenario, _ = scenario_generator1.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)

    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(args, params, env, db=db1)
    params.Save(params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
Example #2
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
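        # Each terminal_when entry presumably ends a scenario once the lambda on
        # the matching evaluator result returns True (collision, or > 2 steps).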
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 20) # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets

        params2 = ParameterServer()
        viewer = MPViewer(
              params=params2,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        rst = benchmark_runner.run_benchmark_config(10, viewer=viewer)
Example #3
    def test_database_multiprocessing_history(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        rst = benchmark_runner.run(maintain_history=True)
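        # With maintain_history=True each benchmark config stores its scenario
        # history: 2 behaviors * 2 serialized scenarios * 2 scenario sets = 8.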
        self.assertEqual(len(rst.get_histories()), 2*2*2)

        rst = benchmark_runner.run_benchmark_config(3, viewer=None, maintain_history=True)
        scenario_history = rst.get_histories()[3]
        print(scenario_history)
        params = ParameterServer()
        viewer = MPViewer(
              params=params,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[1].GetWorldState(),
                          eval_agent_ids=scenario_history[1].eval_agent_ids)

        viewer.show(block=True)
Example #4
    def test_database_from_github_release(self):
        db = BenchmarkDatabase(
            database_root="external/benchmark_database_release")
        evaluators = {
            "success": EvaluatorGoalReached,
            "collision": EvaluatorCollisionEgoAgent,
            "max_steps": EvaluatorStepCount
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 20
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        benchmark_runner.run(2)

        print(benchmark_runner.dataframe.to_string())
Example #5
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 2*2*2) # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets

        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Example #6
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("database")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run()

        params2 = ParameterServer()
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        rst, _ = benchmark_runner.run_benchmark_config(10, viewer=viewer)

        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 20)

        rst, scenario_history = benchmark_runner.run_benchmark_config(
            11, viewer=None, maintain_history=True)
        print(scenario_history)
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[5].get_world_state(),
                         eval_agent_ids=scenario_history[5].eval_agent_ids)

        viewer.show(block=True)

        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index),
                         20)  # 2 Behaviors * 10 Serialize Scenarios
Example #7
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            },
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            },
            "test_evaluator_serializable": TestPythonEvaluatorSerializable()
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           log_eval_avg_every=1,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Example #8
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")

        # one run; a checkpoint of results is dumped after every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check merging twice: first, merge directly from the checkpoint files
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)
        # second, load the previously merged results
        self.assertTrue(os.path.exists(os.path.join("checkpoints1/merged_results.ckpnt")))
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)

        configs_to_run = BenchmarkRunner.get_configs_to_run(benchmark_runner.configs_to_run, merged_result)
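        # 40 total configs minus the 30 already checkpointed leaves 10 to run.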
        self.assertEqual(len(configs_to_run), 10)

        benchmark_runner2 = BenchmarkRunner(benchmark_database=db,
                                            evaluators=evaluators,
                                            terminal_when=terminal_when,
                                            behaviors=behaviors_tested,
                                            log_eval_avg_every=1,
                                            checkpoint_dir="checkpoints1/",
                                            merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=7)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that results are maintained in the existing result dump: 30 from the previous run + 7 after the new checkpoint
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 37)
Example #9
    def test_database_multiprocessing_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=1, test_world_steps=2, num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
                                        

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10,
                                             num_cpus=4,
                                             checkpoint_dir="checkpoints2/",
                                             merge_existing=False)
        benchmark_runner.clear_checkpoint_dir()
        # one run; a checkpoint of results is dumped after every 3 configs
        result = benchmark_runner.run(checkpoint_every=3)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
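        # With 4 workers and checkpoint_every=3, each worker has presumably
        # checkpointed 9 of its 10 configs (3 dumps of 3), hence 4*9 = 36.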
        self.assertEqual(len(df.index), 4*9)
        # self.assertEqual(len(merged_result.get_histories()), 4*9)
        self.assertEqual(len(merged_result.get_benchmark_configs()), 4*9)

        configs_to_run = BenchmarkRunner.get_configs_to_run(benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 4)
        ray.shutdown()
        benchmark_runner2 = BenchmarkRunnerMP(benchmark_database=db,
                                              evaluators=evaluators,
                                              terminal_when=terminal_when,
                                              behaviors=behaviors_tested,
                                              log_eval_avg_every=1,
                                              checkpoint_dir="checkpoints2/",
                                              merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=1)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 40) # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that the existing result is incorporated when merging results
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Example #10
    def test_database_from_local_release(self):
        # release the database
        dbs = DatabaseSerializer(test_scenarios=1,
                                 test_world_steps=10,
                                 num_serialize_scenarios=1)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="0.0.1")

        # then reload to test correct parsing
        db = BenchmarkDatabase(database_root=local_release_filename)
        scenario_generation, _, set_parameters = db.get_scenario_generator(
            scenario_set_id=1)
        self.assertEqual(db.get_num_scenario_sets(), 2)
        self.assertEqual(set_parameters["Test1"], 200)
        self.assertEqual(set_parameters["Test2"], 0.5)

        db_filtered = db.apply_filter("40")
        self.assertEqual(db_filtered.get_num_scenario_sets(), 1)

        for scenario_generation, _, params in db:
            print(scenario_generation)
Example #11
    def test_database_runner_python_behavior(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            }
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {"python_behavior": PythonDistanceBehavior(params)}

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5,
                                           deepcopy=False)
        benchmark_runner.clear_checkpoint_dir()
        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            1 * 2 * 2)  # 1 Behavior * 2 Serialize Scenarios * 2 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Example #12
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            }
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        benchmark_runner.clear_checkpoint_dir()
        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            20)  # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets
Example #13
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")
        benchmark_runner.clear_checkpoint_dir()
        # one run; a checkpoint of results is dumped after every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check merging from the checkpoint files
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Example #14
File: barkml.py  Project: steven-guo94/bark
env.reset()
env._world.agents[
    env._scenario._eval_agent_ids[0]].behavior_model = ml_behavior

print(ml_behavior)
for _ in range(0, 50):
    env.step()

# to find database files
os.chdir("../benchmark_database/")
dbs = DatabaseSerializer(test_scenarios=4,
                         test_world_steps=5,
                         num_serialize_scenarios=10)
dbs.process("database")
local_release_filename = dbs.release(version="test")
db = BenchmarkDatabase(database_root=local_release_filename)

evaluators = {
    "success": "EvaluatorGoalReached",
    "collision": "EvaluatorCollisionEgoAgent",
    "max_steps": "EvaluatorStepCount"
}
terminal_when = {"collision": lambda x: x, "max_steps": lambda x: x > 31}
behaviors_tested = {"bark_ml": ml_behavior}

benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                   evaluators=evaluators,
                                   terminal_when=terminal_when,
                                   behaviors=behaviors_tested,
                                   log_eval_avg_every=1)
Example #15
def run(args, params, env, exp_exists=False, db=None):
    agent = None
    demonstrations = None
    exp = None
    columns = ['pos_x', 'pos_y', 'orientation', 'velocity', 'action', 'is_demo']
    # add an eval criteria and generate demonstrations
    if is_generate_demonstrations:
      logging.info("Generating Demonstrations")
      eval_criteria = {"goal_reached" : lambda x : x}
      demo_behavior, mcts_params = generate_uct_hypothesis_behavior()
      demonstrations = generate_demonstrations(params, env, eval_criteria, demo_behavior, db=db)
      logging.info(f"Total demonstrations generated {len(demonstrations)}")
      
      ego_world_states = unpack_demo_states(demonstrations, env._observer)
      save_transitions(params, ego_world_states, columns=columns,
                       filename="demonstrations/demo_dataframe")

      print("Training off", env._observer._world_x_range, env._observer._world_y_range)
    if is_train_on_demonstrations:
      # if demonstrations were not generated in this run
      if not is_generate_demonstrations:
        collector, demonstrations = unpack_load_demonstrations(demo_dir_default)
      else:
        if args.demodir is None:
          collector, demonstrations = unpack_load_demonstrations(params["Experiment"]["dir"])
        else:
          collector, demonstrations = unpack_load_demonstrations(args.demodir)
      if params["ML"]["BaseAgent"]["Multi_step"] is not None:
        multistep_capacity = capacity + params["ML"]["BaseAgent"]["Multi_step"] - 1 
      if multistep_capacity < len(demonstrations):
        demonstrations = demonstrations[-multistep_capacity:]
        logging.info(f"Pruned number of demonstrations {len(demonstrations)}")
      else:
        logging.info("Number of demonstrations under capacity requested, using full demonstrations")
      logging.info(f"Loaded number of demonstrations {len(demonstrations)}")
      if is_local:
        # Assign steps by args
        params["ML"]["DemonstratorAgent"]["Agent"]["online_gradient_update_steps"] = args.grad_update_steps
      # Assign capacity by length of demonstrations
      params["ML"]["BaseAgent"]["MemorySize"] = len(demonstrations)
      params["ML"]["DemonstratorAgent"]["Buffer"]["demo_ratio"] = 1.0
      logging.info(f"Capacity configured {len(demonstrations)}")
      agent = configure_agent(params, env)
      exp = Experiment(params=params, agent=agent, dump_scenario_interval=25000)
      exp.run(demonstrator=True, demonstrations=demonstrations,
              num_episodes=num_episodes, learn_only=True)
    
    if is_train_mixed_experiences:
      assert agent is not None
      params["ML"]["DemonstratorAgent"]["Buffer"]["demo_ratio"] = 0.25
      params["ML"]["BaseAgent"]["Update_interval"] = 4
      agent.reset_params(params)
      agent.reset_training_variables(is_online_demo=is_train_mixed_experiences)

      if is_local:
          dir_prefix = ""
      else:
          dir_prefix="hy-iqn-lfd-full-beliefs-exp.runfiles/hythe/"
      # database creation
      dbs2 = DatabaseSerializer(test_scenarios=1, test_world_steps=2,
                                num_serialize_scenarios=num_scenarios)
      dbs2.process(os.path.join(dir_prefix, "configuration/database"),
                   filter_sets="**/**/interaction_merging_light_dense_1D.json")
      local_release_filename = dbs2.release(version="test_online")
      db2 = BenchmarkDatabase(database_root=local_release_filename)
      scenario_generator2, _, _ = db2.get_scenario_generator(0)
      agent._env.scenario_generation = scenario_generator2
      exp.run(demonstrator=True, demonstrations=None, num_episodes=num_episodes)

      ego_world_states = extract_learned_states(agent.memory, env._observer)
      save_transitions(params, ego_world_states, columns=columns,
                       filename="demonstrations/learned_dataframe")
      print("Training on", env._observer._world_x_range, env._observer._world_y_range)
Example #16
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-iqnfd-beliefs-exp.runfiles/hythe/"
    print("Executing job :", args.jobname)
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params_demo_full_local.json"),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)

    params_behavior_filename = os.path.join(
        params["Experiment"]["dir"],
        "behavior_params_{}.json".format(experiment_id))
    params_behavior = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/1D_desired_gap_no_prior.json"),
                                      log_if_default=True)
    params_behavior.Save(filename=params_behavior_filename)

    # configure belief observer
    splits = 2
    behavior_space = BehaviorSpace(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
        split=splits)
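    # The BeliefObserver presumably extends the observation with belief values
    # over the hypothesis set created from the fixed split above.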
    observer = BeliefObserver(params, hypothesis_set, splits=splits)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=1,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)
    scenario, _ = scenario_generator.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)

    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(args, params, env, db=db)
    params.Save(params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
    params_behavior.Save(filename=params_behavior_filename)
    logging.info('-' * 60)
    logging.info(
        "Writing behavior params to :{}".format(params_behavior_filename))
    logging.info('-' * 60)
Example #17
import os

# BenchmarkDatabase import path assumed as in the related examples
from load.benchmark_database import BenchmarkDatabase
from modules.benchmark.benchmark_runner import BenchmarkRunner

from bark.world.evaluation import *
from modules.runtime.commons.parameters import ParameterServer

behavior_used = None
try:
    from bark.models.behavior import BehaviorUCTSingleAgent
    behavior_used = BehaviorUCTSingleAgent
except ImportError:
    print(
        "BehaviorUCTSingleAgent not available, rerun example with "
        "`bazel run //examples:uct_planner --define planner_uct=true`"
    )
    exit()

db = BenchmarkDatabase(database_root="external/benchmark_database_release")
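# "external/benchmark_database_release" is presumably made available as a
# Bazel external dependency (cf. the bazel run command mentioned above).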
evaluators = {
    "success": EvaluatorGoalReached,
    "collision": EvaluatorCollisionEgoAgent,
    "max_steps": EvaluatorStepCount
}
terminal_when = {"collision": lambda x: x, "max_steps": lambda x: x > 2}
scenario_param_file = "uct_planner.json"  # must be within examples params folder
params = ParameterServer(
    filename=os.path.join("examples/params/", scenario_param_file))
behaviors_tested = {"search5s": behavior_used(params)}

benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                   evaluators=evaluators,
                                   terminal_when=terminal_when,
                                   behaviors=behaviors_tested)
Example #18
def main():
    map_filename = "external/bark_ml_project/bark_ml/environments/blueprints/highway/city_highway_straight.xodr"
    params = ParameterServer(filename=os.path.join(
        exp_root, "params_iqn_pre_exp_2sdi64.json"),
                             log_if_default=True)
    params_behavior = ParameterServer(filename=os.path.join(
        exp_root, "behavior_params_iqn_pre_exp_2sdi64.json"))
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    if is_belief_observer:
        splits = 2
        behavior_space = BehaviorSpace(params_behavior)

        hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
            split=splits)
        observer = BeliefObserver(params, hypothesis_set, splits=splits)
    else:
        observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # database creation
    dbs1 = DatabaseSerializer(test_scenarios=1,
                              test_world_steps=2,
                              num_serialize_scenarios=1)
    dbs1.process(os.path.join("", "configuration/database"),
                 filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs1.release(version="lfd_offline")
    db1 = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator1, _, _ = db1.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator1,
                            map_filename=map_filename,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    scenario, _ = scenario_generator1.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)
    env.reset()

    agent = pick_agent(exp_root, env, params)
    ego_world_states = []
    memory = agent.memory
    learned_states = memory["state"]
    actions = memory['action']
    is_demos = memory['is_demo']
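    # Revert each stored (normalized) NN input state back to ego world
    # coordinates so the trajectory can be tabulated below.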
    for state, action, is_demo in zip(learned_states, actions, is_demos):
        ego_state = np.zeros((observer._len_ego_state + 1))
        ego_nn_input_state = deepcopy(state[0:observer._len_ego_state])
        ego_state[1:] = ego_nn_input_state
        reverted_observed_state = observer.rev_observe_for_ego_vehicle(
            ego_state)
        ego_world_states.append(
            (reverted_observed_state[int(StateDefinition.X_POSITION)],
             reverted_observed_state[int(StateDefinition.Y_POSITION)],
             reverted_observed_state[int(StateDefinition.THETA_POSITION)],
             reverted_observed_state[int(StateDefinition.VEL_POSITION)],
             action[0], is_demo[0]))
    df = pd.DataFrame(ego_world_states,
                      columns=[
                          'pos_x', 'pos_y', 'orientation', 'velocity',
                          'action', 'is_demo'
                      ])
    print(df.head(10))
    if not os.path.exists(os.path.join(exp_root, "demonstrations")):
        os.makedirs(os.path.join(exp_root, "demonstrations"))
    df.to_pickle(os.path.join(exp_root, "demonstrations", "learned_dataframe"))
    return
Example #19
def main():
    print("Experiment server at :", os.getcwd())

    args = configure_args()
    # load exp params
    exp_dir = args.output_dir
    params_filename = glob.glob(os.path.join(exp_dir, "params_[!behavior]*"))
    params = ParameterServer(filename=params_filename[0])
    params.load(fn=params_filename[0])
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(exp_dir, "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(exp_dir, "agent/checkpoints")
    splits = 8
    behavior_params_filename = glob.glob(os.path.join(exp_dir, "behavior_params*"))
    if behavior_params_filename:
      params_behavior = ParameterServer(filename=behavior_params_filename[0])
    else:
      params_behavior = ParameterServer(filename="configuration/params/1D_desired_gap_no_prior.json")
    behavior_space = configure_behavior_space(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
    behavior = BehaviorDiscreteMacroActionsML(params_behavior)
    evaluator = GoalReached(params)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)


    # database creation
    dir_prefix = ""
    dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=2,
                             num_serialize_scenarios=10)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    video_renderer = VideoRenderer(renderer=viewer, world_step_time=0.2)
    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=video_renderer,
                            render=is_local)

    # non-agent evaluation mode
    num_steps = 100
    num_samples = params_behavior["BehaviorSpace"]["Hypothesis"]["BehaviorHypothesisIDM"]["NumSamples"]
    print("Steps, samples, splits", num_steps, num_samples, splits)
    step = 1
    env.reset()

    threshold = observer.is_enabled_threshold
    discretize = observer.is_discretize
    
    beliefs_df = pd.DataFrame(columns=["Step", "Action", "Agent", "Beliefs", "HyNum"])
    beliefs_orig_df = pd.DataFrame(columns=["Step", "Action", "Agent", "Beliefs", "HyNum"])
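    # Two frames are collected per step: beliefs_orig_df keeps the raw beliefs,
    # beliefs_df the optionally discretized and/or thresholded ones.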
    while step <= num_steps:
        action = 5  # np.random.randint(0, behavior.action_space.n)
        next_state, reward, done, info = env.step(action)
        for agent, beliefs in observer.beliefs.items():
            beliefs = np.asarray(beliefs)
            oring = deepcopy(beliefs)
            for i, belief in enumerate(oring):
                beliefs_orig_df = beliefs_orig_df.append({"Step": step, "Action": action, "Agent": agent, "Beliefs": belief, "HyNum": i}, ignore_index=True)
            if discretize:
              beliefs = observer.discretize_beliefs(beliefs)
            if threshold:
              beliefs = observer.threshold_beliefs(beliefs)
            for i, belief in enumerate(beliefs):
                beliefs_df = beliefs_df.append({"Step": step, "Action": action, "Agent": agent, "Beliefs": belief, "HyNum": i}, ignore_index=True)
        step += 1

    suffix = "switch"
    if threshold:
      suffix += "_threshold"
    if discretize:
      suffix += "_discretize"
    beliefs_data_filename = "beliefs_{}_{}_{}".format(splits, num_samples, num_steps)
    beliefs_data_filename += suffix
    print(beliefs_data_filename)
    beliefs_df.to_pickle(os.path.join(str(Path.home()), "master_thesis/code/hythe-src/beliefs_data/", beliefs_data_filename))

    beliefs_data_filename = "orig_beliefs_{}_{}_{}".format(splits, num_samples, num_steps)
    beliefs_data_filename += suffix
    print(beliefs_data_filename)
    beliefs_orig_df.to_pickle(os.path.join(str(Path.home()), "master_thesis/code/hythe-src/beliefs_data/", beliefs_data_filename))

    video_filename = os.path.join(str(Path.home()), "master_thesis/code/hythe-src/beliefs_data/", "video_{}".format(num_samples))
    print(video_filename)
    video_filename += suffix
    print(video_filename)
    video_renderer.export_video(filename=video_filename)
    return
Example #20
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-iqn-exp.runfiles/hythe/"
    print("Executing job :", args.jobname)
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params.json"),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))

    # check if exp exists and handle preemption
    exp_exists = check_if_exp_exists(params)
    if exp_exists:
        print("Loading existing experiment from: {}".format(
            args.jobname, (params["Experiment"]["dir"])))
        if os.path.isfile(params_filename):
            params = ParameterServer(filename=params_filename,
                                     log_if_default=True)
    else:
        Path(params["Experiment"]["dir"]).mkdir(parents=True, exist_ok=True)

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=2,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)
    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(params, env, exp_exists)
    params.Save(params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
Example #21
def resume_experiment(params, num_episodes, agent):
    exp = Experiment(params=params, agent=agent)
    exp.resume(num_episodes)

args = configure_args()
exp_dir = args.exp_dir
num_episodes = args.num_episodes

params_filename, params = load_params(exp_dir)

dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=20, num_serialize_scenarios=num_scenarios)
dbs.process("configuration/database", filter_sets="**/**/interaction_merging_light_dense_1D.json")
local_release_filename = dbs.release(version="resume")

db = BenchmarkDatabase(database_root=local_release_filename)
scenario_generator, _, _ = db.get_scenario_generator(0)

# load belief observer specifics
if is_belief_observer:
  splits = 2
  behavior_params_filename = glob.glob(os.path.join(exp_dir, "behavior_params*"))[0]
  params_behavior = ParameterServer(filename=behavior_params_filename, log_if_default=True)
  behavior_space = BehaviorSpace(params_behavior)

  hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
  observer = BeliefObserver(params, hypothesis_set, splits=splits)
  behavior = BehaviorDiscreteMacroActionsML(params_behavior)
# if not, load default observer
else:
  behavior = BehaviorDiscreteMacroActionsML(params)
Example #22
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("database")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run(maintain_history=True)

        result.dump("./benchmark_results.pickle")
        result_loaded = BenchmarkResult.load("./benchmark_results.pickle")

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(params=params2,
                          center=[5112, 5165],
                          y_length=120,
                          enforce_y_length=True,
                          axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
        configs = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "IDM",
            "success": lambda x: not x
        })
        configs_const = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "Const",
            "success": lambda x: not x
        })
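        # find_configs presumably returns the benchmark-config indices whose
        # results match all criteria, here failed runs of "IDM" and "Const".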

        #analyzer.visualize(configs_idx_list = configs,
        # viewer = viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax1)
        viewer2 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2],
                           viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=1,
                           fontsize=12)
Example #23
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-x-iqn-beliefs.runfiles/hythe/"
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params.json"))
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))
    params_behavior_filename = os.path.join(
        params["Experiment"]["dir"],
        "behavior_params_{}.json".format(experiment_id))

    # check if exp exists and handle preemption
    exp_exists = check_if_exp_exists(params)
    if exp_exists:
        print("Loading existing experiment from: {}".format(
            args.jobname, (params["Experiment"]["dir"])))
        if os.path.isfile(params_filename):
            params = ParameterServer(filename=params_filename,
                                     log_if_default=True)
        if os.path.isfile(params_behavior_filename):
            params_behavior = ParameterServer(
                filename=params_behavior_filename, log_if_default=True)
    else:
        Path(params["Experiment"]["dir"]).mkdir(parents=True, exist_ok=True)
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params_behavior = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/1D_desired_gap_no_prior.json"),
                                      log_if_default=True)
    params.Save(filename=params_filename)
    params_behavior.Save(filename=params_behavior_filename)

    # configure belief observer
    splits = 2
    behavior_space = configure_behavior_space(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
        split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=2,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_mid_dense_1D_new.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(scenario_set_id=0)
    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=is_local)
    print('Observation/state space size', env.observation_space)

    run(params, env, exp_exists)
    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
    params_behavior.Save(filename=params_behavior_filename)
    logging.info('-' * 60)
    logging.info(
        "Writing behavior params to :{}".format(params_behavior_filename))
    logging.info('-' * 60)

    return
Example #24
    def test_database_from_github_release(self):
        db = BenchmarkDatabase(database_root="external/benchmark_database_release")
        scenario_generation = db.get_scenario_generator(scenario_set_id=0)
Example #25
# Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT

from load.benchmark_database import BenchmarkDatabase
from modules.runtime.viewer.matplotlib_viewer import MPViewer
from modules.runtime.commons.parameters import ParameterServer
import time

db = BenchmarkDatabase(database_root="external/benchmark_database_release")
scenario_generation = db.get_scenario_generator(scenario_set_id=0)
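# In this older API, get_scenario_generator appears to return a tuple whose
# first element is the scenario generation object used below.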
param_server = ParameterServer()
viewer = MPViewer(params=param_server,
                  x_range=[5060, 5160],
                  y_range=[5070, 5150])

for _ in range(0, 5):  # run 5 scenarios in a row, repeating after 3
    scenario, idx = scenario_generation[0].get_next_scenario()
    world_state = scenario.get_world_state()
    print("Running scenario {} of {}".format(
        idx, scenario_generation[0].num_scenarios))
    for _ in range(0, 10):  # run each scenario for 10 steps
        world_state.step(0.2)
        viewer.drawWorld(world_state)
        viewer.show(block=False)
        time.sleep(0.2)
Example #26
def main():
    print("Experiment server at:", os.getcwd())
    params = ParameterServer(
        filename=
        "hy-exp-run.runfiles/hythe/configuration/params/default_exp_runne_params.json"
    )
    params = configure_params(params)
    num_scenarios = 5
    random_seed = 0
    behavior = BehaviorDiscreteMacroActionsML(params)
    # evaluator = GoalReachedGuiding(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = "./{}_default_exp_runner_params.json".format(
        experiment_id)
    print(params_filename)
    params.Save(filename=params_filename)
    # database creation
    dbs = DatabaseSerializer(
        test_scenarios=2, test_world_steps=2, num_serialize_scenarios=20
    )  # increase the number of serialize scenarios to 100
    dbs.process("hy-exp-run.runfiles/hythe/configuration/database")
    local_release_filename = dbs.release(version="test",
                                         sub_dir="hy_bark_packaged_databases")
    db = BenchmarkDatabase(database_root=local_release_filename)

    # switch this to other generator to get other index
    # scenario_generator, _, _ = db.get_scenario_generator(0)
    scenario_generator, _, _ = db.get_scenario_generator(1)
    #
    # env = GymSingleAgentRuntime(ml_behavior = behavior,
    #                             observer = observer,
    #                             evaluator = evaluator,
    #                             step_time=0.2,
    #                             viewer=viewer,
    #                             scenario_generator=scenario_generator,
    #                             render=False)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    run(params, env)
    # video_renderer.drawWorld(env._world)
    env._viewer.export_video("./test_video")
    # from gym.envs.registration import register
    # register(
    #     id='highway-v1',
    #     entry_point='bark_ml.environments.gym:DiscreteHighwayGym'
    # )
    # import gym
    # env = gym.make("highway-v1")
    # env.reset()
    # actions = [5]*100
    # print(actions)
    # for action in actions:
    #     env.step(action)
    #     time.sleep(0.2)
    return