def test_database_runner_python_behavior(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            }
        }
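        # The evaluator mapping defines one result column per entry: plain
        # string values refer to evaluators by their registered type name,
        # while dict entries (as for the safe-distance evaluators above)
        # additionally pass a ParameterServer to configure the evaluator.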
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
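        # terminal_when stops a scenario run as soon as one of these lambdas
        # returns True on its evaluator result, e.g. on a collision, a
        # longitudinal safe-distance violation, or after more than 2 steps.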
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {"python_behavior": PythonDistanceBehavior(params)}

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5,
                                           deepcopy=False)
        benchmark_runner.clear_checkpoint_dir()
        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            1 * 2 * 2)  # 1 Behavior * 2 Serialize Scenarios * 2 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
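        # The evaluation groups could be used to aggregate the raw results,
        # e.g. (hypothetical aggregation, not part of the original test):
        #   df.groupby(["behavior", "scen_set"])["success"].mean()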
    def test_database2(self):
        # the last parameters must be passed for testing purposes only, not for a release!
        dbs = DatabaseSerializer(test_scenarios=1,
                                 test_world_steps=10,
                                 num_serialize_scenarios=1)
        test_result = dbs.process("data/data2/database2")
        self.assertEqual(test_result, 3)
        dbs.release(version="0.0.1")
    def test_database_multiprocessing_history(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 2 * 2 * 2)
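        # With maintain_history=True the runner additionally records the
        # simulated scenario history per benchmark config, one entry for each
        # of the 2 behaviors x 2 serialized scenarios x 2 scenario sets.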

        rst = benchmark_runner.run_benchmark_config(3,
                                                    viewer=None,
                                                    maintain_history=True)
        scenario_history = rst.get_histories()[3]
        print(scenario_history)
        params = ParameterServer()
        viewer = MPViewer(params=params,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[1].GetWorldState(),
                         eval_agent_ids=scenario_history[1].eval_agent_ids)

        viewer.show(block=True)
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        test_python_params = ParameterServer(log_if_default=True)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            }
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)
        benchmark_runner.clear_checkpoint_dir()
        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            20)  # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets
    def test_database_multiprocessing_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            20)  # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets

        params2 = ParameterServer()
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        rst = benchmark_runner.run_benchmark_config(10, viewer=viewer)
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")
        benchmark_runner.clear_checkpoint_dir()
        # one run; benchmark results are dumped to a checkpoint every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # merge the results from the checkpoint files
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Example #7
    def test_database_from_local_release(self):
        # release the database
        dbs = DatabaseSerializer(test_scenarios=1,
                                 test_world_steps=10,
                                 num_serialize_scenarios=1)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="0.0.1")

        # then reload to test correct parsing
        db = BenchmarkDatabase(database_root=local_release_filename)
        scenario_generation, _, set_parameters = db.get_scenario_generator(
            scenario_set_id=1)
        self.assertEqual(db.get_num_scenario_sets(), 2)
        self.assertEqual(set_parameters["Test1"], 200)
        self.assertEqual(set_parameters["Test2"], 0.5)

        db_filtered = db.apply_filter("40")
        self.assertEqual(db_filtered.get_num_scenario_sets(), 1)

        for scenario_generation, _, params in db:
            print(scenario_generation)
    def test_database_runner(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=5)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 Behaviors * 2 Serialize Scenarios * 2 scenario sets

        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix="hy-iqn-lfd-full-beliefs-exp.runfiles/hythe/"
    logging.info(f"Executing job: {args.jobname}")
    logging.info(f"Experiment server at: {os.getcwd()}")
    params = ParameterServer(filename=os.path.join(dir_prefix, params_file),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"], "params_{}.json".format(experiment_id))

    params_behavior_filename = os.path.join(params["Experiment"]["dir"], "behavior_params_{}.json".format(experiment_id))
    params_behavior = ParameterServer(filename=os.path.join(dir_prefix, "configuration/params/1D_desired_gap_no_prior.json"),
                                      log_if_default=True)
    params_behavior.Save(filename=params_behavior_filename)

    splits = 2
    behavior_space = BehaviorSpace(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
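    # create_hypothesis_set_fixed_split partitions the behavior space into
    # `splits` hypotheses; the BeliefObserver then tracks a belief over these
    # hypotheses for the surrounding agents and adds it to the observation.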

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs1 = DatabaseSerializer(test_scenarios=1, test_world_steps=2,
                             num_serialize_scenarios=num_demo_scenarios)
    dbs1.process(os.path.join(dir_prefix, "configuration/database"),
      filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs1.release(version="lfd_offline")
    db1 = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator1, _, _ = db1.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator1,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    scenario, _ = scenario_generator1.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)

    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(args, params, env, db=db1)
    params.Save(params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
def run(args, params, env, exp_exists=False, db=None):
    agent = None
    demonstrations = None
    exp = None
    columns = ['pos_x', 'pos_y', 'orientation', 'velocity', 'action', 'is_demo']
    # add an eval criteria and generate demonstrations
    if is_generate_demonstrations:
      logging.info("Generating Demonstrations")
      eval_criteria = {"goal_reached" : lambda x : x}
      demo_behavior, mcts_params = generate_uct_hypothesis_behavior()
      demonstrations = generate_demonstrations(params, env, eval_criteria, demo_behavior, db=db)
      logging.info(f"Total demonstrations generated {len(demonstrations)}")
      
      ego_world_states = unpack_demo_states(demonstrations, env._observer)
      save_transitions(params, ego_world_states, columns=columns, 
        filename="demonstrations/demo_dataframe")

      print("Training off", env._observer._world_x_range, env._observer._world_y_range)
    if is_train_on_demonstrations:
      # if demonstrations were not generated in this run
      if not is_generate_demonstrations:
        collector, demonstrations = unpack_load_demonstrations(demo_dir_default)
      else:
        if args.demodir is None:
          collector, demonstrations = unpack_load_demonstrations(params["Experiment"]["dir"])
        else:
          collector, demonstrations = unpack_load_demonstrations(args.demodir)
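      # Prune the loaded demonstrations to the replay-buffer capacity; with
      # n-step returns (Multi_step) the buffer needs Multi_step - 1 extra
      # transitions. (`capacity` is assumed to be a module-level constant of
      # the original script, as it is not defined in this excerpt.)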
      if params["ML"]["BaseAgent"]["Multi_step"] is not None:
        multistep_capacity = capacity + params["ML"]["BaseAgent"]["Multi_step"] - 1 
      if multistep_capacity < len(demonstrations):
        demonstrations = demonstrations[-multistep_capacity:]
        logging.info(f"Pruned number of demonstrations {len(demonstrations)}")
      else:
        logging.info("Number of demonstrations under capacity requested, using full demonstrations")
      logging.info(f"Loaded number of demonstrations {len(demonstrations)}")
      if is_local:
        # Assign steps by args
        params["ML"]["DemonstratorAgent"]["Agent"]["online_gradient_update_steps"] = args.grad_update_steps
      # Assign capacity by length of demonstrations
      params["ML"]["BaseAgent"]["MemorySize"] = len(demonstrations)
      params["ML"]["DemonstratorAgent"]["Buffer"]["demo_ratio"] = 1.0
      logging.info(f"Capacity configured {len(demonstrations)}")
      agent = configure_agent(params, env)
      exp = Experiment(params=params, agent=agent, dump_scenario_interval=25000)
      exp.run(demonstrator=True, demonstrations=demonstrations, 
        num_episodes=num_episodes, learn_only=True)
    
    if is_train_mixed_experiences:
      assert agent is not None
      params["ML"]["DemonstratorAgent"]["Buffer"]["demo_ratio"] = 0.25
      params["ML"]["BaseAgent"]["Update_interval"] = 4
      agent.reset_params(params)
      agent.reset_training_variables(is_online_demo=is_train_mixed_experiences)

      if is_local:
          dir_prefix = ""
      else:
          dir_prefix="hy-iqn-lfd-full-beliefs-exp.runfiles/hythe/"
      # database creation
      dbs2 = DatabaseSerializer(test_scenarios=1, test_world_steps=2,
                              num_serialize_scenarios=num_scenarios)
      dbs2.process(os.path.join(dir_prefix, "configuration/database"),
        filter_sets="**/**/interaction_merging_light_dense_1D.json")
      local_release_filename = dbs2.release(version="test_online")
      db2 = BenchmarkDatabase(database_root=local_release_filename)
      scenario_generator2, _, _ = db2.get_scenario_generator(0)
      agent._env.scenario_generation = scenario_generator2
      exp.run(demonstrator=True, demonstrations=None, num_episodes=num_episodes)

      ego_world_states = extract_learned_states(agent.memory, env._observer)
      save_transitions(params, ego_world_states, columns=columns, 
        filename="demonstrations/learned_dataframe")
      print("Training on", env._observer._world_x_range, env._observer._world_y_range)
Example #11
def main():
    map_filename = "external/bark_ml_project/bark_ml/environments/blueprints/highway/city_highway_straight.xodr"
    params = ParameterServer(filename=os.path.join(
        exp_root, "params_iqn_pre_exp_2sdi64.json"),
                             log_if_default=True)
    params_behavior = ParameterServer(filename=os.path.join(
        exp_root, "behavior_params_iqn_pre_exp_2sdi64.json"))
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    if is_belief_observer:
        splits = 2
        behavior_space = BehaviorSpace(params_behavior)

        hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
            split=splits)
        observer = BeliefObserver(params, hypothesis_set, splits=splits)
    else:
        observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # database creation
    dbs1 = DatabaseSerializer(test_scenarios=1,
                              test_world_steps=2,
                              num_serialize_scenarios=1)
    dbs1.process(os.path.join("", "configuration/database"),
                 filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs1.release(version="lfd_offline")
    db1 = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator1, _, _ = db1.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator1,
                            map_filename=map_filename,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    scenario, _ = scenario_generator1.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)
    env.reset()

    agent = pick_agent(exp_root, env, params)
    ego_world_states = []
    memory = agent.memory
    learned_states = memory["state"]
    actions = memory['action']
    is_demos = memory['is_demo']
    for state, action, is_demo in zip(learned_states, actions, is_demos):
        ego_state = np.zeros((observer._len_ego_state + 1))
        ego_nn_input_state = deepcopy(state[0:observer._len_ego_state])
        ego_state[1:] = ego_nn_input_state
        reverted_observed_state = observer.rev_observe_for_ego_vehicle(
            ego_state)
        ego_world_states.append(
            (reverted_observed_state[int(StateDefinition.X_POSITION)],
             reverted_observed_state[int(StateDefinition.Y_POSITION)],
             reverted_observed_state[int(StateDefinition.THETA_POSITION)],
             reverted_observed_state[int(StateDefinition.VEL_POSITION)],
             action[0], is_demo[0]))
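    # rev_observe_for_ego_vehicle reverts the normalized network input back to
    # world coordinates, so the replayed ego states can be stored as poses,
    # velocities, actions and a demo flag in the data frame below.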
    df = pd.DataFrame(ego_world_states,
                      columns=[
                          'pos_x', 'pos_y', 'orientation', 'velocity',
                          'action', 'is_demo'
                      ])
    print(df.head(10))
    if not os.path.exists(os.path.join(exp_root, "demonstrations")):
        os.makedirs(os.path.join(exp_root, "demonstrations"))
    df.to_pickle(os.path.join(exp_root, "demonstrations", "learned_dataframe"))
    return
def main():
    print("Experiment server at :", os.getcwd())

    args = configure_args()
    # load experiment params
    exp_dir = args.output_dir
    params_filename = glob.glob(os.path.join(exp_dir, "params_[!behavior]*"))
    params = ParameterServer(filename=params_filename[0])
    params.load(fn=params_filename[0])
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(exp_dir, "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(exp_dir, "agent/checkpoints")
    splits = 8
    behavior_params_filename = glob.glob(os.path.join(exp_dir, "behavior_params*"))
    if behavior_params_filename:
      params_behavior = ParameterServer(filename=behavior_params_filename[0])
    else:
      params_behavior = ParameterServer(filename="configuration/params/1D_desired_gap_no_prior.json")
    behavior_space = configure_behavior_space(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
    behavior = BehaviorDiscreteMacroActionsML(params_behavior)
    evaluator = GoalReached(params)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)


    # database creation
    dir_prefix = ""
    dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=2,
                             num_serialize_scenarios=10)
    dbs.process(os.path.join(dir_prefix, "configuration/database"), filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    video_renderer = VideoRenderer(renderer=viewer, world_step_time=0.2)
    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=video_renderer,
                            render=is_local)

    # non-agent evaluation mode
    num_steps = 100
    num_samples = params_behavior["BehaviorSpace"]["Hypothesis"]["BehaviorHypothesisIDM"]["NumSamples"]
    print("Steps, samples, splits", num_steps, num_samples, splits)
    step = 1
    env.reset()

    threshold = observer.is_enabled_threshold
    discretize = observer.is_discretize
    
    beliefs_df = pd.DataFrame(columns=["Step", "Action", "Agent", "Beliefs", "HyNum"])
    beliefs_orig_df = pd.DataFrame(columns=["Step", "Action", "Agent", "Beliefs", "HyNum"])
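    # Note: DataFrame.append as used in the loop below exists only in
    # pandas < 2.0; with newer pandas the rows would have to be collected in a
    # list and concatenated with pd.concat instead.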
    while step <= num_steps:
        action = 5 #np.random.randint(0, behavior.action_space.n)
        next_state, reward, done, info = env.step(action)
        for agent, beliefs in observer.beliefs.items():
            beliefs = np.asarray(beliefs)
            oring = deepcopy(beliefs)
            for i, belief in enumerate(oring):
                beliefs_orig_df = beliefs_orig_df.append({"Step": step, "Action": action, "Agent": agent, "Beliefs": belief, "HyNum": i}, ignore_index=True)
            if discretize:
              beliefs = observer.discretize_beliefs(beliefs)
            if threshold:
              beliefs = observer.threshold_beliefs(beliefs)
            for i, belief in enumerate(beliefs):
                beliefs_df = beliefs_df.append({"Step": step, "Action": action, "Agent": agent, "Beliefs": belief, "HyNum": i}, ignore_index=True)
        step += 1

    suffix = "switch"
    if threshold:
      suffix += "_threshold"
    if discretize:
      suffix += "_discretize"
    beliefs_data_filename = "beliefs_{}_{}_{}".format(splits, num_samples, num_steps)
    beliefs_data_filename += suffix
    print(beliefs_data_filename)
    beliefs_df.to_pickle(os.path.join(str(Path.home()), "master_thesis/code/hythe-src/beliefs_data/", beliefs_data_filename))

    beliefs_data_filename = "orig_beliefs_{}_{}_{}".format(splits, num_samples, num_steps)
    beliefs_data_filename += suffix
    print(beliefs_data_filename)
    beliefs_orig_df.to_pickle(os.path.join(str(Path.home()), "master_thesis/code/hythe-src/beliefs_data/", beliefs_data_filename))

    video_filename = os.path.join(str(Path.home()), "master_thesis/code/hythe-src/beliefs_data/", "video_{}".format(num_samples))
    print(video_filename)
    video_filename += suffix
    print(video_filename)
    video_renderer.export_video(filename=video_filename)
    return
Example #13
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-iqn-exp.runfiles/hythe/"
    print("Executing job :", args.jobname)
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params.json"),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))

    # check if exp exists and handle preemption
    exp_exists = check_if_exp_exists(params)
    if exp_exists:
        print("Loading existing experiment from: {}".format(
            args.jobname, (params["Experiment"]["dir"])))
        if os.path.isfile(params_filename):
            params = ParameterServer(filename=params_filename,
                                     log_if_default=True)
    else:
        Path(params["Experiment"]["dir"]).mkdir(parents=True, exist_ok=True)

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=2,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)
    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(params, env, exp_exists)
    params.Save(params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
    def test_database_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=20,
                                           checkpoint_dir="checkpoints1/")

        # one run; benchmark results are dumped to a checkpoint every 30 configs
        result = benchmark_runner.run(checkpoint_every=30)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets
        # check twice: first, merge the results from the checkpoint files
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)
        # second, load the already merged results
        self.assertTrue(
            os.path.exists(os.path.join("checkpoints1/merged_results.ckpnt")))
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 30)

        configs_to_run = BenchmarkRunner.get_configs_to_run(
            benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 10)
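        # get_configs_to_run returns the benchmark configs not yet contained
        # in the merged result, here the 10 of the 40 configs that were not
        # checkpointed before the first run finished.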

        benchmark_runner2 = BenchmarkRunner(benchmark_database=db,
                                            evaluators=evaluators,
                                            terminal_when=terminal_when,
                                            behaviors=behaviors_tested,
                                            log_eval_avg_every=1,
                                            checkpoint_dir="checkpoints1/",
                                            merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=7)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check that results are maintained in the existing result dump:
        # 30 from the previous run + 7 after the new checkpoint
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints1/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 37)
    def test_database_multiprocessing_runner_checkpoint(self):
        dbs = DatabaseSerializer(test_scenarios=1,
                                 test_world_steps=2,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10,
                                             num_cpus=4,
                                             checkpoint_dir="checkpoints2/",
                                             merge_existing=False)
        benchmark_runner.clear_checkpoint_dir()
        # one run; benchmark results are dumped to a checkpoint every 3 configs
        result = benchmark_runner.run(checkpoint_every=3)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 4 * 9)
        # self.assertEqual(len(merged_result.get_histories()), 4*9)
        self.assertEqual(len(merged_result.get_benchmark_configs()), 4 * 9)

        configs_to_run = BenchmarkRunner.get_configs_to_run(
            benchmark_runner.configs_to_run, merged_result)
        self.assertEqual(len(configs_to_run), 4)
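        # Only the 4 configs missing from the merged checkpoint results have
        # to be rerun by the second runner below.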
        ray.shutdown()
        benchmark_runner2 = BenchmarkRunnerMP(benchmark_database=db,
                                              evaluators=evaluators,
                                              terminal_when=terminal_when,
                                              behaviors=behaviors_tested,
                                              log_eval_avg_every=1,
                                              checkpoint_dir="checkpoints2/",
                                              merge_existing=True)

        result = benchmark_runner2.run(checkpoint_every=1)
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            40)  # 2 Behaviors * 10 Serialize Scenarios * 2 scenario sets

        # check if the existing results are incorporated when merging the results
        merged_result = BenchmarkRunner.merge_checkpoint_benchmark_results(
            checkpoint_dir="checkpoints2/")
        df = merged_result.get_data_frame()
        self.assertEqual(len(df.index), 40)
Example #16
env = Runtime(0.2, viewer, scenario_generator, render=True)

env.reset()
env._world.agents[
    env._scenario._eval_agent_ids[0]].behavior_model = ml_behavior

print(ml_behavior)
for _ in range(0, 50):
    env.step()

# to find database files
os.chdir("../benchmark_database/")
dbs = DatabaseSerializer(test_scenarios=4,
                         test_world_steps=5,
                         num_serialize_scenarios=10)
dbs.process("database")
local_release_filename = dbs.release(version="test")
db = BenchmarkDatabase(database_root=local_release_filename)

evaluators = {
    "success": "EvaluatorGoalReached",
    "collision": "EvaluatorCollisionEgoAgent",
    "max_steps": "EvaluatorStepCount"
}
terminal_when = {"collision": lambda x: x, "max_steps": lambda x: x > 31}
behaviors_tested = {"bark_ml": ml_behavior}

benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                   evaluators=evaluators,
                                   terminal_when=terminal_when,
                                   behaviors=behaviors_tested)
    agent_dir = os.path.join(exp_dir, 'agent')
    return IQNAgent(params=params, env=env, agent_save_dir=agent_dir, checkpoint_load=checkpoint)


def resume_experiment(params, num_episodes, agent):
    exp = Experiment(params=params, agent=agent)
    exp.resume(num_episodes)

args = configure_args()
exp_dir = args.exp_dir
num_episodes = args.num_episodes

params_filename, params = load_params(exp_dir)

dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=20, num_serialize_scenarios=num_scenarios)
dbs.process("configuration/database", filter_sets="**/**/interaction_merging_light_dense_1D.json")
local_release_filename = dbs.release(version="resume")

db = BenchmarkDatabase(database_root=local_release_filename)
scenario_generator, _, _ = db.get_scenario_generator(0)

# load belief observer specifics
if is_belief_observer:
  splits = 2
  behavior_params_filename = glob.glob(os.path.join(exp_dir, "behavior_params*"))[0]
  params_behavior = ParameterServer(filename=behavior_params_filename, log_if_default=True)
  behavior_space = BehaviorSpace(params_behavior)

  hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
  observer = BeliefObserver(params, hypothesis_set, splits=splits)
  behavior = BehaviorDiscreteMacroActionsML(params_behavior)
Example #18
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("database")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run(maintain_history=True)

        result.dump(os.path.join("./benchmark_results.pickle"))
        result_loaded = BenchmarkResult.load(
            os.path.join("./benchmark_results.pickle"))

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(params=params2,
                          center=[5112, 5165],
                          y_length=120,
                          enforce_y_length=True,
                          axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
        configs = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "IDM",
            "success": lambda x: not x
        })
        configs_const = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "Const",
            "success": lambda x: not x
        })
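        # find_configs selects the benchmark configs matching all criteria,
        # here the unsuccessful runs of the IDM and the constant-velocity
        # behavior respectively.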

        #analyzer.visualize(configs_idx_list = configs,
        # viewer = viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax1)
        viewer2 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2],
                           viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=1,
                           fontsize=12)
def main(version, github_token, delete):
    dbs = DatabaseSerializer(test_scenarios=1, test_world_steps=10)
    dbs.process(DATABASE_ROOT)
    dbs.release(version=version, github_token=github_token, delete=delete)
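    # Assumption: with a github_token the packed database is presumably also
    # uploaded as a GitHub release, and delete replaces an existing release of
    # the same version; the tests above only use the local release file.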
Example #20
def main():
    args = configure_args()
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-x-iqn-beliefs.runfiles/hythe/"
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params.json"))
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))
    params_behavior_filename = os.path.join(
        params["Experiment"]["dir"],
        "behavior_params_{}.json".format(experiment_id))

    # check if exp exists and handle preemption
    exp_exists = check_if_exp_exists(params)
    if exp_exists:
        print("Loading existing experiment from: {}".format(
            args.jobname, (params["Experiment"]["dir"])))
        if os.path.isfile(params_filename):
            params = ParameterServer(filename=params_filename,
                                     log_if_default=True)
        if os.path.isfile(params_behavior_filename):
            params_behavior = ParameterServer(
                filename=params_behavior_filename, log_if_default=True)
    else:
        Path(params["Experiment"]["dir"]).mkdir(parents=True, exist_ok=True)
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params_behavior = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/1D_desired_gap_no_prior.json"),
                                      log_if_default=True)
    params.Save(filename=params_filename)
    params_behavior.Save(filename=params_behavior_filename)

    # configure belief observer
    splits = 2
    behavior_space = configure_behavior_space(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
        split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=2,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_mid_dense_1D_new.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(scenario_set_id=0)
    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=is_local)
    print('Observation/state space size', env.observation_space)

    run(params, env, exp_exists)
    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
    params_behavior.Save(filename=params_behavior_filename)
    logging.info('-' * 60)
    logging.info(
        "Writing behavior params to: {}".format(params_behavior_filename))
    logging.info('-' * 60)

    return
Example #21
def main():
    print("Experiment server at:", os.getcwd())
    params = ParameterServer(
        filename="hy-exp-run.runfiles/hythe/configuration/params/default_exp_runne_params.json")
    params = configure_params(params)
    num_scenarios = 5
    random_seed = 0
    behavior = BehaviorDiscreteMacroActionsML(params)
    # evaluator = GoalReachedGuiding(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = "./{}_default_exp_runner_params.json".format(
        experiment_id)
    print(params_filename)
    params.Save(filename=params_filename)
    # database creation
    dbs = DatabaseSerializer(
        test_scenarios=2, test_world_steps=2, num_serialize_scenarios=20
    )  # increase the number of serialize scenarios to 100
    dbs.process("hy-exp-run.runfiles/hythe/configuration/database")
    local_release_filename = dbs.release(version="test",
                                         sub_dir="hy_bark_packaged_databases")
    db = BenchmarkDatabase(database_root=local_release_filename)

    # switch this to other generator to get other index
    # scenario_generator, _, _ = db.get_scenario_generator(0)
    scenario_generator, _, _ = db.get_scenario_generator(1)
    #
    # env = GymSingleAgentRuntime(ml_behavior = behavior,
    #                             observer = observer,
    #                             evaluator = evaluator,
    #                             step_time=0.2,
    #                             viewer=viewer,
    #                             scenario_generator=scenario_generator,
    #                             render=False)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    run(params, env)
    # video_renderer.drawWorld(env._world)
    env._viewer.export_video("./test_video")
    # from gym.envs.registration import register
    # register(
    #     id='highway-v1',
    #     entry_point='bark_ml.environments.gym:DiscreteHighwayGym'
    # )
    # import gym
    # env = gym.make("highway-v1")
    # env.reset()
    # actions = [5]*100
    # print(actions)
    # for action in actions:
    #     env.step(action)
    #     time.sleep(0.2)
    return