Example 1
0
def main():
    """Run the LfD offline experiment: build params, env and database, then train.

    Relies on module-level globals defined elsewhere in this file:
    ``is_local``, ``params_file`` and ``num_demo_scenarios``.
    """
    args = configure_args()
    # On the cluster, data files live under the bazel runfiles tree.
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-iqn-lfd-full-exp.runfiles/hythe/"
    logging.info(f"Executing job: {args.jobname}")
    logging.info(f"Experiment server at: {os.getcwd()}")
    params = ParameterServer(filename=os.path.join(dir_prefix, params_file),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs1 = DatabaseSerializer(test_scenarios=1, test_world_steps=2,
                              num_serialize_scenarios=num_demo_scenarios)
    dbs1.process(os.path.join(dir_prefix, "configuration/database"),
                 filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs1.release(version="lfd_offline")
    db1 = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator1, _, _ = db1.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator1,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    # Prime the observer with the first world state before training starts.
    scenario, _ = scenario_generator1.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)

    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(args, params, env, db=db1)
    # Save again so parameters touched during the run are persisted
    # (keyword argument for consistency with the earlier Save call).
    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
Example 2
0
    def test_parameters(self):
        """Exercise ParameterServer get/set semantics and JSON export."""
        p = ParameterServer()

        # A lookup with description and default creates the entry.
        self.assertTrue(p["LetsTest"]["hierarchy", "bla", True])

        # The first access stores the default; later defaults are ignored.
        first_read = p["Car"]["Length", "Car Length", 6]
        second_read = p["Car"]["Length", "Car Length", 8]
        self.assertEqual(first_read, 6)
        # value should not change, since key already exists in dict
        self.assertEqual(second_read, 6)

        # Plain access without description/default returns the stored value.
        self.assertEqual(p["Car"]["Length"], 6)

        # Direct assignment works for flat and for nested keys.
        p["Age"] = 24
        self.assertEqual(p["Age"], 24)
        p["Localization"]["Number of Particles"] = 2000
        self.assertEqual(p["Localization"]["Number of Particles"], 2000)

        # C++ Test in /bark/commons/Params/params_test.h
        # write in parameters in C++ and check whether they can be accessed in python afterwards
        #ParamsTest(p)
        #self.assertEqual(p["param_cpp"], 16.5)

        # Children added from python behave like any other subtree.
        child = p.AddChild("ch")
        self.assertTrue(child["ChildTest"]["hierarchy", "bla", True])

        # write parameters to json file
        p.Save("written_a_param_test.json")
Example 3
0
 def write_scenario_parameter(self, config_idx, folder):
   """Dump the scenario json params of one benchmark config to a file."""
   result = super().get_benchmark_result()
   benchmark_config = result.get_benchmark_config(config_idx)
   if benchmark_config is None:
     return
   scenario_params = benchmark_config.scenario.json_params
   server = ParameterServer()
   server.ConvertToParam(scenario_params)
   server.Save(os.path.join(folder, "scenario_parameters.json"))
Example 4
0
def generate_uct_hypothesis_behavior():
    """Build a BehaviorUCTHypothesis whose ego behavior comes from the ML params.

    Returns:
        Tuple of (behavior, mcts parameter server used to build it).
    """
    ml_params = ParameterServer(
        filename="configuration/params/iqn_params_demo_full.json",
        log_if_default=True)
    mcts_params = ParameterServer(
        filename="configuration/params/default_uct_params.json",
        log_if_default=True)
    # Plug the learned macro-action behavior in as the UCT ego behavior.
    mcts_params["BehaviorUctBase"]["EgoBehavior"] = \
        ml_params["ML"]["BehaviorMPMacroActions"]
    behavior = BehaviorUCTHypothesis(mcts_params, [])
    mcts_params.Save(filename="./default_uct_params.json")
    return behavior, mcts_params
def generate_uct_hypothesis_behavior():
    """Build a BehaviorUCTHypothesis with the ML ego behavior, runfiles-aware.

    Uses the module-level globals ``is_local`` and ``params_file`` to locate
    the parameter files either locally or inside the bazel runfiles tree.

    Returns:
        Tuple of (behavior, mcts parameter server used to build it).
    """
    dir_prefix = "" if is_local else "hy-iqn-lfd-full-beliefs-exp.runfiles/hythe/"
    ml_params = ParameterServer(filename=os.path.join(dir_prefix, params_file),
                                log_if_default=True)
    mcts_params = ParameterServer(
        filename=os.path.join(dir_prefix,
                              "configuration/params/default_uct_params.json"),
        log_if_default=True)
    # Plug the learned macro-action behavior in as the UCT ego behavior.
    mcts_params["BehaviorUctBase"]["EgoBehavior"] = \
        ml_params["ML"]["BehaviorMPMacroActions"]
    behavior = BehaviorUCTHypothesis(mcts_params, [])
    mcts_params.Save(filename="./default_uct_params.json")
    return behavior, mcts_params
Example 6
0
 def test_write_params_agent(self):
     """Construct a fully specified agent and export its params to JSON."""
     params = ParameterServer()
     motion = BehaviorConstantAcceleration(params)
     exec_model = ExecutionModelInterpolate(params)
     dyn_model = SingleTrackModel(params)
     # Closed 4 x 2 rectangular footprint (first point repeated to close it).
     footprint = Polygon2d(
         [1.25, 1, 0],
         [Point2d(0, 0),
          Point2d(0, 2),
          Point2d(4, 2),
          Point2d(4, 0),
          Point2d(0, 0)])
     start_state = np.zeros(4)
     agent = Agent(start_state, motion, dyn_model, exec_model, footprint,
                   params.AddChild("agent"))
     params.Save("written_agents_param_test.json")
Example 7
0
def main():
    """Belief-observer IQN experiment entry point with preemption handling.

    Relies on module-level globals defined elsewhere in this file:
    ``is_local`` and ``num_scenarios``.
    """
    args = configure_args()
    # On the cluster, data files live under the bazel runfiles tree.
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-x-iqn-beliefs.runfiles/hythe/"
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params.json"))
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))
    params_behavior_filename = os.path.join(
        params["Experiment"]["dir"],
        "behavior_params_{}.json".format(experiment_id))

    # check if exp exists and handle preemption
    exp_exists = check_if_exp_exists(params)
    if exp_exists:
        # BUG FIX: the format string had one placeholder for two arguments,
        # silently dropping the experiment directory from the message.
        print("Loading existing experiment {} from: {}".format(
            args.jobname, params["Experiment"]["dir"]))
        if os.path.isfile(params_filename):
            params = ParameterServer(filename=params_filename,
                                     log_if_default=True)
        if os.path.isfile(params_behavior_filename):
            params_behavior = ParameterServer(
                filename=params_behavior_filename, log_if_default=True)
    else:
        Path(params["Experiment"]["dir"]).mkdir(parents=True, exist_ok=True)
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    # NOTE(review): this unconditionally reloads the default behavior params,
    # overwriting the preempted ones loaded above — confirm this is intended.
    params_behavior = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/1D_desired_gap_no_prior.json"),
                                      log_if_default=True)
    params.Save(filename=params_filename)
    params_behavior.Save(filename=params_behavior_filename)

    # configure belief observer
    splits = 2
    behavior_space = configure_behavior_space(params_behavior)

    hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
        split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=2,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_mid_dense_1D_new.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(scenario_set_id=0)
    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=is_local)
    print('Observation/state space size', env.observation_space)

    run(params, env, exp_exists)
    # Save again so parameters touched during the run are persisted.
    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
    params_behavior.Save(filename=params_behavior_filename)
    logging.info('-' * 60)
    logging.info(
        "Writing behavior params to :{}".format(params_behavior_filename))
    logging.info('-' * 60)

    return
Example 8
0
def main():
    """Set up a belief-observer IQN-from-demonstrations experiment and run it.

    Relies on module-level globals defined elsewhere in this file:
    ``is_local`` and ``num_scenarios``.
    """
    args = configure_args()
    # On the cluster, data files live under the bazel runfiles tree.
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-iqnfd-beliefs-exp.runfiles/hythe/"
    print("Executing job :", args.jobname)
    print("Experiment server at :", os.getcwd())

    def log_banner(message):
        # The same three-line banner was duplicated in several places below;
        # factored out for consistency. Output is identical.
        logging.info('-' * 60)
        logging.info(message)
        logging.info('-' * 60)

    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params_demo_full_local.json"),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)

    params_behavior_filename = os.path.join(
        params["Experiment"]["dir"],
        "behavior_params_{}.json".format(experiment_id))
    params_behavior = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/1D_desired_gap_no_prior.json"),
                                      log_if_default=True)
    params_behavior.Save(filename=params_behavior_filename)

    # configure belief observer
    splits = 2
    behavior_space = BehaviorSpace(params_behavior)

    hypothesis_set, hypothesis_params = \
        behavior_space.create_hypothesis_set_fixed_split(split=splits)
    observer = BeliefObserver(params, hypothesis_set, splits=splits)

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    log_banner("Writing params to :{}".format(params_filename))

    # database creation
    dbs = DatabaseSerializer(test_scenarios=1,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)
    # Prime the observer with the first world state before training starts.
    scenario, _ = scenario_generator.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)

    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(args, params, env, db=db)
    # Save again so parameters touched during the run are persisted
    # (keyword argument for consistency with the earlier Save call).
    params.Save(filename=params_filename)
    log_banner("Writing params to :{}".format(params_filename))
    params_behavior.Save(filename=params_behavior_filename)
    log_banner("Writing behavior params to :{}".format(params_behavior_filename))
Example 9
0
def main():
    """IQN experiment entry point with preemption handling.

    Relies on module-level globals defined elsewhere in this file:
    ``is_local`` and ``num_scenarios``.
    """
    args = configure_args()
    # On the cluster, data files live under the bazel runfiles tree.
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix = "hy-iqn-exp.runfiles/hythe/"
    print("Executing job :", args.jobname)
    print("Experiment server at :", os.getcwd())
    params = ParameterServer(filename=os.path.join(
        dir_prefix, "configuration/params/iqn_params.json"),
                             log_if_default=True)
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"],
                                   "params_{}.json".format(experiment_id))

    # check if exp exists and handle preemption
    exp_exists = check_if_exp_exists(params)
    if exp_exists:
        # BUG FIX: the format string had one placeholder for two arguments,
        # silently dropping the experiment directory from the message.
        print("Loading existing experiment {} from: {}".format(
            args.jobname, params["Experiment"]["dir"]))
        if os.path.isfile(params_filename):
            params = ParameterServer(filename=params_filename,
                                     log_if_default=True)
    else:
        Path(params["Experiment"]["dir"]).mkdir(parents=True, exist_ok=True)

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(
        params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation
    dbs = DatabaseSerializer(test_scenarios=2,
                             test_world_steps=2,
                             num_serialize_scenarios=num_scenarios)
    dbs.process(os.path.join(dir_prefix, "configuration/database"),
                filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs.release(version="test")
    db = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator, _, _ = db.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)
    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(params, env, exp_exists)
    # Save again so parameters touched during the run are persisted
    # (keyword argument for consistency with the earlier Save call).
    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
Example 10
0
class ExperimentRunner:
    """The ExperimentRunner-Class provides an easy-to-use interface to
    train, visualize, evaluate, and manage experiments.

    Additionally, it creates an Experiment only from a json that is
    hashed before training. Thus, trained results can be matched to executions
    and evaluations.
    """
    def __init__(self,
                 json_file=None,
                 params=None,
                 mode="visualize",
                 random_seed=0):
        """Build the experiment from `json_file` (or a given `params` server)
        and immediately dispatch the requested `mode` via Visitor()."""
        self._logger = logging.getLogger()
        self._experiment_json = json_file
        if params is not None:
            self._params = params
        else:
            self._params = ParameterServer(filename=json_file)
        self._experiment_folder, self._json_name = \
          self.GetExperimentsFolder(json_file)
        # set random seeds
        self._random_seed = random_seed
        np.random.seed(random_seed)
        tf.random.set_seed(random_seed)
        self.SetCkptsAndSummaries()
        self._experiment = self.BuildExperiment(json_file, mode)
        self.Visitor(mode)

    def Visitor(self, mode):
        """Dispatch on `mode` ("train"/"visualize"/"evaluate"/"print"/"save")."""
        if mode == "train":
            # Persist the exact parameters used for this training run.
            self._experiment._params.Save(self._runs_folder + "params.json")
            self.Train()
        if mode == "visualize":
            self.Visualize()
        if mode == "evaluate":
            self.Evaluate()
        if mode == "print":
            self.PrintExperiment()
        if mode == "save":
            self.SaveExperiment(FLAGS.save_path)

    def BuildExperiment(self, json_file, mode):
        """Construct the Experiment object for this runner."""
        return Experiment(json_file, self._params, mode)

    @staticmethod
    def GetExperimentsFolder(json_file):
        """Return (parent directory, file stem) for `json_file`.

        Raises:
            FileNotFoundError: if the parent directory does not exist.
        """
        dir_name = Path(json_file).parent
        if not os.path.isdir(dir_name):
            # BUG FIX: the original `assert f"{dir_name} does not exist."`
            # asserted a non-empty string — always true, so the check never
            # fired. Raise explicitly instead.
            raise FileNotFoundError(f"{dir_name} does not exist.")
        base_name = os.path.basename(json_file)
        file_name = os.path.splitext(base_name)[0]
        return dir_name, file_name

    @staticmethod
    def GenerateHash(params):
        """
        Hash-function to indicate whether the same json is used
        as during training.
        """
        exp_params = params.ConvertToDict()
        # Sort items so the hash is independent of dict insertion order.
        return hashlib.sha1(repr(sorted(
            exp_params.items())).encode('utf-8')).hexdigest()

    def CompareHashes(self):
        """Warn if the current params hash differs from the stored one."""
        experiment_hash = self.GenerateHash(self._params)
        if os.path.isfile(self._hash_file_path):
            # Context manager guarantees the handle is closed even on error.
            with open(self._hash_file_path, 'r') as file:
                old_experiment_hash = file.readline()
            if experiment_hash != old_experiment_hash:
                self._logger.warning(
                    "\033[31m Trained experiment hash does not match \033[0m")

    def SetCkptsAndSummaries(self):
        """Derive the run/checkpoint/summary folders and store them in params."""
        self._runs_folder = \
          str(self._experiment_folder) + "/" + self._json_name + "/" + str(self._random_seed) + "/"
        ckpt_folder = self._runs_folder + "ckpts/"
        summ_folder = self._runs_folder + "summ/"
        self._logger.info(f"Run folder of the agent {self._runs_folder}.")
        self._hash_file_path = self._runs_folder + "hash.txt"
        self._params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = \
          ckpt_folder
        self._params["ML"]["TFARunner"]["SummaryPath"] = \
          summ_folder

    def Train(self):
        """Write the params hash on the first run (or verify it), then train."""
        if not os.path.isfile(self._hash_file_path):
            os.makedirs(os.path.dirname(self._hash_file_path), exist_ok=True)
            with open(self._hash_file_path, 'w') as file:
                file.write(str(self.GenerateHash(self._experiment.params)))
        else:
            self.CompareHashes()
        self._experiment.runner.SetupSummaryWriter()
        self._experiment.runner.Train()

    def Evaluate(self):
        """Run evaluation episodes without rendering."""
        self.CompareHashes()
        num_episodes = \
          self._params["Experiment"]["NumEvaluationEpisodes"]
        evaluation_results = self._experiment.runner.Run(
            num_episodes=num_episodes, render=False, trace_colliding_ids=True)
        # metrics = self._evaluate(evaluation_results)
        # ...

    def Visualize(self):
        """Run visualization episodes with rendering enabled."""
        self.CompareHashes()
        num_episodes = \
          self._params["Experiment"]["NumVisualizationEpisodes"]
        self._experiment.runner.Run(num_episodes=num_episodes, render=True)

    def PrintExperiment(self):
        """Pretty-print the full experiment parameter dictionary."""
        pprint.pprint(self._experiment.params.ConvertToDict())

    def SaveExperiment(self, file_path):
        """Save the parameter server to `file_path`."""
        self._params.Save(file_path)