Esempio n. 1
0
    def test_database_multiprocessing_history(self):
        """Runs a multiprocessing benchmark with maintain_history=True and replays one config's stored world states."""
        # Serialize 2 of the test scenarios per set from the local database folder.
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        # Terminate a run on collision, or when more than 2 steps were taken.
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10)
        rst = benchmark_runner.run(maintain_history=True)
        # 2 behaviors * 2 scenario sets * 2 serialized scenarios each
        # (set count inferred from the analogous 20-row check in the sibling test).
        self.assertEqual(len(rst.get_histories()), 2*2*2)

        # Re-run a single benchmark config and draw one of its stored world states.
        rst = benchmark_runner.run_benchmark_config(3, viewer=None, maintain_history=True)
        scenario_history = rst.get_histories()[3]
        print(scenario_history)
        params = ParameterServer()
        viewer = MPViewer(
              params=params,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[1].GetWorldState(),
                          eval_agent_ids=scenario_history[1].eval_agent_ids)

        viewer.show(block=True)
    def get_scenario_generator(self, scenario_set_id):
        """Load the serialized scenario set for one row of the database dataframe.

        Temporarily chdirs into `self.database_root` so relative map file
        references inside the serialized scenarios resolve, then restores
        the previous working directory.

        Returns:
            Tuple (scenario_generation, scenario_set_name, scenario_set_parameters).
        """
        serialized_file_name = self.dataframe.iloc[scenario_set_id]["Serialized"]
        if os.path.exists(serialized_file_name):
            serialized_file_path = serialized_file_name
        else:
            serialized_file_path = os.path.join(
                self.database_root, serialized_file_name)
        # NOTE(review): serialized_file_path is computed above but never used;
        # load_scenario_list() below receives serialized_file_name, which only
        # resolves thanks to the chdir into database_root — confirm intended.
        cwd = None
        if os.path.exists(self.database_root):
            # move into database root that map files can be found
            cwd = os.getcwd()
            os.chdir(self.database_root)
        param_file_name = self.dataframe.iloc[scenario_set_id]["Params"]
        if not param_file_name:
            logging.warning("No param file found for scenario set {}. Using defaults...".format(
                self.dataframe.iloc[scenario_set_id]["SetName"]))
            params = ParameterServer()
        else:
            params = ParameterServer(filename=param_file_name)

        scenario_generation = ScenarioGeneration(params=params)
        scenario_generation.load_scenario_list(filename=serialized_file_name)
        SetMapfileDirectory(self.database_root)
        if cwd:
            os.chdir(cwd)
        scenario_set_name = self.dataframe.iloc[scenario_set_id]["SetName"]
        scenario_set_parameters = self.dataframe.iloc[scenario_set_id]["SetParameters"]
        return scenario_generation, scenario_set_name, scenario_set_parameters
Esempio n. 3
0
    def test_database_multiprocessing_runner(self):
        """Runs a full multiprocessing benchmark (5 serialized scenarios per set) and checks the result size."""
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        # Terminate on collision or after more than 2 steps.
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10)

        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 20) # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets

        # Re-run one benchmark config with a viewer attached.
        params2 = ParameterServer()
        viewer = MPViewer(
              params=params2,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        rst  = benchmark_runner.run_benchmark_config(10, viewer=viewer)
Esempio n. 4
0
    def test_agent_from_trackfile_centered(self):
        """Builds an agent from a dummy interaction-dataset track on a centered map."""

        map_filename = os.path.join(
            os.path.dirname(__file__),
            "../tests/data/DR_DEU_Merging_MT_v01_centered.xodr")
        track_filename = os.path.join(
            os.path.dirname(__file__),
            "../tests/data/interaction_dataset_dummy_track.csv")

        # Restrict to track id 1 between offsets 500 and 1000.
        agent_track_info = AgentTrackInfo(track_filename,
                                          track_id=1,
                                          start_offset=500,
                                          end_offset=1000)

        params = ParameterServer()
        params_id = params["Scenario"]["Generation"]["InteractionDataset"]
        params_id["MapFilename", "", map_filename]
        params_id["TrackFilename", "", track_filename]
        params_id["BehaviorModel", "", {}]

        track_params = ParameterServer()
        track_params["execution_model"] = 'ExecutionModelInterpolate'
        track_params["dynamic_model"] = 'SingleTrackModel'
        track_params["map_interface"] = None  # world.map
        track_params["behavior_model"] = None

        # xy_offset shifts the track coordinates into the centered map frame.
        scenario_info = ScenarioTrackInfo(map_filename,
                                          track_filename,
                                          agent_track_info,
                                          xy_offset=[-900, -900])
        ds_reader = InteractionDatasetReader()
        agent = ds_reader.AgentFromTrackfile(track_params, params,
                                             scenario_info,
                                             agent_track_info.GetTrackId())
Esempio n. 5
0
def generate_uct_hypothesis_behavior():
    """Build a BehaviorUCTHypothesis whose ego behavior comes from the IQN demo parameters.

    Side effect: saves the merged UCT parameters to ./default_uct_params.json.

    Returns:
        Tuple (behavior, mcts_params).
    """
    iqn_params = ParameterServer(
        filename="configuration/params/iqn_params_demo_full.json",
        log_if_default=True)
    uct_params = ParameterServer(
        filename="configuration/params/default_uct_params.json",
        log_if_default=True)
    # Plug the ML macro-action parameters in as the ego behavior of the UCT search.
    uct_params["BehaviorUctBase"]["EgoBehavior"] = iqn_params["ML"]["BehaviorMPMacroActions"]
    hypothesis_behavior = BehaviorUCTHypothesis(uct_params, [])
    uct_params.Save(filename="./default_uct_params.json")
    return hypothesis_behavior, uct_params
    def test_database_runner(self):
        """Single-process benchmark with string, dict-typed, and instance evaluators."""
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        test_python_params = ParameterServer(log_if_default=True)
        # Evaluators may be given as a type-name string, a {"type", "params"}
        # dict, or a ready evaluator instance.
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            },
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            },
            "test_evaluator_serializable": TestPythonEvaluatorSerializable()
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        # Make the custom python evaluators importable inside the runner.
        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           log_eval_avg_every=1,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 Behaviors * 2 Serialize Scenarios * 1 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Esempio n. 7
0
 def test_belief_observer_observation_space(self):
     """max_beliefs must equal max observed vehicles times the number of hypotheses."""
     params = ParameterServer(
         filename="configuration/params/iqn_params.json")
     splits = 8
     params_behavior = ParameterServer(
         filename="configuration/params/1D_desired_gap_no_prior.json")
     behavior_space = BehaviorSpace(params_behavior)
     hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
     observer = BeliefObserver(params, hypothesis_set, splits=splits)
     max_num_agents = observer._max_num_vehicles
     # One belief per (vehicle, hypothesis) pair.
     assert max_num_agents * len(hypothesis_set) == observer.max_beliefs
Esempio n. 8
0
def main():
    """Replay stored demonstrations and dump reverted ego world states to a dataframe.

    Relies on the module-level globals `demo_root` and `is_belief_observer`.
    Writes the resulting dataframe to <demo_root>/demonstrations/demo_dataframe.
    """
    map_filename = "external/bark_ml_project/bark_ml/environments/blueprints/highway/city_highway_straight.xodr"
    # Pick the params file whose name does not start with "params_b…" (behavior params are loaded separately).
    params_filename = glob.glob(os.path.join(demo_root, "params_[!behavior]*"))
    params = ParameterServer(filename=params_filename[0], log_if_default=True)
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    if is_belief_observer:
        # Belief observer: hypotheses come from a separately stored behavior space.
        splits = 2
        bparams_filename = glob.glob(os.path.join(demo_root, "behavior_*"))
        params_behavior = ParameterServer(filename=bparams_filename[0])
        behavior_space = BehaviorSpace(params_behavior)

        hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
            split=splits)
        observer = BeliefObserver(params, hypothesis_set, splits=splits)
    else:
        observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)
    env = HyDiscreteHighway(params=params,
                            map_filename=map_filename,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    env.reset()

    ego_world_states = []
    _, demonstrations = unpack_load_demonstrations(demo_root)
    for demo in demonstrations:
        # Slot 0 stays zero; ego features fill slots 1..N — presumably a
        # leading time/state column expected by the observer; confirm.
        ego_state = np.zeros((observer._len_ego_state + 1))
        (nn_ip_state, action, reward, next_state, done, is_demo) = demo
        ego_nn_input_state = deepcopy(nn_ip_state[0:observer._len_ego_state])
        ego_state[1:] = ego_nn_input_state
        # Map the (normalized) observation back into world coordinates.
        reverted_observed_state = observer.rev_observe_for_ego_vehicle(
            ego_state)
        ego_world_states.append(
            (reverted_observed_state[int(StateDefinition.X_POSITION)],
             reverted_observed_state[int(StateDefinition.Y_POSITION)],
             reverted_observed_state[int(StateDefinition.THETA_POSITION)],
             reverted_observed_state[int(StateDefinition.VEL_POSITION)],
             action, int(is_demo)))
    df = pd.DataFrame(ego_world_states,
                      columns=[
                          'pos_x', 'pos_y', 'orientation', 'velocity',
                          'action', 'is_demo'
                      ])
    print(df.head(10))
    df.to_pickle(os.path.join(demo_root, "demonstrations/demo_dataframe"))
    return
def generate_uct_hypothesis_behavior():
    """Create a BehaviorUCTHypothesis configured from the experiment's ML parameters.

    Relies on the module-level globals `is_local` and `params_file`.
    Side effect: saves the merged UCT parameters to ./default_uct_params.json.

    Returns:
        Tuple (behavior, mcts_params).
    """
    # Paths differ between a local run and a bazel runfiles deployment.
    dir_prefix = "" if is_local else "hy-iqn-lfd-full-beliefs-exp.runfiles/hythe/"
    ml_params = ParameterServer(
        filename=os.path.join(dir_prefix, params_file), log_if_default=True)
    mcts_params = ParameterServer(
        filename=os.path.join(dir_prefix, "configuration/params/default_uct_params.json"),
        log_if_default=True)
    # The ML macro-action parameters become the ego behavior of the UCT search.
    mcts_params["BehaviorUctBase"]["EgoBehavior"] = ml_params["ML"]["BehaviorMPMacroActions"]
    behavior = BehaviorUCTHypothesis(mcts_params, [])
    mcts_params.Save(filename="./default_uct_params.json")
    return behavior, mcts_params
Esempio n. 10
0
    def test_pickle_belief_observer(self):
        """A BeliefObserver must survive a pickle round trip with its key attributes intact."""
        num_splits = 2
        observer_params = ParameterServer()
        # Build the hypothesis set the observer tracks beliefs over.
        space_params = ParameterServer()
        space = BehaviorSpace(space_params)
        hypotheses, _ = space.create_hypothesis_set_fixed_split(split=num_splits)
        original = BeliefObserver(observer_params, hypotheses, splits=num_splits)

        restored = pickle_unpickle(original)

        self.assertIsNotNone(restored)
        self.assertTrue(isinstance(restored, BeliefObserver))
        self.assertEqual(restored.splits, original.splits)
        self.assertEqual(restored.len_beliefs, original.len_beliefs)
Esempio n. 11
0
def main():
    """Evaluate a trained FQF agent on stored toy-evaluation scenarios.

    Loads parameters, observer and scenario list from --exp_dir, steps the
    environment through a fixed action sequence, then loads the final
    agent checkpoint.
    """
    args = configure_args()
    exp_dir = args.exp_dir or "results/training/toy_evaluation"
    params_filename = glob.glob(os.path.join(exp_dir, "params_*"))
    params = ParameterServer(filename=params_filename[0])
    behavior_params_filename = glob.glob(
        os.path.join(exp_dir, "behavior_params*"))
    if behavior_params_filename:
        # Behavior params present -> belief-observer setup with hypotheses
        # derived from the stored behavior space.
        splits = 8
        behavior_params = ParameterServer(filename=behavior_params_filename[0])
        behavior_space = BehaviorSpace(behavior_params)
        hypothesis_set, _ = behavior_space.create_hypothesis_set_fixed_split(
            split=splits)
        observer = BeliefObserver(params, hypothesis_set, splits=splits)
        behavior = BehaviorDiscreteMacroActionsML(behavior_params)
    else:
        behavior = BehaviorDiscreteMacroActionsML(params)
        observer = NearestAgentsObserver(params)

    evaluator = GoalReached(params)

    scenario_params = ParameterServer(
        filename=
        "configuration/database/scenario_sets/interaction_merging_light_dense_1D.json"
    )
    scenario_generator = ConfigurableScenarioGeneration(params=scenario_params,
                                                        num_scenarios=5)
    # Replace the generated scenarios with the stored evaluation list.
    scenario_file = glob.glob(os.path.join(exp_dir, "scenarios_list*"))
    scenario_generator.load_scenario_list(scenario_file[0])
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)
    env = HyDiscreteHighway(behavior=behavior,
                            observer=observer,
                            evaluator=evaluator,
                            viewer=viewer,
                            scenario_generation=scenario_generator,
                            render=True)

    env.reset()
    actions = [0, 1, 2, 3, 4, 5, 6]
    for action in actions:
        print(action)
        env.step(action)
    agent = FQFAgent(env=env, test_env=env, params=params)

    agent.load_models(os.path.join(exp_dir, "agent/checkpoints/final"))
Esempio n. 12
0
 def test_add_ego_agent(self):
     """Adding an ego agent to an empty world must store its state unchanged."""
     params = ParameterServer()
     runtime = self.create_runtime_and_setup_empty_world(params)
     initial_state = np.array([0, 0, 0, 0, 0, 0])
     goal = Line2d(np.array([[0., 0.], [1., 1.]]))
     runtime.addEgoAgent(initial_state, goal)
     # The runtime must expose exactly the state that was passed in.
     self.assertTrue(np.array_equal(runtime.ego_agent.state, initial_state))
Esempio n. 13
0
  def test_gnn_parameters(self):
    """GNN parameters set on the ParameterServer must reach both actor and critic GNNs."""
    params = ParameterServer()
    params["ML"]["BehaviorGraphSACAgent"]["GNN"]["NumMpLayers"] = 4
    params["ML"]["BehaviorGraphSACAgent"]["GNN"]["MpLayerNumUnits"] = 64
    params["ML"]["BehaviorGraphSACAgent"]["GNN"]["message_calculation_class"] = "gnn_edge_mlp"
    params["ML"]["BehaviorGraphSACAgent"]["GNN"]["global_exchange_mode"] = "mean"

    gnn_library = GNNWrapper.SupportedLibrary.spektral
    params["ML"]["BehaviorGraphSACAgent"]["GNN"]["Library"] = gnn_library

    bp = ContinuousHighwayBlueprint(params, number_of_senarios=2500, random_seed=0)
    observer = GraphObserver(params=params)
    env = SingleAgentRuntime(blueprint=bp, observer=observer, render=False)
    sac_agent = BehaviorGraphSACAgent(environment=env, observer=observer, params=params)

    actor_gnn = sac_agent._agent._actor_network._gnn
    critic_gnn = sac_agent._agent._critic_network_1._gnn

    # Both networks must have received the same GNN configuration.
    for gnn in [actor_gnn, critic_gnn]:
      self.assertEqual(gnn._params["NumMpLayers"], 4)
      self.assertEqual(gnn._params["MpLayerNumUnits"], 64)
      self.assertEqual(gnn._params["message_calculation_class"], "gnn_edge_mlp")
      self.assertEqual(gnn._params["global_exchange_mode"], "mean")
      self.assertEqual(gnn._params["Library"], gnn_library)
Esempio n. 14
0
    def test_python_model(self):
        """A Python-wrapped behavior model must drive an agent through one world step.

        Loads a deterministic scenario, replaces agent 0's behavior with a
        PythonBehaviorModelWrapper around a SingleTrackModel, and steps the
        world once to ensure the wrapper integrates with the C++ core.
        """
        param_server = ParameterServer(filename=os.path.join(
            os.path.dirname(__file__),
            "../../runtime/tests/data/deterministic_scenario.json"))
        # (removed a stray no-op expression statement `param_server` here)

        mapfile = os.path.join(
            os.path.dirname(__file__),
            "../../runtime/tests/data/city_highway_straight.xodr")
        param_server["Scenario"]["Generation"][
            "DeterministicScenarioGeneration"]["MapFilename"] = mapfile
        scenario_generation = DeterministicScenarioGeneration(
            num_scenarios=3, random_seed=0, params=param_server)
        viewer = MPViewer(params=param_server,
                          follow_agent_id=False,
                          use_world_bounds=True)
        scenario, idx = scenario_generation.get_next_scenario()
        world = scenario.GetWorldState()
        single_track_model = SingleTrackModel(param_server)
        behavior_model = PythonBehaviorModelWrapper(single_track_model,
                                                    param_server)
        world.GetAgent(0).behavior_model = behavior_model
        # Seed the action used by the behavior model for the first step.
        world.GetAgent(0).behavior_model.SetLastAction(
            np.array([1., 1.], dtype=np.float32))
        world.Step(0.2)
Esempio n. 15
0
def run_configuration(argv):
    """Train, visualize, or evaluate an IQN agent depending on FLAGS.mode.

    Optionally restores checkpoints from the configured CheckpointPath
    when FLAGS.load is set.

    Raises:
        ValueError: if FLAGS.mode is not one of train/visualize/evaluate.
    """
    params = ParameterServer(
        filename="examples/example_params/iqn_params.json")
    params["ML"]["BaseAgent"][
        "SummaryPath"] = "/home/mansoor/Study/Werkstudent/fortiss/code/bark-ml/summaries"
    params["ML"]["BaseAgent"][
        "CheckpointPath"] = "/home/mansoor/Study/Werkstudent/fortiss/code/bark-ml/checkpoints"

    env = gym.make(FLAGS.env, params=params)
    agent = IQNAgent(env=env, test_env=env, params=params)

    if FLAGS.load and params["ML"]["BaseAgent"]["CheckpointPath"]:
        agent.load_models(
            os.path.join(params["ML"]["BaseAgent"]["CheckpointPath"], "best"))

    if FLAGS.mode == "train":
        agent.run()

    elif FLAGS.mode == "visualize":
        agent.visualize()

    elif FLAGS.mode == "evaluate":
        # writes evaluation data using summary writer in summary path
        agent.evaluate()

    else:
        # ValueError is the idiomatic exception for a bad argument value and
        # stays catchable by any existing `except Exception` handler.
        raise ValueError(
            "Invalid argument for --mode: {}".format(FLAGS.mode))
  def test_observed_agents_selection(self):
    """Nodes of the graph observation must be sorted by ascending distance to the ego node."""
    agent_limit = 10
    params = ParameterServer()
    params["ML"]["GraphObserver"]["AgentLimit"] = agent_limit
    observer = GraphObserver(params=params)

    obs, obs_world = self._get_observation(
      observer=observer,
      world=self.world,
      eval_id=self.eval_id)

    obs = tf.expand_dims(obs, 0) # add a batch dimension

    # Decode the flat observation back into its graph representation.
    nodes, _, _ = GraphObserver.graph(obs, graph_dims=observer.graph_dimensions)
    nodes = nodes[0] # remove batch dim

    # nodes[0] is treated as the ego node; features 0/1 are its x/y position.
    ego_node = nodes[0]
    ego_node_pos = Point2d(
      ego_node[0].numpy(), # x coordinate
      ego_node[1].numpy()) # y coordinate

    # verify that the nodes are ordered by
    # ascending distance to the ego node
    max_distance_to_ego = 0
    for node in nodes:
      pos = Point2d(
        node[0].numpy(), # x coordinate
        node[1].numpy()) # y coordinate
      distance_to_ego = Distance(pos, ego_node_pos)

      self.assertGreaterEqual(distance_to_ego, max_distance_to_ego,
        msg='Nodes are not sorted by distance relative to '\
          + 'the ego node in ascending order.')

      max_distance_to_ego = distance_to_ego
Esempio n. 17
0
    def test_execution_model_pickle(self):
        """An ExecutionModelInterpolate must survive a pickle round trip."""
        model = ExecutionModelInterpolate(ParameterServer())

        restored = pickle_unpickle(model)

        self.assertTrue(isinstance(restored, ExecutionModelInterpolate))
Esempio n. 18
0
    def test_agent_pickle(self):
        """Agents (and lists of agents) must survive pickle round trips.

        Checks id, state, and goal definition equality for a polygon goal
        and a state-limits goal, plus pickling an agent inside a list.
        """
        params = ParameterServer()
        behavior = BehaviorConstantVelocity(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = CarLimousine()
        init_state = np.array([0, 0, 0, 0, 5])
        goal_polygon = Polygon2d([0, 0, 0],[Point2d(-1,-1),Point2d(-1,1),Point2d(1,1), Point2d(1,-1)])
        goal_definition = GoalDefinitionPolygon(goal_polygon)
        agent = Agent(init_state, behavior, dynamic, execution, shape, params.AddChild("agent"), goal_definition )

        agent_after = pickle_unpickle(agent)

        self.assertEqual(agent_after.id , agent.id)
        self.assertTrue(np.array_equal(agent_after.state, agent.state) )
        self.assertTrue(np.array_equal(agent_after.goal_definition.goal_shape.center, \
                                       agent.goal_definition.goal_shape.center))

        goal_definition_2 = GoalDefinitionStateLimits(goal_polygon, (0.2 , 0.5))
        agent2 = Agent(init_state, behavior, dynamic, execution, shape, params.AddChild("agent"), goal_definition_2)

        agent_after2 = pickle_unpickle(agent2)

        self.assertEqual(agent_after2.id , agent2.id)
        # Fixed: compare against agent2 (the pickled source), not agent.
        # Both share init_state, so values coincide, but the intent is agent2.
        self.assertTrue(np.array_equal(agent_after2.state, agent2.state) )
        self.assertTrue(np.array_equal(agent_after2.goal_definition.xy_limits.center, \
                                       agent2.goal_definition.xy_limits.center))

        agent_list = []
        agent_list.append(agent)

        agent_list_after = pickle_unpickle(agent_list)

        self.assertEqual(agent_list_after[0].id , agent.id)
        self.assertTrue(np.array_equal(agent_list_after[0].state, agent.state) )
  def test_agents(self):
    """Smoke-tests the IQN, FQF and QRDQN agents for two tiny training steps each."""
    params = ParameterServer()
    # Keep the runs minimal: 2 steps total, 2-step episodes.
    params["ML"]["BaseAgent"]["NumSteps"] = 2
    params["ML"]["BaseAgent"]["MaxEpisodeSteps"] = 2

    bp = DiscreteHighwayBlueprint(params, num_scenarios=10, random_seed=0)
    env = SingleAgentRuntime(blueprint=bp, render=False)

    # IQN Agent
    iqn_agent = IQNAgent(env=env, test_env=env, params=params)
    env.ml_behavior = iqn_agent
    self.assertEqual(env.ml_behavior.set_action_externally, False)
    iqn_agent.run()
    # run() must switch the behavior to externally-set actions.
    self.assertEqual(env.ml_behavior.set_action_externally, True)

    # FQF Agent
    fqf_agent = FQFAgent(env=env, test_env=env, params=params)
    env.ml_behavior = fqf_agent
    self.assertEqual(env.ml_behavior.set_action_externally, False)
    fqf_agent.run()
    self.assertEqual(env.ml_behavior.set_action_externally, True)

    # QRDQN Agent
    qrdqn_agent = QRDQNAgent(env=env, test_env=env, params=params)
    env.ml_behavior = qrdqn_agent
    self.assertEqual(env.ml_behavior.set_action_externally, False)
    qrdqn_agent.run()
    self.assertEqual(env.ml_behavior.set_action_externally, True)
Esempio n. 20
0
    def test_one_agent_at_goal_state_limits(self):
        """An agent placed inside its StateLimits goal region must evaluate as success."""
        param_server = ParameterServer()
        # Model Definition
        behavior_model = BehaviorConstantVelocity(param_server)
        execution_model = ExecutionModelInterpolate(param_server)
        dynamic_model = SingleTrackModel(param_server)

        # Agent Definition
        agent_2d_shape = CarLimousine()
        init_state = np.array(
            [0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
        agent_params = param_server.AddChild("agent1")
        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        # Move the goal region onto the agent's initial position.
        goal_polygon = goal_polygon.Translate(Point2d(-191.789, -50.1725))

        # Angle limits bracket the initial heading, so the goal is satisfied.
        agent = Agent(
            init_state, behavior_model, dynamic_model, execution_model,
            agent_2d_shape, agent_params,
            GoalDefinitionStateLimits(
                goal_polygon,
                (3.14 * 3.0 / 4.0 - 0.08, 3.14 * 3.0 / 4.0 + 0.08)), None)

        world = World(param_server)
        world.AddAgent(agent)
        evaluator = EvaluatorGoalReached(agent.id)
        world.AddEvaluator("success", evaluator)

        info = world.Evaluate()
        self.assertEqual(info["success"], True)
Esempio n. 21
0
    def test_collect_demonstrations(self):
        """Collects demonstrations with a scripted behavior and reloads them from disk."""
        params = ParameterServer()
        bp = DiscreteHighwayBlueprint(params,
                                      number_of_senarios=10,
                                      random_seed=0)
        env = SingleAgentRuntime(blueprint=bp, render=False)
        env._observer = NearestAgentsObserver(params)
        env._action_wrapper = BehaviorDiscreteMacroActionsML(params)
        env._evaluator = TestEvaluator()

        demo_behavior = bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.\
                tests.test_demo_behavior.TestDemoBehavior(params)
        collector = DemonstrationCollector()
        # 4 scenarios, single-process runner, no deepcopy of the runner state.
        collection_result = collector.CollectDemonstrations(env, demo_behavior, 4, "./test_demo_collected", \
               use_mp_runner=False, runner_init_params={"deepcopy" : False})
        self.assertTrue(
            os.path.exists("./test_demo_collected/collection_result"))
        print(collection_result.get_data_frame().to_string())

        # Keep only experiences from runs satisfying the goal criterion.
        experiences = collector.ProcessCollectionResult(
            eval_criteria={"goal_r1": lambda x: x})
        # expected length = 2 scenarios (only every second reaches goal) x 3 steps (4 executed, but first not counted)
        self.assertEqual(len(experiences), 2 * 3)

        collector.dump("./final_collections")

        # Reloading from disk must yield the same experiences.
        loaded_collector = DemonstrationCollector.load("./final_collections")
        experiences_loaded = loaded_collector.GetDemonstrationExperiences()
        print(experiences_loaded)
        self.assertEqual(len(experiences_loaded), 2 * 3)
Esempio n. 22
0
 def write_scenario_parameter(self, config_idx, folder):
   """Dump the scenario parameters of one benchmark config to a JSON file in `folder`."""
   config = super().get_benchmark_result().get_benchmark_config(config_idx)
   if config is None:
     return
   # Round-trip the raw json params through a ParameterServer so the file
   # is written in the canonical parameter format.
   server = ParameterServer()
   server.ConvertToParam(config.scenario.json_params)
   server.Save(os.path.join(folder, "scenario_parameters.json"))
Esempio n. 23
0
 def __init__(self,
              params=None,
              eval_agent=None,
              bark_eval_fns=None,
              bark_ml_eval_fns=None):
   """Set up the evaluator with BARK and BARK-ML evaluation functors.

   Args:
     params: ParameterServer; a fresh one is created when omitted.
     eval_agent: agent to evaluate (stored, not used here directly).
     bark_eval_fns: dict of name -> factory for BARK world evaluators;
       defaults to goal/collision/step-count/drivable-area evaluators.
     bark_ml_eval_fns: dict of name -> functor for BARK-ML evaluation;
       defaults to the functor set below.
   """
   # Fix: a `params=ParameterServer()` default would be evaluated once at
   # function-definition time and shared by every instance created without
   # explicit params (mutable default argument).
   if params is None:
     params = ParameterServer()
   self._eval_agent = eval_agent
   self._params = params["ML"]["GeneralEvaluator"]
   self._bark_eval_fns = bark_eval_fns or {
     "goal_reached" : lambda: EvaluatorGoalReached(),
     "collision" : lambda: EvaluatorCollisionEgoAgent(),
     "step_count" : lambda: EvaluatorStepCount(),
     "drivable_area" : lambda: EvaluatorDrivableArea()
   }
   self._bark_ml_eval_fns = bark_ml_eval_fns or {
     "collision_functor" : CollisionFunctor(self._params),
     "goal_functor" : GoalFunctor(self._params),
     "low_speed_goal_reached_functor" : LowSpeedGoalFunctor(self._params),
     "drivable_area_functor" : DrivableAreaFunctor(self._params),
     "step_count_functor" : StepCountFunctor(self._params),
     "smoothness_functor" : SmoothnessFunctor(self._params),
     "min_max_vel_functor" : MinMaxVelFunctor(self._params),
     # "pot_center_functor": PotentialCenterlineFunctor(self._params),
     # "pot_vel_functor": PotentialVelocityFunctor(self._params),
     "pot_goal_center_functor": PotentialGoalCenterlineFunctor(self._params),
     # "pot_goal_switch_vel_functor": PotentialGoalSwitchVelocityFunctor(self._params)
     # "state_action_logging_functor": StateActionLoggingFunctor(self._params)
   }
Esempio n. 24
0
    def __init__(self,
                 params=None,
                 name='GNN',
                 output_dtype=tf.float32):
        """
    Initializes a GraphNetwork instance.

    Args:
    params: A `ParameterServer` instance containing the parameters
      to configure the GNN. A fresh instance is created when omitted.
    name: Name of the instance.
    output_dtype: The dtype to which the GNN output is casted.

    Note: graph dims are read from params["ML"]["GraphDims"] when present;
    if unavailable, self._graph_dims is left unset.
    """
        super(GraphNetwork, self).__init__(name=name)
        self.output_dtype = output_dtype
        if params is None:
            # Fix: a `params=ParameterServer()` default would be created once
            # at class-definition time and shared across all instances.
            params = ParameterServer()
        self._params = params
        try:
            self._graph_dims = self._validated_graph_dims(
                params["ML"]["GraphDims"])
        except Exception:
            # Graph dims are optional here; narrowed from a bare `except:`
            # which would also swallow KeyboardInterrupt/SystemExit.
            # NOTE(review): _graph_dims stays unset on failure — confirm
            # downstream code tolerates the missing attribute.
            pass
Esempio n. 25
0
def run_configuration(argv):
    """Train, visualize, or evaluate a SAC agent on a continuous merging
    scenario; the action is selected via the ``FLAGS.mode`` flag
    ("train", "visualize" or "evaluate")."""
    # params = ParameterServer(filename="examples/example_params/tfa_params.json")
    params = ParameterServer()
    # NOTE: Modify these paths in order to save the checkpoints and summaries
    # params["ML"]["BehaviorTFAAgents"]["CheckpointPath"] = "YOUR_PATH"
    # params["ML"]["TFARunner"]["SummaryPath"] = "YOUR_PATH"
    params["World"]["remove_agents_out_of_map"] = True

    # Build the merging scenarios and wrap them in a single-agent runtime.
    blueprint = ContinuousMergingBlueprint(params,
                                           number_of_senarios=2500,
                                           random_seed=0)
    environment = SingleAgentRuntime(blueprint=blueprint, render=False)

    # A PPO agent can be used instead:
    # ppo_agent = BehaviorPPOAgent(environment=env,
    #                              params=params)
    # env.ml_behavior = ppo_agent
    # runner = PPORunner(params=params,
    #                    environment=env,
    #                    agent=ppo_agent)

    # The SAC agent acts as the ML behavior of the ego vehicle.
    agent = BehaviorSACAgent(environment=environment, params=params)
    environment.ml_behavior = agent
    runner = SACRunner(params=params, environment=environment, agent=agent)

    mode = FLAGS.mode
    if mode == "train":
        runner.SetupSummaryWriter()
        runner.Train()
    elif mode == "visualize":
        runner.Run(num_episodes=10, render=True)
    elif mode == "evaluate":
        runner.Run(num_episodes=100, render=False)
Esempio n. 26
0
    def test_agent_pickle_uct_planner(self):
        """Checks that an agent using the UCT macro-action behavior model
        survives a pickle round trip with its behavior type intact."""
        try:
            from bark.core.models.behavior import BehaviorUCTSingleAgentMacroActions
        except ImportError:
            # The UCT planner is an optional build target; skip gracefully
            # when it was not compiled in. Narrowed from a bare ``except:``,
            # which would also hide unrelated failures inside the module.
            print("Rerun test with ---define planner_uct=true")
            return

        params = ParameterServer()
        behavior = BehaviorUCTSingleAgentMacroActions(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = CarLimousine()
        # State layout: [time, x, y, theta, velocity].
        init_state = np.array([0, 0, 0, 0, 5])
        # 2x2 square goal region centered at the origin.
        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_definition = GoalDefinitionPolygon(goal_polygon)
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"), goal_definition)

        agent_after = pickle_unpickle(agent)

        # The behavior model type must be preserved across serialization.
        self.assertTrue(
            isinstance(agent_after.behavior_model,
                       BehaviorUCTSingleAgentMacroActions))
Esempio n. 27
0
    def test_behavior_model_pickle(self):
        """A constant-velocity behavior model survives a pickle round trip."""
        param_server = ParameterServer()
        model = BehaviorConstantVelocity(param_server)
        restored = pickle_unpickle(model)
        # The deserialized object must still be a BehaviorConstantVelocity.
        self.assertTrue(isinstance(restored, BehaviorConstantVelocity))
Esempio n. 28
0
 def test_obs_traj(self):
     """Round-trip check of the observer's normalization: after stepping the
     environment with random actions, reverting the observation for the ego
     vehicle and re-normalizing it must reproduce the ego slice of the
     original network input state."""
     map_filename = "external/bark_ml_project/bark_ml/environments/blueprints/highway/city_highway_straight.xodr"
     params = ParameterServer()
     behavior = BehaviorDiscreteMacroActionsML(params)
     evaluator = GoalReached(params)
     observer = NearestAgentsObserver(params)
     viewer = MPViewer(params=params,
                       x_range=[-35, 35],
                       y_range=[-35, 35],
                       follow_agent_id=True)
     env = HyDiscreteHighway(params=params,
                             map_filename=map_filename,
                             behavior=behavior,
                             evaluator=evaluator,
                             observer=observer,
                             viewer=viewer,
                             render=True)
     env.reset()
     # 100 random discrete macro actions drawn from [0, 7).
     actions = np.random.randint(0, 7, 100)
     for action in actions:
         concatenated_state, _, _, _ = env.step(action)
         nn_ip_state = concatenated_state
         # Ego slice: the first _len_ego_state entries of the flat observation.
         ego_nn_input_state = deepcopy(
             concatenated_state[0:observer._len_ego_state])
         # Map the normalized NN input back to un-normalized ego values.
         reverted_observed_state = observer.rev_observe_for_ego_vehicle(
             nn_ip_state)
         # Prepend one zero slot so _norm sees the full state layout
         # (presumably the time/index column that rev_observe drops —
         # TODO confirm against NearestAgentsObserver).
         ext_reverted_observed_state = np.zeros(
             (reverted_observed_state.shape[0] + 1))
         ext_reverted_observed_state[1:] = reverted_observed_state
         # Re-normalize and select the ego entries; this must match the
         # original ego slice up to float rounding.
         renormed_ego_state = observer._select_state_by_index(
             observer._norm(ext_reverted_observed_state))
         # Slow the loop down so the rendered viewer is watchable.
         time.sleep(0.2)
         np.testing.assert_array_almost_equal(ego_nn_input_state,
                                              renormed_ego_state)
Esempio n. 29
0
    def test_parameters(self):
        """Exercises ParameterServer basics: default values, overwrite
        protection, direct assignment, child servers and JSON export."""
        p = ParameterServer()

        # A freshly accessed hierarchy returns the supplied default (True).
        self.assertTrue(p["LetsTest"]["hierarchy", "bla", True])

        # The first access stores the default; a later access with a
        # different default must NOT overwrite the stored value.
        first_length = p["Car"]["Length", "Car Length", 6]
        second_length = p["Car"]["Length", "Car Length", 8]
        self.assertEqual(first_length, 6)
        # Value must not change, since the key already exists in the dict.
        self.assertEqual(second_length, 6)

        # Access without description and default returns the stored value.
        self.assertEqual(p["Car"]["Length"], 6)

        # Direct assignment works for both flat and nested keys.
        p["Age"] = 24
        self.assertEqual(p["Age"], 24)
        p["Localization"]["Number of Particles"] = 2000
        self.assertEqual(p["Localization"]["Number of Particles"], 2000)

        # C++ Test in /bark/commons/Params/params_test.h
        # write in parameters in C++ and check whether they can be accessed in python afterwards
        #ParamsTest(p)
        #self.assertEqual(p["param_cpp"], 16.5)

        # Children added from python behave like the root server.
        child = p.AddChild("ch")
        self.assertTrue(child["ChildTest"]["hierarchy", "bla", True])

        # Parameters can be serialized to a json file.
        p.Save("written_a_param_test.json")
    def create_from_config(self, config_param_object, road_corridor,
                           agent_states, **kwargs):
        """Samples one behavior model per agent from the configured types.

        Args:
          config_param_object: Parameter node holding "ModelTypesList" and,
            under the "ModelParams" child, one child per model type.
          road_corridor: Unused here; kept for the config-reader interface.
          agent_states: One entry per agent; only its length is used.
          **kwargs: Unused; kept for interface compatibility.

        Returns:
          Tuple of (behavior_models,
          {"behavior_model_types": list of sampled type names},
          config_param_object).
        """
        # Fixed missing space between the two description fragments
        # ("...modelused..." -> "...model used...").
        model_types = config_param_object["ModelTypesList", "Type of behavior model " \
                    "used for all vehicles", ["BehaviorIDMClassic", "BehaviorMobil"]]
        model_params = config_param_object.AddChild("ModelParams")
        # ----- DEFAULT PARAMETER HANDLING
        # Instantiate each model type once so its default parameters are
        # written into the scenario's parameter tree.
        for model_type in model_types:
            behavior_params = model_params.AddChild(model_type)
            _, _ = self.model_from_model_type(model_type, behavior_params)
            # param server must be persisted for each behavior to enable
            # serialization of parameters

        #------ BEHAVIOR MODEL SAMPLING
        behavior_models = []
        behavior_model_types = []
        for _ in agent_states:
            # Uniformly pick one of the configured model types per agent.
            model_idx = self.random_state.randint(low=0,
                                                  high=len(model_types),
                                                  size=None)
            model_type = model_types[model_idx]
            model_type_params = model_params.AddChild(model_type)
            # Removed a dead `params = ParameterServer()` that was shadowed
            # immediately; the param server returned here was also unused.
            bark_model, _ = self.model_from_model_type(
                model_type, model_type_params)
            # Persist the param server so the model's parameters serialize.
            self.param_servers.append(model_type_params)
            behavior_models.append(bark_model)
            behavior_model_types.append(model_type)
        return behavior_models, {
            "behavior_model_types": behavior_model_types
        }, config_param_object