Exemplo n.º 1
0
 def write_scenario_parameter(self, config_idx, folder):
   """Save the scenario parameters of one benchmark config as JSON.

   Args:
       config_idx: index of the benchmark config within the benchmark result.
       folder: destination directory for "scenario_parameters.json".
   """
   benchmark_config = super().get_benchmark_result().get_benchmark_config(config_idx)
   # nothing is written when the index does not resolve to a config
   if benchmark_config is not None:
     params = benchmark_config.scenario.json_params
     # round-trip the raw json params through a ParameterServer to serialize
     p = ParameterServer()
     p.ConvertToParam(params)
     p.Save(os.path.join(folder, "scenario_parameters.json"))
Exemplo n.º 2
0
    def test_database_multiprocessing_runner(self):
        """End-to-end run of BenchmarkRunnerMP over a freshly serialized database."""
        # build a small release: 5 scenarios serialized per scenario set
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=5)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        # evaluator name -> registered evaluator type name
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        # a run terminates on collision or after more than 2 steps
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10)

        result = benchmark_runner.run()

        df = result.get_data_frame()
        print(df)
        self.assertEqual(len(df.index), 20) # 2 Behaviors * 5 Serialize Scenarios * 2 scenario sets

        # replay a single benchmark config with a viewer attached
        params2 = ParameterServer()
        viewer = MPViewer(
              params=params2,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        rst  = benchmark_runner.run_benchmark_config(10, viewer=viewer)
Exemplo n.º 3
0
    def test_database_multiprocessing_history(self):
        """Check that maintain_history stores one scenario history per benchmark config."""
        # small release: 2 serialized scenarios per scenario set
        dbs = DatabaseSerializer(test_scenarios=4, test_world_steps=5, num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success" : "EvaluatorGoalReached", "collision" : "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        # a run terminates on collision or after more than 2 steps
        terminal_when = {"collision" :lambda x: x, "max_steps": lambda x : x>2}
        params = ParameterServer() # only for evaluated agents not passed to scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=10)
        # 2 behaviors * 2 serialized scenarios * 2 scenario sets = 8 histories
        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 2*2*2)

        # rerun a single config and visualize one stored world state
        rst = benchmark_runner.run_benchmark_config(3, viewer=None, maintain_history=True)
        scenario_history = rst.get_histories()[3]
        print(scenario_history)
        params = ParameterServer()
        viewer = MPViewer(
              params=params,
              x_range=[5060, 5160],
              y_range=[5070,5150],
              use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[1].GetWorldState(),
                          eval_agent_ids=scenario_history[1].eval_agent_ids)

        viewer.show(block=True)
Exemplo n.º 4
0
  def test_planning_time(self):
    """EvaluatorPlanningTime reports 0.0 before any world step was executed."""
    params = ParameterServer()

    # models for the single agent under test
    behavior = BehaviorConstantAcceleration(params)
    execution = ExecutionModelInterpolate(params)
    dynamics = SingleTrackModel(params)

    # agent starts exactly at the (translated) goal polygon's center
    shape = CarLimousine()
    start_state = np.array([0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
    agent_params = params.AddChild("agent1")
    square = Polygon2d([0, 0, 0],
                       [Point2d(-4, -4), Point2d(-4, 4),
                        Point2d(4, 4), Point2d(4, -4)])
    square = square.Translate(Point2d(-191.789, -50.1725))

    agent = Agent(start_state,
                  behavior,
                  dynamics,
                  execution,
                  shape,
                  agent_params,
                  GoalDefinitionPolygon(square),
                  None)

    world = World(params)
    world.AddAgent(agent)
    world.AddEvaluator("time", EvaluatorPlanningTime(agent.id))

    # no Step() was called, so the accumulated planning time must be zero
    info = world.Evaluate()
    self.assertEqual(info["time"], 0.0)
Exemplo n.º 5
0
    def test_one_agent_at_goal_state_limits(self):
        """An agent spawned inside a GoalDefinitionStateLimits region evaluates as success."""
        params = ParameterServer()

        # models for the single agent under test
        behavior = BehaviorConstantVelocity(params)
        execution = ExecutionModelInterpolate(params)
        dynamics = SingleTrackModel(params)

        # agent geometry and start pose, centered on the goal region
        shape = CarLimousine()
        start_state = np.array(
            [0, -191.789, -50.1725, 3.14 * 3.0 / 4.0, 150 / 3.6])
        agent_params = params.AddChild("agent1")
        unit_square = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1), Point2d(-1, 1),
             Point2d(1, 1), Point2d(1, -1)])
        unit_square = unit_square.Translate(Point2d(-191.789, -50.1725))

        # goal: inside the square with heading within +/-0.08 rad of 3/4*pi
        goal = GoalDefinitionStateLimits(
            unit_square,
            (3.14 * 3.0 / 4.0 - 0.08, 3.14 * 3.0 / 4.0 + 0.08))
        agent = Agent(start_state, behavior, dynamics, execution,
                      shape, agent_params, goal, None)

        world = World(params)
        world.AddAgent(agent)
        world.AddEvaluator("success", EvaluatorGoalReached(agent.id))

        info = world.Evaluate()
        self.assertEqual(info["success"], True)
Exemplo n.º 6
0
    def test_parameters(self):
        """Exercise ParameterServer get/set semantics and JSON export."""
        p = ParameterServer()

        # creating a new hierarchy entry returns its default value
        self.assertTrue(p["LetsTest"]["hierarchy", "bla", True])

        # the first access fixes the stored value; later defaults are ignored
        first = p["Car"]["Length", "Car Length", 6]
        second = p["Car"]["Length", "Car Length", 8]
        self.assertEqual(first, 6)
        # value must not change, the key already exists in the dict
        self.assertEqual(second, 6)

        # access without description and default value
        self.assertEqual(p["Car"]["Length"], 6)

        # plain assignment, top-level and nested
        p["Age"] = 24
        self.assertEqual(p["Age"], 24)
        p["Localization"]["Number of Particles"] = 2000
        self.assertEqual(p["Localization"]["Number of Particles"], 2000)

        # C++ Test in /bark/commons/Params/params_test.h
        # write in parameters in C++ and check whether they can be accessed in python afterwards
        #ParamsTest(p)
        #self.assertEqual(p["param_cpp"], 16.5)

        # children created from python behave like the root server
        child = p.AddChild("ch")
        self.assertTrue(child["ChildTest"]["hierarchy", "bla", True])

        # persist everything to disk
        p.Save("written_a_param_test.json")
Exemplo n.º 7
0
    def test_number_of_agents(self):
        """EvaluatorNumberOfAgents keeps the agent count from its creation time."""
        # World Definition
        params = ParameterServer()
        world = World(params)

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarLimousine()
        init_state = np.array([0, 13, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        # second agent slightly ahead in the same lane
        init_state2 = np.array([0, 16, -1.75, 0, 5])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        evaluator = EvaluatorNumberOfAgents(agent.id)
        world.AddEvaluator("num_agents", evaluator)

        info = world.Evaluate()
        self.assertEqual(info["num_agents"], len(world.agents))
        # do it once more
        self.assertEqual(info["num_agents"], len(world.agents))

        world.RemoveAgentById(agent2.id)
        info = world.Evaluate()
        # evaluator should still hold two
        self.assertNotEqual(info["num_agents"], len(world.agents))
        self.assertEqual(info["num_agents"], 2)

        world.Step(0.1)
        info = world.Evaluate()
        # evaluator should still hold two
        self.assertEqual(info["num_agents"], 2)
Exemplo n.º 8
0
    def test_agent_pickle_uct_planner(self):
        """Pickle round-trip of an agent whose behavior is the UCT macro-action planner.

        Skips (returns early) when BARK was built without the UCT planner.
        """
        try:
            from bark.core.models.behavior import BehaviorUCTSingleAgentMacroActions
        except ImportError:
            # BUG FIX: the bare `except:` also swallowed SystemExit,
            # KeyboardInterrupt and unrelated errors raised during import;
            # only a missing planner build should skip this test.
            print("Rerun test with ---define planner_uct=true")
            return

        params = ParameterServer()
        behavior = BehaviorUCTSingleAgentMacroActions(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = CarLimousine()
        init_state = np.array([0, 0, 0, 0, 5])
        goal_polygon = Polygon2d(
            [0, 0, 0],
            [Point2d(-1, -1),
             Point2d(-1, 1),
             Point2d(1, 1),
             Point2d(1, -1)])
        goal_definition = GoalDefinitionPolygon(goal_polygon)
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"), goal_definition)

        agent_after = pickle_unpickle(agent)

        # the planner behavior model must survive the round trip
        self.assertTrue(
            isinstance(agent_after.behavior_model,
                       BehaviorUCTSingleAgentMacroActions))
Exemplo n.º 9
0
    def test_agent_pickle(self):
        """Agents survive pickling with polygon and state-limit goals, alone and in lists."""
        params = ParameterServer()
        behavior = BehaviorConstantVelocity(params)
        execution = ExecutionModelInterpolate(params)
        dynamic = SingleTrackModel(params)
        shape = CarLimousine()
        init_state = np.array([0, 0, 0, 0, 5])
        unit_square = Polygon2d([0, 0, 0],
                                [Point2d(-1, -1), Point2d(-1, 1),
                                 Point2d(1, 1), Point2d(1, -1)])

        # polygon goal: the goal shape's center must survive the round trip
        agent = Agent(init_state, behavior, dynamic, execution, shape,
                      params.AddChild("agent"), GoalDefinitionPolygon(unit_square))
        restored = pickle_unpickle(agent)
        self.assertEqual(restored.id, agent.id)
        self.assertTrue(np.array_equal(restored.state, agent.state))
        self.assertTrue(np.array_equal(restored.goal_definition.goal_shape.center,
                                       agent.goal_definition.goal_shape.center))

        # state-limit goal: the xy limits must survive as well
        limit_goal = GoalDefinitionStateLimits(unit_square, (0.2, 0.5))
        agent2 = Agent(init_state, behavior, dynamic, execution, shape,
                       params.AddChild("agent"), limit_goal)
        restored2 = pickle_unpickle(agent2)
        self.assertEqual(restored2.id, agent2.id)
        self.assertTrue(np.array_equal(restored2.state, agent.state))
        self.assertTrue(np.array_equal(restored2.goal_definition.xy_limits.center,
                                       agent2.goal_definition.xy_limits.center))

        # a list of agents pickles element-wise
        restored_list = pickle_unpickle([agent])
        self.assertEqual(restored_list[0].id, agent.id)
        self.assertTrue(np.array_equal(restored_list[0].state, agent.state))
Exemplo n.º 10
0
    def test_agent_from_trackfile_centered(self):
        """Create an agent from an interaction-dataset track file on a centered map."""

        map_filename = os.path.join(
            os.path.dirname(__file__),
            "../tests/data/DR_DEU_Merging_MT_v01_centered.xodr")
        track_filename = os.path.join(
            os.path.dirname(__file__),
            "../tests/data/interaction_dataset_dummy_track.csv")

        # use track 1 restricted to the [500, 1000] offset window
        agent_track_info = AgentTrackInfo(track_filename,
                                          track_id=1,
                                          start_offset=500,
                                          end_offset=1000)

        params = ParameterServer()
        params_id = params["Scenario"]["Generation"]["InteractionDataset"]
        params_id["MapFilename", "", map_filename]
        params_id["TrackFilename", "", track_filename]
        params_id["BehaviorModel", "", {}]

        # per-agent model configuration; map and behavior left unset here
        track_params = ParameterServer()
        track_params["execution_model"] = 'ExecutionModelInterpolate'
        track_params["dynamic_model"] = 'SingleTrackModel'
        track_params["map_interface"] = None  # world.map
        track_params["behavior_model"] = None

        # xy_offset presumably compensates the centered map's coordinate
        # shift — TODO confirm against the dataset/map pairing
        scenario_info = ScenarioTrackInfo(map_filename,
                                          track_filename,
                                          agent_track_info,
                                          xy_offset=[-900, -900])
        ds_reader = InteractionDatasetReader()
        agent = ds_reader.AgentFromTrackfile(track_params, params,
                                             scenario_info,
                                             agent_track_info.GetTrackId())
    def get_scenario_generator(self, scenario_set_id):
        """Load the serialized scenario set with index *scenario_set_id*.

        Returns:
            Tuple (scenario_generation, scenario_set_name, scenario_set_parameters).
        """
        serialized_file_name = self.dataframe.iloc[scenario_set_id]["Serialized"]
        # NOTE(review): serialized_file_path is computed but never used below;
        # loading relies on the chdir into database_root instead — confirm intent.
        if os.path.exists(serialized_file_name):
            serialized_file_path = serialized_file_name
        else:
            serialized_file_path = os.path.join(
                self.database_root, serialized_file_name)
        cwd = None
        if os.path.exists(self.database_root):
            # move into database root that map files can be found
            cwd = os.getcwd()
            os.chdir(self.database_root)
        param_file_name = self.dataframe.iloc[scenario_set_id]["Params"]
        if not param_file_name:
            logging.warning("No param file found for scenario set {}. Using defaults...".format(
                self.dataframe.iloc[scenario_set_id]["SetName"]))
            params = ParameterServer()
        else:
            params = ParameterServer(filename=param_file_name)

        scenario_generation = ScenarioGeneration(params=params)
        scenario_generation.load_scenario_list(filename=serialized_file_name)
        SetMapfileDirectory(self.database_root)
        # restore the original working directory if we changed it
        if cwd:
            os.chdir(cwd)
        scenario_set_name = self.dataframe.iloc[scenario_set_id]["SetName"]
        scenario_set_parameters = self.dataframe.iloc[scenario_set_id]["SetParameters"]
        return scenario_generation, scenario_set_name, scenario_set_parameters
Exemplo n.º 12
0
def generate_uct_hypothesis_behavior():
    """Build a BehaviorUCTHypothesis whose ego behavior comes from the IQN demo params."""
    iqn_params = ParameterServer(filename="configuration/params/iqn_params_demo_full.json", log_if_default=True)
    ego_behavior_params = iqn_params["ML"]["BehaviorMPMacroActions"]
    uct_params = ParameterServer(filename="configuration/params/default_uct_params.json", log_if_default=True)
    # graft the ML ego behavior parameters into the UCT configuration
    uct_params["BehaviorUctBase"]["EgoBehavior"] = ego_behavior_params
    hypothesis_behavior = BehaviorUCTHypothesis(uct_params, [])
    # snapshot the effective UCT parameters next to the run
    uct_params.Save(filename="./default_uct_params.json")
    return hypothesis_behavior, uct_params
Exemplo n.º 13
0
 def __setup_map_interface__(self):
     """Obtain a map interface for self._map_filename via a throwaway scenario."""
     dummy_params = ParameterServer()
     # we are creating a dummy scenario to get the map interface from it
     dummy_scenario = Scenario(map_file_name=self._map_filename,
                               json_params=dummy_params.ConvertToDict())
     return dummy_scenario.GetWorldState().map
Exemplo n.º 14
0
def main():
    """Run an IQN learning-from-demonstrations experiment on a merging scenario."""
    args = configure_args()
    # is_local / params_file / num_demo_scenarios are module-level globals
    if is_local:
        dir_prefix = ""
    else:
        dir_prefix="hy-iqn-lfd-full-exp.runfiles/hythe/"
    logging.info(f"Executing job: {args.jobname}")
    logging.info(f"Experiment server at: {os.getcwd()}")
    params = ParameterServer(filename=os.path.join(dir_prefix, params_file),
                             log_if_default=True)
    # the jobname doubles as the experiment's random seed
    params = configure_params(params, seed=args.jobname)
    experiment_id = params["Experiment"]["random_seed"]
    params_filename = os.path.join(params["Experiment"]["dir"], "params_{}.json".format(experiment_id))

    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)

    # extract params and save experiment parameters
    params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(params["Experiment"]["dir"], "agent/summaries")
    params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(params["Experiment"]["dir"], "agent/checkpoints")

    params.Save(filename=params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)

    # database creation: a single dense merging scenario set for demonstrations
    dbs1 = DatabaseSerializer(test_scenarios=1, test_world_steps=2,
                             num_serialize_scenarios=num_demo_scenarios)
    dbs1.process(os.path.join(dir_prefix, "configuration/database"),
      filter_sets="**/**/interaction_merging_light_dense_1D.json")
    local_release_filename = dbs1.release(version="lfd_offline")
    db1 = BenchmarkDatabase(database_root=local_release_filename)
    scenario_generator1, _, _ = db1.get_scenario_generator(0)

    env = HyDiscreteHighway(params=params,
                            scenario_generation=scenario_generator1,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    # reset the observer on the first scenario's world state
    scenario, _ = scenario_generator1.get_next_scenario()
    world = scenario.GetWorldState()
    observer.Reset(world)

    assert env.action_space._n == 8, "Action Space is incorrect!"
    run(args, params, env, db=db1)
    # save again so any params mutated during the run are persisted
    params.Save(params_filename)
    logging.info('-' * 60)
    logging.info("Writing params to :{}".format(params_filename))
    logging.info('-' * 60)
Exemplo n.º 15
0
def make_initial_world(primitives):
    """Create a two-agent test world; the ego agent uses the given motion primitives."""
    # must be within examples params folder
    params = ParameterServer()
    world = World(params)

    # ego behavior: register one motion primitive per entry in *primitives*
    ego_behavior = BehaviorMPContinuousActions(params)
    primitive_mapping = {}
    for prim in primitives:
        prim_idx = ego_behavior.AddMotionPrimitive(np.array(prim))
        primitive_mapping[prim_idx] = prim
    ego_behavior.ActionToBehavior(0)  # start with the first registered action

    ego_execution = ExecutionModelInterpolate(params)
    ego_dynamics = SingleTrackModel(params)

    # the second agent simply keeps a constant velocity
    other_behavior = BehaviorConstantVelocity(params)
    other_execution = ExecutionModelInterpolate(params)
    other_dynamics = SingleTrackModel(params)

    # simple two-lane test map
    map_interface = MapInterface()
    map_interface.SetOpenDriveMap(MakeXodrMapOneRoadTwoLanes())
    world.SetMap(map_interface)

    shape = CarRectangle()
    ego_start = np.array([0, 3, -5.25, 0, 20])

    # goal: stay near the lane center line within lateral, orientation
    # and velocity bounds
    center_line = Line2d()
    center_line.AddPoint(Point2d(0.0, -1.75))
    center_line.AddPoint(Point2d(100.0, -1.75))
    goal_definition = GoalDefinitionStateLimitsFrenet(center_line,
                                                      (0.4, 0.5),
                                                      (0.08, 0.1),
                                                      (5.0, 20.0))

    # both agents share shape, params child and goal definition
    agent_params = params.AddChild("agent1")
    ego_agent = Agent(ego_start, ego_behavior, ego_dynamics, ego_execution,
                      shape, agent_params, goal_definition, map_interface)
    world.AddAgent(ego_agent)

    other_start = np.array([0, 25, -5.25, 0, 15])
    other_agent = Agent(other_start, other_behavior, other_dynamics,
                        other_execution, shape, agent_params,
                        goal_definition, map_interface)
    world.AddAgent(other_agent)

    return world
Exemplo n.º 16
0
  def test_set_item_using_delimiter(self):
    """'::'-delimited keys address the same entries as chained child lookups."""
    p = ParameterServer()
    # create the entry first, then overwrite it through the delimiter syntax
    _ = p["test_child"]["Child2"]["ValueFloat", "Desc", 2.0]
    p["test_child::Child2::ValueFloat"] = 3.2323
    self.assertEqual(p["test_child"]["Child2"]["ValueFloat"], 3.2323)

    # AddChild also accepts a delimited path
    child = p.AddChild("test_child5::Child5")
    child["test_param2"] = "etesd99533sbgfgf"
    self.assertEqual(p["test_child5"]["Child5"]["test_param2", "Desc", 0], "etesd99533sbgfgf")
Exemplo n.º 17
0
def create_map_interface(map_file_name, road_ids):
    """Load a map via a dummy scenario and generate a road corridor for *road_ids*."""
    dummy_params = ParameterServer()
    # a throwaway scenario is the simplest way to obtain a map interface
    dummy_scenario = Scenario(map_file_name=map_file_name,
                              json_params=dummy_params.ConvertToDict())
    map_interface = dummy_scenario.GetWorldState().map
    map_interface.GenerateRoadCorridor(road_ids, XodrDrivingDirection.forward)
    return map_interface
    def test_database_runner(self):
        """BenchmarkRunner accepts string, dict and instance evaluator definitions."""
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=2)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        safe_dist_params = ParameterServer(log_if_default=True)
        test_python_params = ParameterServer(log_if_default=True)
        # evaluators may be given as type names, dicts with params, or instances
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount",
            "safe_dist_lon": {
                "type": "EvaluatorDynamicSafeDist",
                "params": safe_dist_params
            },
            "safe_dist_lat": {
                "type": "EvaluatorStaticSafeDist",
                "params": safe_dist_params
            },
            "test_evaluator": {
                "type": "TestPythonEvaluator",
                "params": test_python_params
            },
            "test_evaluator_serializable": TestPythonEvaluatorSerializable()
        }
        # terminate on collision, step overflow, or longitudinal safe-dist violation
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 2,
            "safe_dist_lon": lambda x: x
        }
        params = ParameterServer(
        )  # only for evaluated agents not passed to scenario!
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantAcceleration(params)
        }

        # make the python test evaluators resolvable by module name
        EvaluationConfig.AddEvaluationModule(
            "bark.benchmark.tests.test_evaluator")
        benchmark_runner = BenchmarkRunner(benchmark_database=db,
                                           evaluators=evaluators,
                                           log_eval_avg_every=1,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested)

        result = benchmark_runner.run()
        df = result.get_data_frame()
        print(df)
        self.assertEqual(
            len(df.index),
            2 * 2 * 2)  # 2 Behaviors * 2 Serialize Scenarios * 1 scenario sets
        result.load_benchmark_configs()
        groups = result.get_evaluation_groups()
        self.assertEqual(set(groups), set(["behavior", "scen_set"]))
Exemplo n.º 19
0
  def test_one_agent_at_goal_sequential(self):
    """Driving one constant motion primitive reaches three sequential goals in order."""
    param_server = ParameterServer()
    # Model Definition
    dynamic_model = SingleTrackModel(param_server)
    behavior_model = BehaviorMPContinuousActions(param_server)
    # single primitive: accelerate with 1, no steering
    idx = behavior_model.AddMotionPrimitive(np.array([1, 0]))
    behavior_model.ActionToBehavior(idx)
    execution_model = ExecutionModelInterpolate(param_server)


    # Agent Definition
    agent_2d_shape = CarLimousine()
    init_state = np.array([0, 0, 0, 0, 0])
    agent_params = param_server.AddChild("agent1")
    goal_frame = Polygon2d([0, 0, 0],
                             [Point2d(-1,-1),
                              Point2d(-1,1),
                              Point2d(1,1),
                              Point2d(1,-1)])

    # goal regions at x = 10, 20 and 30 along the driving direction
    goal_polygon1 = goal_frame.Translate(Point2d(10, 0))
    goal_polygon2 = goal_frame.Translate(Point2d(20, 0))
    goal_polygon3 = goal_frame.Translate(Point2d(30, 0))

    goal_def1 = GoalDefinitionStateLimits(goal_polygon1, [-0.08, 0.08])
    goal_def2 = GoalDefinitionStateLimits(goal_polygon2, [-0.08, 0.08])
    goal_def3 = GoalDefinitionStateLimits(goal_polygon3, [-0.08, 0.08])

    goal_definition = GoalDefinitionSequential([goal_def1,
                                                goal_def2,
                                                goal_def3])

    self.assertEqual(len(goal_definition.sequential_goals),3)
    agent = Agent(init_state,
                behavior_model,
                dynamic_model,
                execution_model,
                agent_2d_shape,
                agent_params,
                goal_definition,
                  None)

    world = World(param_server)
    world.AddAgent(agent)
    evaluator = EvaluatorGoalReached(agent.id)
    world.AddEvaluator("success", evaluator)

    # just drive with the single motion primitive should be successful
    for _ in range(0,1000):
        world.Step(0.2)
        info = world.Evaluate()
        if info["success"]:
            break

    self.assertEqual(info["success"], True)
    # ego must end close to the last goal's x position (30)
    self.assertAlmostEqual(agent.state[int(StateDefinition.X_POSITION)], 30, delta=0.5)
Exemplo n.º 20
0
 def test_belief_observer_observation_space(self):
     """Belief count equals max tracked vehicles times hypothesis-set size."""
     params = ParameterServer(
         filename="configuration/params/iqn_params.json")
     splits = 8
     params_behavior = ParameterServer(
         filename="configuration/params/1D_desired_gap_no_prior.json")
     behavior_space = BehaviorSpace(params_behavior)
     # fixed split of the behavior space into `splits` hypotheses per dimension
     hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
     observer = BeliefObserver(params, hypothesis_set, splits=splits)
     max_num_agents = observer._max_num_vehicles
     # one belief per (tracked vehicle, hypothesis) pair
     assert max_num_agents * len(hypothesis_set) == observer.max_beliefs
Exemplo n.º 21
0
def main():
    """Replay stored demonstrations and dump the ego world states to a dataframe."""
    map_filename = "external/bark_ml_project/bark_ml/environments/blueprints/highway/city_highway_straight.xodr"
    # pick the first params file that is not a behavior params file
    params_filename = glob.glob(os.path.join(demo_root, "params_[!behavior]*"))
    params = ParameterServer(filename=params_filename[0], log_if_default=True)
    behavior = BehaviorDiscreteMacroActionsML(params)
    evaluator = GoalReached(params)
    if is_belief_observer:
        # the belief observer needs a hypothesis set from the behavior space
        splits = 2
        bparams_filename = glob.glob(os.path.join(demo_root, "behavior_*"))
        params_behavior = ParameterServer(filename=bparams_filename[0])
        behavior_space = BehaviorSpace(params_behavior)

        hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(
            split=splits)
        observer = BeliefObserver(params, hypothesis_set, splits=splits)
    else:
        observer = NearestAgentsObserver(params)
    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)
    env = HyDiscreteHighway(params=params,
                            map_filename=map_filename,
                            behavior=behavior,
                            evaluator=evaluator,
                            observer=observer,
                            viewer=viewer,
                            render=False)

    env.reset()

    ego_world_states = []
    _, demonstrations = unpack_load_demonstrations(demo_root)
    for demo in demonstrations:
        # index 0 stays zero — presumably the time slot; verify against observer
        ego_state = np.zeros((observer._len_ego_state + 1))
        (nn_ip_state, action, reward, next_state, done, is_demo) = demo
        ego_nn_input_state = deepcopy(nn_ip_state[0:observer._len_ego_state])
        ego_state[1:] = ego_nn_input_state
        # map the normalized observation back into world coordinates
        reverted_observed_state = observer.rev_observe_for_ego_vehicle(
            ego_state)
        ego_world_states.append(
            (reverted_observed_state[int(StateDefinition.X_POSITION)],
             reverted_observed_state[int(StateDefinition.Y_POSITION)],
             reverted_observed_state[int(StateDefinition.THETA_POSITION)],
             reverted_observed_state[int(StateDefinition.VEL_POSITION)],
             action, int(is_demo)))
    df = pd.DataFrame(ego_world_states,
                      columns=[
                          'pos_x', 'pos_y', 'orientation', 'velocity',
                          'action', 'is_demo'
                      ])
    print(df.head(10))
    df.to_pickle(os.path.join(demo_root, "demonstrations/demo_dataframe"))
    return
Exemplo n.º 22
0
    def test_gap_distance_front(self):
        """EvaluatorGapDistanceFront returns the bumper-to-bumper gap to the front agent."""
        # World Definition
        params = ParameterServer()
        world = World(params)

        # center-to-center longitudinal distance between the two agents
        gap = 10

        # Model Definitions
        behavior_model = BehaviorConstantAcceleration(params)
        execution_model = ExecutionModelInterpolate(params)
        dynamic_model = SingleTrackModel(params)

        behavior_model2 = BehaviorConstantAcceleration(params)
        execution_model2 = ExecutionModelInterpolate(params)
        dynamic_model2 = SingleTrackModel(params)

        # Map Definition
        map_interface = MapInterface()
        xodr_map = MakeXodrMapOneRoadTwoLanes()
        map_interface.SetOpenDriveMap(xodr_map)
        world.SetMap(map_interface)

        agent_2d_shape = CarLimousine()
        init_state = np.array([0, 13, -1.75, 0, 5])
        agent_params = params.AddChild("agent1")
        goal_polygon = Polygon2d(
            [1, 1, 0],
            [Point2d(0, 0),
             Point2d(0, 2),
             Point2d(2, 2),
             Point2d(2, 0)])
        goal_polygon = goal_polygon.Translate(Point2d(50, -2))

        agent = Agent(init_state, behavior_model, dynamic_model,
                      execution_model, agent_2d_shape, agent_params,
                      GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent)

        # front agent: same lane and speed, `gap` meters ahead
        init_state2 = np.array([0, 13 + gap, -1.75, 0, 5])
        agent2 = Agent(init_state2, behavior_model2, dynamic_model2,
                       execution_model2, agent_2d_shape, agent_params,
                       GoalDefinitionPolygon(goal_polygon), map_interface)
        world.AddAgent(agent2)

        world.Step(0.1)

        evaluator = EvaluatorGapDistanceFront(agent.id)
        world.AddEvaluator("gap", evaluator)

        # both drive at equal constant speed, so the measured gap is the
        # center gap minus the shape overhangs front and rear
        info = world.Evaluate()
        self.assertAlmostEqual(info["gap"],
                               gap - agent_2d_shape.front_dist -
                               agent_2d_shape.rear_dist,
                               places=4)
def generate_uct_hypothesis_behavior():
    """Build a BehaviorUCTHypothesis whose ego behavior comes from the
    learned ML macro-action parameters.

    Also saves the merged UCT parameter set to ./default_uct_params.json.
    Returns a (behavior, mcts_params) tuple.
    """
    # Paths are plain relative when running locally, runfiles-relative
    # when launched from the packaged experiment binary.
    dir_prefix = "" if is_local else "hy-iqn-lfd-full-beliefs-exp.runfiles/hythe/"
    ml_params = ParameterServer(
        filename=os.path.join(dir_prefix, params_file), log_if_default=True)
    mcts_params = ParameterServer(
        filename=os.path.join(
            dir_prefix, "configuration/params/default_uct_params.json"),
        log_if_default=True)
    # Inject the learned ego-behavior parameters into the UCT configuration.
    mcts_params["BehaviorUctBase"]["EgoBehavior"] = \
        ml_params["ML"]["BehaviorMPMacroActions"]
    behavior = BehaviorUCTHypothesis(mcts_params, [])
    mcts_params.Save(filename="./default_uct_params.json")
    return behavior, mcts_params
 def _process_scenario_list(self, database_dir, scenario_set_dict):
     """Serialize each scenario set's parameters to JSON and process it.

     Writes one <set_name>.json per entry of scenario_set_dict into
     <database_dir>/scenario_sets (creating the directory if needed),
     then runs _process_json_paramfile on each written file.

     Returns True only if processing succeeded for every scenario set.
     """
     serialized_sets_dir = os.path.join(database_dir, "scenario_sets")
     if not os.path.exists(serialized_sets_dir):
         os.makedirs(serialized_sets_dir)
     process_result = True
     for scenario_set, json_params in scenario_set_dict.items():
         # Tag the parameter set with its scenario-set name before saving.
         json_params["Scenario"]["Generation"]["SetName"] = scenario_set
         params = ParameterServer(json=json_params)
         filename = os.path.join(serialized_sets_dir,
                                 "{}.json".format(scenario_set))
         print(filename)
         # ParameterServer exposes PascalCase methods (cf. `Save` used
         # elsewhere in this file); lowercase `save` would raise
         # AttributeError here.
         params.Save(filename)
         process_result = process_result and \
                 self._process_json_paramfile(filename, json=json_params)
     return process_result
Exemplo n.º 25
0
    def test_pickle_belief_observer(self):
        """A BeliefObserver must survive a pickle round-trip with its
        split count and belief-vector length intact."""
        splits = 2
        params = ParameterServer()
        behavior_params = ParameterServer()
        # Hypothesis set from a fixed partition of the behavior space.
        space = BehaviorSpace(behavior_params)
        hypothesis_set, _ = space.create_hypothesis_set_fixed_split(
            split=splits)
        original = BeliefObserver(params, hypothesis_set, splits=splits)

        restored = pickle_unpickle(original)

        self.assertIsNotNone(restored)
        self.assertTrue(isinstance(restored, BeliefObserver))
        self.assertEqual(restored.splits, original.splits)
        self.assertEqual(restored.len_beliefs, original.len_beliefs)
Exemplo n.º 26
0
def main():
    """Replay a trained FQF agent on the toy merging evaluation scenarios."""
    args = configure_args()
    exp_dir = args.exp_dir or "results/training/toy_evaluation"
    params = ParameterServer(
        filename=glob.glob(os.path.join(exp_dir, "params_*"))[0])

    behavior_param_files = glob.glob(
        os.path.join(exp_dir, "behavior_params*"))
    if behavior_param_files:
        # Experiment was trained with a belief observer over a fixed
        # split of the behavior hypothesis space.
        splits = 8
        behavior_params = ParameterServer(filename=behavior_param_files[0])
        space = BehaviorSpace(behavior_params)
        hypothesis_set, _ = space.create_hypothesis_set_fixed_split(
            split=splits)
        observer = BeliefObserver(params, hypothesis_set, splits=splits)
        behavior = BehaviorDiscreteMacroActionsML(behavior_params)
    else:
        behavior = BehaviorDiscreteMacroActionsML(params)
        observer = NearestAgentsObserver(params)

    evaluator = GoalReached(params)

    scenario_params = ParameterServer(
        filename=
        "configuration/database/scenario_sets/interaction_merging_light_dense_1D.json"
    )
    scenario_generator = ConfigurableScenarioGeneration(params=scenario_params,
                                                        num_scenarios=5)
    scenario_generator.load_scenario_list(
        glob.glob(os.path.join(exp_dir, "scenarios_list*"))[0])

    viewer = MPViewer(params=params,
                      x_range=[-35, 35],
                      y_range=[-35, 35],
                      follow_agent_id=True)
    env = HyDiscreteHighway(behavior=behavior,
                            observer=observer,
                            evaluator=evaluator,
                            viewer=viewer,
                            scenario_generation=scenario_generator,
                            render=True)

    env.reset()
    # Exercise every discrete macro action once.
    for action in range(7):
        print(action)
        env.step(action)

    agent = FQFAgent(env=env, test_env=env, params=params)
    agent.load_models(os.path.join(exp_dir, "agent/checkpoints/final"))
Exemplo n.º 27
0
    def test_execution_model_pickle(self):
        """ExecutionModelInterpolate must survive a pickle round-trip."""
        original = ExecutionModelInterpolate(ParameterServer())
        restored = pickle_unpickle(original)
        self.assertTrue(isinstance(restored, ExecutionModelInterpolate))
Exemplo n.º 28
0
    def test_behavior_model_pickle(self):
        """BehaviorConstantVelocity must survive a pickle round-trip."""
        original = BehaviorConstantVelocity(ParameterServer())
        restored = pickle_unpickle(original)
        self.assertTrue(isinstance(restored, BehaviorConstantVelocity))
Exemplo n.º 29
0
 def test_write_params_agent(self):
     """Constructing an Agent and saving the ParameterServer should
     serialize the agent's parameters to JSON without errors."""
     params = ParameterServer()
     behavior = BehaviorConstantAcceleration(params)
     execution = ExecutionModelInterpolate(params)
     dynamic = SingleTrackModel(params)
     # Rectangular 4m x 2m footprint.
     footprint = Polygon2d([1.25, 1, 0], [
         Point2d(0, 0),
         Point2d(0, 2),
         Point2d(4, 2),
         Point2d(4, 0),
         Point2d(0, 0)
     ])
     # NOTE(review): a 4-dim state is used here while other examples use
     # 5-dim states — presumably accepted by Agent; confirm against API.
     Agent(np.zeros(4), behavior, dynamic, execution, footprint,
           params.AddChild("agent"))
     params.Save("written_agents_param_test.json")
Exemplo n.º 30
0
def run_configuration(argv):
    """Train, visualize, or evaluate a SAC agent on the continuous
    merging blueprint, selected via FLAGS.mode."""
    params = ParameterServer()
    # NOTE: set ["ML"]["BehaviorTFAAgents"]["CheckpointPath"] and
    # ["ML"]["TFARunner"]["SummaryPath"] to persist checkpoints/summaries.
    params["World"]["remove_agents_out_of_map"] = True

    # Environment: continuous merging scenarios in a single-agent runtime.
    # (Keyword `number_of_senarios` is spelled as the blueprint API expects.)
    blueprint = ContinuousMergingBlueprint(params,
                                           number_of_senarios=2500,
                                           random_seed=0)
    env = SingleAgentRuntime(blueprint=blueprint, render=False)

    # SAC agent drives the ego vehicle; a PPO setup (BehaviorPPOAgent +
    # PPORunner) could be substituted here instead.
    sac_agent = BehaviorSACAgent(environment=env, params=params)
    env.ml_behavior = sac_agent
    runner = SACRunner(params=params, environment=env, agent=sac_agent)

    mode = FLAGS.mode
    if mode == "train":
        runner.SetupSummaryWriter()
        runner.Train()
    elif mode == "visualize":
        runner.Run(num_episodes=10, render=True)
    elif mode == "evaluate":
        runner.Run(num_episodes=100, render=False)