Example 1
0
    def test_uniform_vehicle_distribution_default_params(self):
        """Checks deterministic scenario generation and (de)serialization.

        Generates two scenarios from a JSON parameter file, asserts the
        agents' ids, initial states and model types, then verifies that
        scenarios reloaded from the dumped file match the generated ones.
        """
        param_server = ParameterServer(
            filename=
            "modules/runtime/tests/data/deterministic_scenario_test.json")
        scenario_generation = DeterministicScenarioGeneration(
            num_scenarios=2, random_seed=0, params=param_server)
        # serialize now so the scenarios can be reloaded further below
        scenario_generation.dump_scenario_list("test.scenario")
        self.assertEqual(len(scenario_generation._scenario_list), 2)
        self.assertEqual(
            len(scenario_generation._scenario_list[0]._agent_list), 2)

        # assert scenario generation: first agent uses constant velocity
        agent0 = scenario_generation._scenario_list[0]._agent_list[0]
        np.testing.assert_array_equal(agent0.state,
                                      np.array([0., 0., 0., 0., 5.0]))
        self.assertEqual(agent0.id, 0)
        self.assertEqual(str(agent0.behavior_model),
                         "bark.behavior.BehaviorConstantVelocity")
        self.assertEqual(str(agent0.dynamic_model),
                         "bark.dynamic.SingleTrackModel")
        self.assertEqual(str(agent0.execution_model),
                         "bark.dynamic.ExecutionModelInterpolate")
        # second agent uses the IDM behavior model
        agent1 = scenario_generation._scenario_list[0]._agent_list[1]
        #np.testing.assert_array_equal(agent1.state, np.array([0., 10., 0., 0., 5.0]))
        self.assertEqual(agent1.id, 1)
        self.assertEqual(str(agent1.behavior_model),
                         "bark.behavior.BehaviorIDMClassic")

        # TODO(@hart): make sure the map is not reloaded for rl-performance
        for _ in range(0, 10):
            _ = scenario_generation.get_next_scenario()

        # loading serialized scenarios: agent count and behavior models
        # must match the generated scenario list
        scenario_loader = ScenarioGeneration()
        scenario_loader.load_scenario_list("test.scenario")
        self.assertEqual(len(scenario_loader._scenario_list), 2)
        self.assertEqual(
            len(scenario_loader._scenario_list[0]._agent_list),
            len(scenario_generation._scenario_list[0]._agent_list))
        self.assertEqual(
            str(scenario_loader._scenario_list[0]._agent_list[0].behavior_model
                ), "bark.behavior.BehaviorConstantVelocity")
        #np.testing.assert_array_equal(scenario_loader._scenario_list[0]._agent_list[0].state,
        #  np.array([0., 0., 0., 0., 5.0]))

        self.assertEqual(
            str(scenario_loader._scenario_list[0]._agent_list[1].behavior_model
                ), "bark.behavior.BehaviorIDMClassic")
Example 2
0
    def test_motion_primitives_concat_state(self):
        """Runs random motion-primitive actions through the RL runtime.

        Resets three scenarios and steps each with sampled actions until
        the episode terminates (done) or 50 steps have elapsed.
        """
        params = ParameterServer(
            filename="tests/data/deterministic_scenario_test.json")
        scenario_generation = DeterministicScenarioGeneration(num_scenarios=3,
                                                              random_seed=0,
                                                              params=params)
        state_observer = SimpleObserver(params=params)
        action_wrapper = MotionPrimitives(params=params)
        evaluator = GoalReached(params=params)
        viewer = MPViewer(params=params,
                          x_range=[-30, 30],
                          y_range=[-40, 40],
                          use_world_bounds=True)

        runtimerl = RuntimeRL(action_wrapper=action_wrapper,
                              observer=state_observer,
                              evaluator=evaluator,
                              step_time=0.2,
                              viewer=viewer,
                              scenario_generator=scenario_generation,
                              render=False)

        for _ in range(0, 3):
            runtimerl.reset()
            for _ in range(0, 50):  # run each scenario for up to 50 steps
                action = action_wrapper.action_space.sample()
                next_observed_state, reward, done, info = \
                  runtimerl.step(action)
                if done:
                    print("State: {} \n Reward: {} \n Done {}, Info: {} \n \
              ================================================="                                                                          . \
                      format(next_observed_state, reward, done, info))
                    break
Example 3
0
  def test_python_model(self):
    """Smoke test for attaching a Python DynamicBehaviorModel to agent 0.

    Verifies the model can be assigned and cloned after a reset, and that
    its last action can be set and read back around a world step.
    """
    params = ParameterServer(
      filename="modules/runtime/tests/data/deterministic_scenario.json")
    scenario_gen = DeterministicScenarioGeneration(num_scenarios=3,
                                                   random_seed=0,
                                                   params=params)
    map_viewer = MPViewer(params=params,
                          follow_agent_id=False,
                          use_world_bounds=True)
    env = Runtime(0.2, map_viewer, scenario_gen, render=True)

    track_model = SingleTrackModel(params)
    dyn_behavior = DynamicBehaviorModel(track_model, params)

    # first reset: the model can be assigned and cloned
    env.reset()
    env._world.get_agent(0).behavior_model = dyn_behavior
    env._world.get_agent(0).behavior_model.clone()

    # second reset: set the last action and read it back around a step
    env.reset()
    env._world.get_agent(0).behavior_model = dyn_behavior
    env._world.get_agent(0).behavior_model.set_last_action(np.array([1., 2.]))
    print(env._world.get_agent(0).behavior_model.get_last_action())
    env._world.step(0.2)
    print(env._world.get_agent(0).behavior_model.get_last_action())
Example 4
0
    def _build_configuration(self):
        """Assembles the full SAC training configuration.

        Wires up scenario generation, observer, behavior model, evaluator,
        viewer, the RL runtime, the TF-Agents environment, the SAC agent
        and its runner, storing each piece on ``self``.
        """
        self._scenario_generator = DeterministicScenarioGeneration(
            num_scenarios=3, random_seed=0, params=self._params)
        self._observer = CustomObserver(params=self._params)
        self._behavior_model = DynamicModel(params=self._params)
        self._evaluator = CustomEvaluator(params=self._params)

        self._viewer = MPViewer(params=self._params,
                                x_range=[-30, 30],
                                y_range=[-20, 40],
                                follow_agent_id=True)
        # NOTE: wrap the viewer in VideoRenderer(renderer=...,
        # world_step_time=0.2) to record videos instead.
        self._runtime = RuntimeRL(action_wrapper=self._behavior_model,
                                  observer=self._observer,
                                  evaluator=self._evaluator,
                                  step_time=0.2,
                                  viewer=self._viewer,
                                  scenario_generator=self._scenario_generator)
        tfa_env = tf_py_environment.TFPyEnvironment(TFAWrapper(self._runtime))
        self._agent = SACAgent(tfa_env, params=self._params)
        self._runner = SACRunner(tfa_env,
                                 self._agent,
                                 params=self._params,
                                 unwrapped_runtime=self._runtime)
Example 5
0
    def test_python_model_inheritance(self):
        """Checks a behavior model implemented via Python inheritance.

        Assigns the wrapper model to agent 0 of the first scenario, sets a
        last action, and steps the world once so the model is exercised.
        """
        params = ParameterServer(
            filename="modules/runtime/tests/data/deterministic_scenario.json")
        scenario_gen = DeterministicScenarioGeneration(num_scenarios=3,
                                                       random_seed=0,
                                                       params=params)
        viewer = MPViewer(params=params,
                          follow_agent_id=False,
                          use_world_bounds=True)
        scenario, _ = scenario_gen.get_next_scenario()
        world = scenario.get_world_state()
        model = PythonBehaviorModelWrapperInheritance(params)

        world.GetAgent(0).behavior_model = model
        world.GetAgent(0).behavior_model.SetLastAction(
            np.array([1., 1.], dtype=np.float32))
        world.Step(0.2)
Example 6
0
    def test_tfa_runtime():
        """Validates the RL runtime as a TF-Agents py-environment.

        Wraps RuntimeRL in TFAWrapper, runs the TF-Agents environment
        validator for five episodes, and finally wraps the result into a
        TFPyEnvironment.
        """
        param_server = ParameterServer(
            filename="tests/data/deterministic_scenario_test.json")
        scenario_gen = DeterministicScenarioGeneration(num_scenarios=3,
                                                       random_seed=0,
                                                       params=param_server)
        observer = ClosestAgentsObserver(params=param_server)
        wrapper = DynamicModel(params=param_server)
        goal_evaluator = GoalReached(params=param_server)
        mp_viewer = MPViewer(params=param_server,
                             x_range=[-30, 30],
                             y_range=[-20, 40],
                             follow_agent_id=True)  # use_world_bounds=True

        runtime = RuntimeRL(action_wrapper=wrapper,
                            observer=observer,
                            evaluator=goal_evaluator,
                            step_time=0.05,
                            viewer=mp_viewer,
                            scenario_generator=scenario_gen)

        wrapped = TFAWrapper(runtime)
        _ = wrapped.reset()

        # run the TF-Agents environment contract checks
        utils.validate_py_environment(wrapped, episodes=5)
        _ = tf_py_environment.TFPyEnvironment(wrapped)
Example 7
0
    def test_runner():
        """End-to-end check of the TF-Agents runner.

        Builds the full SAC setup, collects initial episodes, then runs
        train(), visualize() and evaluate() once each.
        """
        param_server = ParameterServer(
            filename="tests/data/deterministic_scenario_test.json")
        # tests run relative to the package root directory
        param_server["BaseDir"] = os.path.dirname(os.path.dirname(__file__))
        scenario_gen = DeterministicScenarioGeneration(num_scenarios=3,
                                                       random_seed=0,
                                                       params=param_server)
        observer = ClosestAgentsObserver(params=param_server)
        wrapper = DynamicModel(params=param_server)
        goal_evaluator = GoalReached(params=param_server)
        mp_viewer = MPViewer(params=param_server,
                             x_range=[-30, 30],
                             y_range=[-20, 40],
                             follow_agent_id=True)
        runtime = RuntimeRL(action_wrapper=wrapper,
                            observer=observer,
                            evaluator=goal_evaluator,
                            step_time=0.2,
                            viewer=mp_viewer,
                            scenario_generator=scenario_gen,
                            render=False)
        tfa_env = tf_py_environment.TFPyEnvironment(TFAWrapper(runtime))
        agent = SACAgent(tfa_env, params=param_server)
        runner = TFARunner(tfa_env,
                           agent,
                           params=param_server,
                           unwrapped_runtime=runtime)
        runner.collect_initial_episodes()

        # main functionalities
        runner.train()
        runner.visualize()
        runner.evaluate()
Example 8
0
    def test_agent(self):
        """Checks SACAgent construction, naming, reset and checkpoint load."""
        param_server = ParameterServer(
            filename="tests/data/deterministic_scenario_test.json")
        # tests run relative to the package root directory
        param_server["BaseDir"] = os.path.dirname(os.path.dirname(__file__))
        scenario_gen = DeterministicScenarioGeneration(num_scenarios=2,
                                                       random_seed=0,
                                                       params=param_server)
        observer = ClosestAgentsObserver(params=param_server)
        wrapper = DynamicModel(params=param_server)
        goal_evaluator = GoalReached(params=param_server)
        mp_viewer = MPViewer(params=param_server,
                             x_range=[-30, 30],
                             y_range=[-20, 40],
                             follow_agent_id=True)  # use_world_bounds=True

        runtime = RuntimeRL(action_wrapper=wrapper,
                            observer=observer,
                            evaluator=goal_evaluator,
                            step_time=0.05,
                            viewer=mp_viewer,
                            scenario_generator=scenario_gen)

        tfa_env = tf_py_environment.TFPyEnvironment(TFAWrapper(runtime))
        agent = SACAgent(tfa_env, params=param_server)
        self.assertEqual(agent._agent.name, "sac_agent")
        agent.reset()

        # TODO(@hart): does not work because of read-only file-system
        # agent.save()
        agent.load()
Example 9
0
    def test_runtime_rl(self):
        """Steps the RL runtime over many episodes and checks its outputs.

        For 100 resets: steps with small sampled actions, asserts the
        observation layout (16 values; slices 0:4 and 4:8 hold the
        normalized state entries 1:5 of agents 100 and 101), and asserts
        every episode ends with done and a goal reward of 1.
        """
        params = ParameterServer(
            filename="tests/data/deterministic_scenario_test.json")
        scenario_generation = DeterministicScenarioGeneration(num_scenarios=2,
                                                              random_seed=0,
                                                              params=params)
        state_observer = SimpleObserver(params=params)
        action_wrapper = DynamicModel(params=params)
        evaluator = GoalReached(params=params)
        viewer = MPViewer(params=params,
                          x_range=[-30, 30],
                          y_range=[-40, 40],
                          use_world_bounds=True)

        runtimerl = RuntimeRL(action_wrapper=action_wrapper,
                              observer=state_observer,
                              evaluator=evaluator,
                              step_time=0.2,
                              viewer=viewer,
                              scenario_generator=scenario_generation,
                              render=False)

        start_time = time.time()
        for _ in range(0, 100):
            runtimerl.reset()
            done = False
            reward = 0.
            for _ in range(0, 50):  # run each scenario for up to 50 steps
                action = action_wrapper.action_space.sample(
                ) / 100  # scaled down so the agent roughly goes straight
                next_observed_state, reward, done, info = \
                  runtimerl.step(action)
                # observer: the concatenated observation has 16 entries
                self.assertEqual(len(next_observed_state), 16)
                np.testing.assert_array_equal(
                    next_observed_state[0:4],
                    state_observer._normalize(
                        runtimerl._world.agents[100].state)[1:5])
                np.testing.assert_array_equal(
                    next_observed_state[4:8],
                    state_observer._normalize(
                        runtimerl._world.agents[101].state)[1:5])
                if done:
                    print("State: {} \n Reward: {} \n Done {}, Info: {} \n \
              ================================================="                                                                          . \
                      format(next_observed_state, reward, done, info))
                    break
            # must assert to equal as the agent reaches the goal in the
            # specified number of steps
            self.assertEqual(done, True)
            # goal must have been reached which returns a reward of 1.
            self.assertEqual(reward, 1.)
            self.assertEqual(runtimerl._world.agents[100].id, 100)
            self.assertEqual(runtimerl._world.agents[101].id, 101)
        end_time = time.time()
        print("100 runs took {}s.".format(str(end_time - start_time)))
Example 10
0
  def test_runtime(self):
    """Asserts the runtime to make sure the basic
       functionality is given by the current state of BARK.

    Runs five episodes of 35 steps each: checks the scenario index cycles
    through the three scenarios, agent ids stay stable while stepping,
    every step changes each agent's state, and reset() restores the
    initial states.
    """
    param_server = ParameterServer(
      filename="tests/data/deterministic_scenario_test.json")
    scenario_generation = DeterministicScenarioGeneration(num_scenarios=3,
                                                          random_seed=0,
                                                          params=param_server)

    param_server["Visualization"]["Agents"]["DrawRoute",
      "Draw the local map of each agent",
      True]
    viewer = MPViewer(params=param_server,
                      use_world_bounds=True)
    env = Runtime(0.2,
                  viewer,
                  scenario_generation,
                  render=True)

    env.reset()
    # remember the initial ids/states to verify them again after reset()
    agent_ids = []
    agent_states = []
    for agent in env._world.agents.values():
      agent_ids.append(agent.id)
      agent_states.append(agent.state)

    for i in range(0, 5):
      print("Scenario {}:".format(
        str(env._scenario_generator._current_scenario_idx)))
      # assert scenario ids: generation cycles through the 3 scenarios
      self.assertEqual(env._scenario_generator._current_scenario_idx, (i % 3) + 1)
      for _ in range(0, 35):
        # assert ids; the inner loops use a distinct index name so the
        # outer episode counter i is not shadowed (bug fix)
        states_before = []
        for idx, (key, agent) in enumerate(env._world.agents.items()):
          self.assertEqual(key, agent.id)
          self.assertEqual(agent_ids[idx], agent.id)
          states_before.append(agent.state)
          # TODO(@hart): why does this not work
          print(key, agent.goal_definition.goal_shape)
        env.step()
        # assert state has been changed by the step() function
        for idx, (key, agent) in enumerate(env._world.agents.items()):
          np.testing.assert_equal(np.any(np.not_equal(states_before[idx],
                                         agent.state)), True)

      # check whether the reset works
      env.reset()
      for idx, (key, agent) in enumerate(env._world.agents.items()):
        self.assertEqual(key, agent.id)
        self.assertEqual(agent_ids[idx], agent.id)
        np.testing.assert_array_equal(agent_states[idx], agent.state)
Example 11
0
    def test_visualization(self):
        """Renders a few steps of each scenario to smoke-test the viewer."""
        params = ParameterServer(
            filename="modules/runtime/tests/data/deterministic_scenario.json")
        scenario_gen = DeterministicScenarioGeneration(num_scenarios=3,
                                                       random_seed=0,
                                                       params=params)
        mp_viewer = MPViewer(params=params,
                             follow_agent_id=False,
                             use_world_bounds=True)
        env = Runtime(0.2, mp_viewer, scenario_gen, render=True)

        env.reset()

        # five episodes of five rendered steps each
        for _ in range(0, 5):
            print("Scenario {}:".format(
                str(env._scenario_generator._current_scenario_idx)))
            for _ in range(0, 5):
                env.step()
            env.reset()
Example 12
0
  def test_python_model(self):
    """Checks a DummyBehaviorModel against expected trajectories.

    The asserted states look like [time, x, y, theta, velocity], with the
    third entry advancing by 0.2 per 0.2s step -- assumption inferred
    from the expected arrays; TODO confirm against DummyBehaviorModel.
    """
    param_server = ParameterServer(
      filename="modules/runtime/tests/data/deterministic_scenario.json")
    scenario_generation = DeterministicScenarioGeneration(num_scenarios=3,
                                                          random_seed=0,
                                                          params=param_server)
    viewer = MPViewer(params=param_server,
                      follow_agent_id=False,
                      use_world_bounds=True)
    env = Runtime(0.2,
                  viewer,
                  scenario_generation,
                  render=True)

    behavior_model = DummyBehaviorModel(param_server)

    # model can be assigned to agent 0 and cloned
    env.reset()
    env._world.get_agent(0).behavior_model = behavior_model
    env._world.get_agent(0).behavior_model.clone()

    # plan() for 0.2s must yield the expected final trajectory point
    np.testing.assert_array_equal(
      env._world.get_agent(0).behavior_model.plan(0.2, env._world)[1],
      np.array([0.2, 5111.626, 5106.8305 + 0.2, 1.5, 10]))

    env.reset()
    env._world.get_agent(0).behavior_model = behavior_model
    # three 0.2s world steps; each must advance the agent's state by the
    # same deterministic increment
    env._world.step(0.2)
    np.testing.assert_array_equal(
      env._world.get_agent(0).state,
      np.array([0.2, 5111.626, 5106.8305 + 0.2, 1.5, 10], dtype=np.float32))
    env._world.step(0.2)
    np.testing.assert_array_equal(
      env._world.get_agent(0).state,
      np.array([0.4, 5111.626, 5106.8305 + 0.4, 1.5, 10], dtype=np.float32))
    env._world.step(0.2)
    np.testing.assert_array_equal(
      env._world.get_agent(0).state,
      np.array([0.6, 5111.626, 5106.8305 + 0.6, 1.5, 10], dtype=np.float32))

    print("History:", env._world.get_agent(0).history)
    # environment loop
    env.reset()
    for i in range(0, 7):
      env.step()