def test_collect_demonstrations(self):
  """Runs the demonstration-collection pipeline end to end.

  Collects demonstrations from a discrete highway env driven by a test demo
  behavior, processes them into experiences, then dumps and reloads the
  collector and checks the experience count survives the round trip.
  """
  params = ParameterServer()
  blueprint = DiscreteHighwayBlueprint(
      params, number_of_senarios=10, random_seed=0)
  env = SingleAgentRuntime(blueprint=blueprint, render=False)
  # Wire up the runtime with observer, action wrapper, and a test evaluator.
  env._observer = NearestAgentsObserver(params)
  env._action_wrapper = BehaviorDiscreteMacroActionsML(params)
  env._evaluator = TestEvaluator()

  demo_behavior = bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.\
    tests.test_demo_behavior.TestDemoBehavior(params)

  collector = DemonstrationCollector()
  collection_result = collector.CollectDemonstrations(
      env, demo_behavior, 4, "./test_demo_collected",
      use_mp_runner=False, runner_init_params={"deepcopy": False})
  self.assertTrue(
      os.path.exists("./test_demo_collected/collection_result"))
  print(collection_result.get_data_frame().to_string())

  experiences = collector.ProcessCollectionResult(
      eval_criteria={"goal_r1": lambda x: x})
  # Expected: 2 scenarios reach the goal (only every second one) x 3 counted
  # steps each (4 executed, the first is not counted).
  self.assertEqual(len(experiences), 2 * 3)

  # Persist and reload; the loaded collector must expose the same experiences.
  collector.dump("./final_collections")
  loaded_collector = DemonstrationCollector.load("./final_collections")
  experiences_loaded = loaded_collector.GetDemonstrationExperiences()
  print(experiences_loaded)
  self.assertEqual(len(experiences_loaded), 2 * 3)
def test_general_evaluator(self):
  """Smoke-tests GeneralEvaluator on a continuous single-lane blueprint.

  Resets the runtime and steps it four times with a zero action, printing
  the terminal flag and reward returned at each step.
  """
  params = ParameterServer()
  blueprint = ContinuousSingleLaneBlueprint(params)
  env = SingleAgentRuntime(blueprint=blueprint, render=True)
  env._evaluator = GeneralEvaluator(params)
  env.reset()
  for _ in range(4):
    # Zero steering/acceleration action at every step.
    state, terminal, reward, info = env.step(np.array([0., 0.]))
    print(terminal, reward)