Example #1
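The listing omits the test file's imports. A minimal sketch of what the snippet needs is given below; the module paths follow one recent bark-simulator layout and have moved between releases, so treat them as assumptions:

from load.benchmark_database import BenchmarkDatabase             # path assumed
from serialization.database_serializer import DatabaseSerializer  # path assumed
from bark.benchmark.benchmark_runner_mp import BenchmarkRunnerMP  # path assumed
from bark.runtime.commons.parameters import ParameterServer       # path assumed
from bark.runtime.viewer.matplotlib_viewer import MPViewer        # path assumed
from bark.core.models.behavior import BehaviorIDMClassic, \
    BehaviorConstantVelocity                                      # path assumed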
    def test_database_multiprocessing_runner(self):
        # Serialize a reduced database (10 scenarios per scenario set) and
        # release it locally under a test version tag.
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        # Evaluators define the metrics recorded for each scenario run.
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        # A run terminates as soon as one of these conditions evaluates to True.
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        # Parameters apply only to the evaluated agents; they are not passed
        # to the scenario!
        params = ParameterServer()
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        # The multiprocessing runner distributes benchmark configs over
        # worker processes.
        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        # Run every behavior against every serialized scenario.
        result = benchmark_runner.run()

        # Re-run a single benchmark config with live visualization.
        params2 = ParameterServer()
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        rst, _ = benchmark_runner.run_benchmark_config(10, viewer=viewer)

        # Re-run the whole benchmark while recording the scenario histories.
        rst = benchmark_runner.run(maintain_history=True)
        self.assertEqual(len(rst.get_histories()), 40)

        # A single config can also return its scenario history; individual
        # world states from that history can then be drawn.
        rst, scenario_history = benchmark_runner.run_benchmark_config(
            11, viewer=None, maintain_history=True)
        print(scenario_history)
        viewer = MPViewer(params=params2,
                          x_range=[5060, 5160],
                          y_range=[5070, 5150],
                          use_world_bounds=True)
        viewer.drawWorld(world=scenario_history[5].GetWorldState(),
                         eval_agent_ids=scenario_history[5].eval_agent_ids)

        viewer.show(block=True)

        df = result.get_data_frame()
        print(df)
        # 2 behaviors * 10 serialized scenarios * 2 scenario sets = 40 rows
        self.assertEqual(len(df.index), 40)
Example #2
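This test needs the same imports sketched for Example #1, plus the following (paths again assumed from one recent bark-simulator layout):

import matplotlib.pyplot as plt
from bark.benchmark.benchmark_runner import BenchmarkResult       # path assumed
from bark.benchmark.benchmark_analyzer import BenchmarkAnalyzer   # path assumed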
    def test_database_run_and_analyze(self):
        # Serialize and release a reduced test database, as in Example #1.
        dbs = DatabaseSerializer(test_scenarios=4,
                                 test_world_steps=5,
                                 num_serialize_scenarios=10)
        dbs.process("database")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {
            "success": "EvaluatorGoalReached",
            "collision": "EvaluatorCollisionEgoAgent",
            "max_steps": "EvaluatorStepCount"
        }
        terminal_when = {
            "collision": lambda x: x,
            "max_steps": lambda x: x > 5
        }
        # Parameters apply only to the evaluated agents; they are not passed
        # to the scenario!
        params = ParameterServer()
        behaviors_tested = {
            "IDM": BehaviorIDMClassic(params),
            "Const": BehaviorConstantVelocity(params)
        }

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                             evaluators=evaluators,
                                             terminal_when=terminal_when,
                                             behaviors=behaviors_tested,
                                             log_eval_avg_every=10)

        result = benchmark_runner.run(maintain_history=True)

        # Dump the results to disk and load them back, so the analysis below
        # operates on a deserialized BenchmarkResult.
        result.dump("./benchmark_results.pickle")
        result_loaded = BenchmarkResult.load("./benchmark_results.pickle")

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(params=params2,
                          center=[5112, 5165],
                          y_length=120,
                          enforce_y_length=True,
                          axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
        # Select, per behavior, the configs that did not reach the goal.
        configs = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "IDM",
            "success": lambda x: not x
        })
        configs_const = analyzer.find_configs(criteria={
            "behavior": lambda x: x == "Const",
            "success": lambda x: not x
        })

        # analyzer.visualize(configs_idx_list=configs, viewer=viewer,
        #                    real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax1)
        viewer2 = MPViewer(params=params2,
                           center=[5112, 5165],
                           y_length=120,
                           enforce_y_length=True,
                           axis=ax2)
        # Replay two failed configs per behavior side by side, one viewer each.
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2],
                           viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=1,
                           fontsize=12)