Example no. 1
    def test_dump_and_partial_load(self):
        result_num = 100
        confs, result_data, histories = random_benchmark_conf_data(result_num, 2000000, hist_size=1500000)
        br = BenchmarkResult(result_dict=result_data,
          benchmark_configs=confs, histories=histories)
        br.dump("./results_all", dump_configs=True, dump_histories=True, max_mb_per_file = 5)
        br_loaded = BenchmarkResult.load("./results_all")
        loaded_dict = br_loaded.get_result_dict()
        self.assertEqual(br.get_result_dict(), loaded_dict)

        loaded_configs_idx = list(range(10, 20))
        processed_files = br_loaded.load_benchmark_configs(config_idx_list = loaded_configs_idx)
        loaded_confs = br_loaded.get_benchmark_configs()
        self.assertEqual(len(loaded_confs), 10)
        # 2 MB per conf, max 5 MB per file -> 2 confs per file -> 10/2 = 5 files
        self.assertEqual(len(processed_files), 5)
        for conf_idx in loaded_configs_idx:
            self.assertEqual(br_loaded.get_benchmark_config(conf_idx), confs[conf_idx])

        loaded_configs_idx = list(range(10, 27))
        processed_files = br_loaded.load_histories(config_idx_list = loaded_configs_idx)
        loaded_histories = br_loaded.get_histories()
        self.assertEqual(len(loaded_histories), 18) # one more than specified since it shares a file with a requested index
        # 1.5 MB per history, max 5 MB per file -> 3 histories per file -> ceil(17/3) = 6 files
        self.assertEqual(len(processed_files), 6)
        for conf_idx in loaded_configs_idx:
            self.assertEqual(br_loaded.get_history(conf_idx), histories[conf_idx])
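The file-count assertions above follow from how dump() chunks configs and histories into files of at most max_mb_per_file megabytes. A minimal sketch of that arithmetic, with an illustrative helper name that is not part of the BenchmarkResult API:

import math

def expected_file_count(num_items, mb_per_item, max_mb_per_file):
    # how many items fit into a single file under the size budget
    items_per_file = max(1, int(max_mb_per_file // mb_per_item))
    # number of files needed to cover all requested items
    return math.ceil(num_items / items_per_file)

# configs: 2 MB each, 5 MB budget -> 2 per file -> 10 configs span 5 files
assert expected_file_count(10, 2, 5) == 5
# histories: 1.5 MB each, 5 MB budget -> 3 per file -> 17 indices touch 6 files
assert expected_file_count(17, 1.5, 5) == 6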
Example no. 2
    def merge_checkpoint_benchmark_results(checkpoint_dir):
        checkpoint_files = glob.glob(os.path.join(checkpoint_dir,
                                                  "**/*.ckpnt"),
                                     recursive=True)
        merged_result = BenchmarkResult()
        # merge all checkpoints with new results
        for checkpoint_file in checkpoint_files:
            logging.info("Loading checkpoint {}".format(
                os.path.abspath(checkpoint_file)))
            next_result = BenchmarkResult.load(os.path.abspath(checkpoint_file), \
                load_configs=True, load_histories=True)
            merged_result.extend(next_result)
        # dump merged result; the filename is defined upfront so the cleanup
        # loop below can reference it even if nothing was dumped
        merged_result_filename = os.path.join(checkpoint_dir,
                                              "merged_results.ckpnt")
        if len(merged_result.get_result_dict()) > 0:
            logging.info("Dumping merged result")
            merged_result.dump(merged_result_filename,
                               dump_configs=True, dump_histories=True)

        # delete checkpoints
        for checkpoint_file in checkpoint_files:
            if checkpoint_file == merged_result_filename:
                continue
            os.remove(checkpoint_file)
            logging.info(
                "Removed old checkpoint file {}".format(checkpoint_file))
        return merged_result
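A minimal usage sketch for the merger above, assuming it is exposed as a static method of BenchmarkRunner (the import path and checkpoint directory are illustrative):

import logging
# import path is an assumption and may differ between BARK versions
from bark.benchmark.benchmark_runner import BenchmarkRunner

logging.basicConfig(level=logging.INFO)
# "./checkpoints" is an illustrative directory holding *.ckpnt files from parallel runs
merged = BenchmarkRunner.merge_checkpoint_benchmark_results("./checkpoints")
print(len(merged.get_result_dict()), "benchmark results after merging")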
Example no. 3
    def test_dump_and_load_results(self):
        result_data = random_result_data(size=10)
        br = BenchmarkResult(result_dict=result_data)
        br.dump("./results")
        br_loaded = BenchmarkResult.load("./results")
        loaded_dict = br_loaded.get_result_dict()
        self.assertEqual(result_data, loaded_dict)
Example no. 4
    def test_dump_and_load_benchmark_configs(self):
        result_num = 100
        confs, result_data, _ = random_benchmark_conf_data(result_num, 2000000)
        br = BenchmarkResult(result_dict=result_data, benchmark_configs=confs)
        br.dump("./results_with_confs", dump_configs=True, max_mb_per_file=5)
        br_loaded = BenchmarkResult.load("./results_with_confs")
        br_loaded.load_benchmark_configs(config_idx_list=list(range(0, result_num)))
        loaded_confs = br_loaded.get_benchmark_configs()
        self.assertEqual(confs, loaded_confs)
        loaded_dict = br_loaded.get_result_dict()
        self.assertEqual(br.get_result_dict(), loaded_dict)
Example no. 5
    def test_dump_and_load_histories_one(self):
        result_num = 2
        result_data = random_result_data(size=result_num)
        histories = random_history_data(result_num, 20)
        br = BenchmarkResult(result_dict=result_data, histories=histories)
        br.dump("./results_with_history", dump_histories=True, max_mb_per_file=2)
        br_loaded = BenchmarkResult.load("./results_with_history")
        br_loaded.load_histories(config_idx_list=list(histories.keys()))
        loaded_histories = br_loaded.get_histories()
        self.assertEqual(histories, loaded_histories)
        loaded_dict = br_loaded.get_result_dict()
        self.assertEqual(result_data, loaded_dict)
Example no. 6
    def load(directory):
        collector = DemonstrationCollector()
        collection_result_fullname = os.path.join(
            directory, DemonstrationCollector.collection_result_filename())
        if os.path.exists(collection_result_fullname):
            collector._collection_result = BenchmarkResult.load(
                collection_result_fullname)
        demonstration_fullname = os.path.join(
            directory, DemonstrationCollector.demonstrations_filename())
        if os.path.exists(demonstration_fullname):
            collector._demonstrations = from_pickle(
                directory, DemonstrationCollector.demonstrations_filename())
        collector._directory = directory
        return collector
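A brief usage sketch for the loader above, assuming load is a static method of DemonstrationCollector and that its constructor initializes the missing fields to None; the path is illustrative and the BARK import is omitted:

# the directory must have been written by a previous collection run
collector = DemonstrationCollector.load("./demonstrations")
# fields restored by load, if the corresponding files existed
print(collector._collection_result)
print(collector._demonstrations)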
Example no. 7
    def test_extend_from_file(self):
        try:
          # remove dumps left over from previous test runs
          os.remove("./br1")
          os.remove("./br2")
          os.remove("./br3")
        except OSError:
          pass
        result_num = 100
        confs, result_data, histories1 = random_benchmark_conf_data(result_num, 2000000, hist_size=1500000, offset=0)
        br1 = BenchmarkResult(result_dict=result_data,
          benchmark_configs=confs, histories=histories1)
        br1.dump("./br1", dump_histories=True, dump_configs=True)
        br1_df = br1.get_data_frame().copy()

        result_num = 30
        confs2, result_data2, histories2 = random_benchmark_conf_data(result_num, 2000000, hist_size=1500000, offset=200)
        br2 = BenchmarkResult(result_dict=result_data2,
          benchmark_configs=confs2, histories=histories2)
        br2.dump(filename="./br2", dump_histories=True, dump_configs=True)

        result_num = 10
        confs3, result_data3, histories3 = random_benchmark_conf_data(result_num, 2000000, hist_size=1500000, offset=400)
        br3 = BenchmarkResult(result_dict=result_data3,
          benchmark_configs=confs3, histories=histories3)
        br3.dump(filename="./br3", dump_histories=True, dump_configs=True)

        br1.extend(benchmark_result=br2, file_level=True)
        br1.extend(benchmark_result=br3, file_level=True)

        br_loaded = BenchmarkResult.load("./br1", load_histories=True, load_configs=True)
        df_desired = br1_df
        df_desired = pd.concat([df_desired, br2.get_data_frame()])
        df_desired = pd.concat([df_desired, br3.get_data_frame()])
        self.assertEqual(len(br_loaded.get_data_frame().index), len(df_desired.index))

        extended_confs = br_loaded.get_benchmark_configs()
        self.assertEqual(len(extended_confs), 140)
        extended_histories = br_loaded.get_histories()
        self.assertEqual(len(extended_histories), 140)
        expected_histories = histories1
        expected_histories.update(histories2)
        expected_histories.update(histories3)
        for bc in extended_confs:
            self.assertEqual(br_loaded.get_history(bc.config_idx), expected_histories[bc.config_idx])
Example no. 8
    def merge_checkpoint_benchmark_results(checkpoint_dir):
        checkpoint_files = glob.glob(os.path.join(checkpoint_dir, "**/*.ckpnt"), recursive=True)
        merged_result_filename = BenchmarkRunner.get_merged_result_filename(checkpoint_dir)
        if os.path.exists(merged_result_filename):
          merged_result = BenchmarkResult.load_results(filename=merged_result_filename)
        else:
          merged_result = BenchmarkResult(file_name=merged_result_filename)
        # merge all checkpoints with new results
        for checkpoint_file in checkpoint_files:
          loaded_result = BenchmarkResult.load(os.path.abspath(checkpoint_file))
          merged_result.extend(loaded_result, file_level=True)
          logging.info("Extending with checkpoint {}".format(checkpoint_file))

        # delete checkpoints
        for checkpoint_file in checkpoint_files:
          if "merged_result" in checkpoint_file:
            continue
          os.remove(checkpoint_file)
          logging.info("Removed old checkpoint file {}".format(checkpoint_file))
        return merged_result
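Because the merged result is written under a fixed name, a later session can reload it directly instead of re-merging. A sketch using only calls that appear in the snippet above (directory name illustrative, BARK imports omitted):

import os

merged_filename = BenchmarkRunner.get_merged_result_filename("./checkpoints")
if os.path.exists(merged_filename):
    merged = BenchmarkResult.load_results(filename=merged_filename)
    print(len(merged.get_result_dict()), "results in merged checkpoint")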
    def test_database_run_and_analyze(self):
        dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=3, num_serialize_scenarios=2)
        # to find database files
        cwd = os.getcwd()
        if not debug:
          os.chdir("../benchmark_database/")
        else:
          os.chdir("bazel-bin/bark/benchmark/tests/py_benchmark_process_tests.runfiles/benchmark_database")
        dbs.process("data/database1")
        local_release_filename = dbs.release(version="test")

        db = BenchmarkDatabase(database_root=local_release_filename)
        evaluators = {"success": "EvaluatorGoalReached", "collision": "EvaluatorCollisionEgoAgent",
                      "max_steps": "EvaluatorStepCount"}
        terminal_when = {"collision": lambda x: x, "max_steps": lambda x: x > 2}
        params = ParameterServer()  # only for the evaluated agents, not passed to the scenario!
        behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}

        benchmark_runner = BenchmarkRunnerMP(benchmark_database=db,
                                           evaluators=evaluators,
                                           terminal_when=terminal_when,
                                           behaviors=behaviors_tested,
                                           log_eval_avg_every=2)

        result = benchmark_runner.run(maintain_history=True)

        result.dump("./benchmark_results", dump_configs=True,
                    dump_histories=True, max_mb_per_file=1)
        result_loaded = BenchmarkResult.load("./benchmark_results")
        result_loaded.load_histories()
        result_loaded.load_benchmark_configs()

        params2 = ParameterServer()

        fig = plt.figure(figsize=[10, 10])
        viewer = MPViewer(
            params=params2,
            center=[5112, 5165],
            y_length=120,
            enforce_y_length=True,
            axis=fig.gca())

        analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
        configs = analyzer.find_configs(criteria={"behavior": lambda x: x=="IDM", "success": lambda x : not x})
        configs_const = analyzer.find_configs(criteria={"behavior": lambda x: x=="Const", "success": lambda x : not x})

        os.chdir(cwd)
        # analyzer.visualize(configs_idx_list=configs,
        #                    viewer=viewer, real_time_factor=10, fontsize=12)
        plt.close(fig)

        fig, (ax1, ax2) = plt.subplots(1, 2)
        viewer1 = MPViewer(
            params=params2,
            center=[5112, 5165],
            y_length=120,
            enforce_y_length=True,
            axis=ax1)
        viewer2 = MPViewer(
            params=params2,
            center=[5112, 5165],
            y_length=120,
            enforce_y_length=True,
            axis=ax2)
        analyzer.visualize(configs_idx_list=[configs[1:3], configs_const[1:3]],
                           viewer=[viewer1, viewer2], viewer_names=["IDM", "ConstVelocity"],
                           real_time_factor=10, fontsize=12)