Example #1
0
if __name__ == "__main__":
    # CLI entry point: run an autonet benchmark described by a config file.
    # NOTE(review): this excerpt lacks the imports (argparse, os) and the
    # definitions of Benchmark / ConfigFileParser — they live elsewhere in
    # the original file.
    parser = argparse.ArgumentParser(description='Run benchmarks for autonet.')
    parser.add_argument("--run_id", default="0", help="An id for the run.")
    parser.add_argument("--task_id", default=-1, type=int, help="An id for the task. Necessary on cluster.")
    parser.add_argument("--partial_benchmark", default=None, help="Only run a part of the benchmark. Run other parts later or in parallel. 3-tuple: instance_slice, autonet_config_slice, run_number_range.")
    parser.add_argument("--result_dir", default=None, help="Override result dir in benchmark config.")
    parser.add_argument("--host_config", default=None, help="Override some configs according to host specifics.")
    parser.add_argument('benchmark', help='The benchmark to run')
    args = parser.parse_args()

    # "-" is used as a range separator when run ids are parsed elsewhere
    # (see the run_id_range handling in the sibling visualization script),
    # so a single id must not contain it — NOTE(review): inferred; confirm.
    assert "-" not in args.run_id, "The run id must not contain a minus"
    
    benchmark_config_file = args.benchmark
    host_config_file = args.host_config

    # Read the benchmark config, then layer host-specific overrides on top.
    benchmark = Benchmark()
    config_parser = benchmark.get_benchmark_config_file_parser()

    benchmark_config = config_parser.read(benchmark_config_file)
    benchmark_config.update(config_parser.read(host_config_file))

    # --result_dir is resolved relative to the autonet home directory.
    if (args.result_dir is not None):
        benchmark_config['result_dir'] = os.path.join(ConfigFileParser.get_autonet_home(), args.result_dir)

    # --partial_benchmark is a comma-separated 3-tuple:
    # instance_slice, autonet_config_slice, run_number_range.
    # Missing trailing elements simply leave the config defaults in place.
    if (args.partial_benchmark is not None):
        split = args.partial_benchmark.split(',')
        if (len(split) > 0):
            benchmark_config['instance_slice'] = split[0]
        if (len(split) > 1):
            benchmark_config['autonet_config_slice'] = split[1]
        if (len(split) > 2):  # truncated in this excerpt — presumably sets 'run_number_range' from split[2]
Example #2
0
    ]
    # For each declared template variable, take the first matching
    # "name=value" entry from args.template_args; if none is present,
    # fall back to the variable's declared default.
    # NOTE(review): the opening of runscript_template_args and the argparse
    # setup are above this excerpt.
    parsed_template_args = dict()
    for variable_name, default in runscript_template_args:
        try:
            value = [
                a.split("=")[1] for a in args.template_args
                if a.split("=")[0] == variable_name
            ][0]
        except IndexError:
            # No "variable_name=..." entry was given on the command line.
            value = default
        parsed_template_args[variable_name] = value

    # get benchmark config
    benchmark_config_file = args.benchmark

    # Read the benchmark config, apply host overrides, then fill defaults.
    # NOTE(review): host_config_file is defined above this excerpt.
    benchmark = Benchmark()
    config_parser = benchmark.get_benchmark_config_file_parser()

    benchmark_config = config_parser.read(benchmark_config_file)
    benchmark_config.update(config_parser.read(host_config_file))
    config_parser.set_defaults(benchmark_config)

    # get ranges of runs, autonet_configs and instances
    all_configs = ForAutoNetConfig.get_config_files(benchmark_config,
                                                    parse_slice=False)
    all_instances = ForInstance.get_instances(benchmark_config,
                                              instances_must_exist=True)

    # Index ranges used to enumerate every (run, config, instance)
    # combination of the benchmark.
    runs_range = list(range(benchmark_config["num_runs"]))
    configs_range = list(range(len(all_configs)))
    instances_range = list(range(len(all_instances)))
                        help="Set font size.")
    parser.add_argument('benchmark', help='The benchmark to visualize')

    args = parser.parse_args()

    # --run_id_range is either a single id ("3") or an inclusive span
    # ("3-7"); normalize both forms to a Python range object.
    if "-" in args.run_id_range:
        run_id_range = range(int(args.run_id_range.split("-")[0]),
                             int(args.run_id_range.split("-")[1]) + 1)
    else:
        run_id_range = range(int(args.run_id_range),
                             int(args.run_id_range) + 1)

    benchmark_config_file = args.benchmark
    host_config_file = args.host_config

    # Read the benchmark config and layer host-specific overrides on top.
    benchmark = Benchmark()
    config_parser = benchmark.get_benchmark_config_file_parser()

    benchmark_config = config_parser.read(benchmark_config_file)
    benchmark_config.update(config_parser.read(host_config_file))

    # --result_dir is resolved relative to the autonet home directory.
    if (args.result_dir is not None):
        benchmark_config['result_dir'] = os.path.join(
            ConfigFileParser.get_autonet_home(), args.result_dir)

    # Copy the visualization options from the CLI into the config dict;
    # --plot_logs is a comma-separated list, empty if omitted.
    benchmark_config['run_id_range'] = run_id_range
    benchmark_config['plot_logs'] = args.plot_logs.split(
        ",") if args.plot_logs is not None else list()
    benchmark_config['only_finished_runs'] = args.only_finished_runs
    benchmark_config['output_folder'] = args.output_folder
    benchmark_config['scale_uncertainty'] = args.scale_uncertainty
    args = parser.parse_args()

    # Optional --run_id_range: a single id ("3") or an inclusive span
    # ("3-7"); when given, normalize to a Python range object. Unlike the
    # variant above, None is allowed here and passed through unchanged.
    run_id_range = args.run_id_range
    if args.run_id_range is not None:
        if "-" in args.run_id_range:
            run_id_range = range(int(args.run_id_range.split("-")[0]),
                                 int(args.run_id_range.split("-")[1]) + 1)
        else:
            run_id_range = range(int(args.run_id_range),
                                 int(args.run_id_range) + 1)

    benchmark_config_file = args.benchmark
    host_config_file = args.host_config

    # Read the benchmark config and layer host-specific overrides on top.
    benchmark = Benchmark()
    config_parser = benchmark.get_benchmark_config_file_parser()

    benchmark_config = config_parser.read(benchmark_config_file)
    benchmark_config.update(config_parser.read(host_config_file))

    # Here --result_dir is taken as-is (absolutized), not joined onto the
    # autonet home like in the other scripts in this excerpt.
    if (args.result_dir is not None):
        benchmark_config['result_dir'] = os.path.abspath(args.result_dir)

    # In this variant --partial_benchmark is already a sequence
    # (presumably declared with nargs — confirm against the parser setup);
    # positions override instance_slice, autonet_config_slice, and
    # run_number_range in turn.
    if (args.partial_benchmark is not None):
        if (len(args.partial_benchmark) > 0):
            benchmark_config['instance_slice'] = args.partial_benchmark[0]
        if (len(args.partial_benchmark) > 1):
            benchmark_config['autonet_config_slice'] = args.partial_benchmark[
                1]
        if (len(args.partial_benchmark) > 2):  # truncated in this excerpt — presumably sets 'run_number_range'