def run_benchmark(args):
    """Materialize a task config from the template, run its benchmark,
    then delete the generated config file.

    Args:
        args: parsed CLI namespace; forwarded wholesale to the config
            generator and read for ``metrics_suffix`` / ``framework``.
    """
    # Expand the template into a concrete, user-specific config file.
    cfg_process.generate_cfg(CONFIG_TEMPLATE_DIR, CONFIG_DIR, **vars(args))
    config.read(CONFIG_DIR)

    # The generated file is expected to hold exactly one task section.
    task = config.sections()[0]

    def option(key):
        # Shorthand for reading one option of the selected task section.
        return config.get(task, key)

    metrics_manager.benchmark(
        command_to_execute=option("command_to_execute"),
        metric_patterns=literal_eval(option("patterns")),
        metric_names=literal_eval(option("metrics")),
        metric_compute_methods=literal_eval(option("compute_method")),
        num_gpus=int(option("num_gpus")),
        task_name=task,
        suffix=args.metrics_suffix,
        framework=args.framework,
    )

    # Remove the generated config so the next run starts from the template.
    os.remove(CONFIG_DIR)
# --- Example 2 (scraped variant; separator artifact converted to comment) ---
def run_benchmark(args):
    """Run one benchmark task described by a metrics template/policy.

    Args:
        args: parsed CLI namespace providing ``data_set``,
            ``metrics_template``, ``metrics_policy``, ``command_to_execute``,
            ``num_gpus``, ``task_name``, ``metrics_suffix`` and ``framework``.
    """
    # Fetch ImageNet data only when the chosen dataset needs it.
    if 'imagenet' in args.data_set:
        data_manager.getImagenetData(args.data_set)

    config.read(args.metrics_template)

    # Pre-initialize so a section missing one of the keys fails loudly at the
    # benchmark call instead of raising NameError here.
    metric_patterns = None
    metric_names = None
    metric_compute_methods = None

    # FIX: the original `else` branch treated ANY key other than
    # 'patterns'/'metrics' as the compute-method list, so a stray option in
    # the section silently clobbered metric_compute_methods. Match the
    # expected key explicitly and ignore unknown options.
    for name, value in config.items(args.metrics_policy):
        if name == 'patterns':
            metric_patterns = literal_eval(value)
        elif name == 'metrics':
            metric_names = literal_eval(value)
        elif name == 'compute_method':
            metric_compute_methods = literal_eval(value)

    # Snapshot uptime on the result manager before launching the run.
    metrics_manager.BenchmarkResultManager.uptime()

    metrics_manager.benchmark(command_to_execute=args.command_to_execute,
                              metric_patterns=metric_patterns,
                              metric_names=metric_names,
                              metric_compute_methods=metric_compute_methods,
                              num_gpus=args.num_gpus,
                              task_name=args.task_name,
                              suffix=args.metrics_suffix,
                              framework=args.framework)
# --- Example 3 (scraped variant; separator artifact converted to comment) ---
    # CLI option: which dataset the benchmarked command will use.
    parser.add_argument(
        '--data-set',
        type=str,
        help='The data set to use for benchmarking, eg. imagenet')

    args = parser.parse_args()

    # Download ImageNet data only when it was explicitly requested.
    if (args.data_set == 'imagenet'):
        data_manager.getImagenetData()

    config.read(CONFIG_TEMPLATE)

    # Pull metric definitions for the selected policy section.
    # NOTE(review): any key other than 'patterns'/'metrics' falls into the
    # compute-method branch — assumes the section holds exactly these three
    # keys, each in a Python-literal form parseable by literal_eval.
    for name, value in config.items(args.metrics_policy):
        if (name == 'patterns'):
            metric_patterns = literal_eval(value)
        elif (name == 'metrics'):
            metric_names = literal_eval(value)
        else:
            metric_compute_methods = literal_eval(value)

    # Presumably snapshots uptime on the result manager before the run —
    # TODO(review): confirm what uptime() records.
    metrics_manager.BenchmarkResultManager.uptime()

    # Launch the benchmark with the parsed metric spec and CLI settings.
    metrics_manager.benchmark(command_to_execute=args.command_to_execute,
                              metric_patterns=metric_patterns,
                              metric_names=metric_names,
                              metric_compute_methods=metric_compute_methods,
                              num_gpus=args.num_gpus,
                              task_name=args.task_name,
                              suffix=args.metrics_suffix,
                              framework=args.framework)
    # CLI option: path of the metrics-pattern template (defaults to the
    # module-level CONFIG_TEMPLATE).
    parser.add_argument('--metrics-template', type=str, help='The template file to use for metrics pattern', default=CONFIG_TEMPLATE)
    
    args = parser.parse_args()    
   
    # Fetch ImageNet data only when the chosen dataset needs it.
    if 'imagenet' in args.data_set: 
        data_manager.getImagenetData(args.data_set)

    config.read(args.metrics_template)

    # Pull metric definitions for the selected policy section.
    # NOTE(review): any key other than 'patterns'/'metrics' falls into the
    # compute-method branch — assumes the section holds exactly these three
    # keys, each in a Python-literal form parseable by literal_eval.
    for name, value in config.items(args.metrics_policy):
        if(name == 'patterns'):
            metric_patterns = literal_eval(value)
        elif(name == 'metrics'):
            metric_names= literal_eval(value)
        else:
            metric_compute_methods = literal_eval(value)
            
    # Presumably snapshots uptime on the result manager before the run —
    # TODO(review): confirm what uptime() records.
    metrics_manager.BenchmarkResultManager.uptime()        

    # Launch the benchmark with the parsed metric spec and CLI settings.
    metrics_manager.benchmark(
        command_to_execute=args.command_to_execute,
        metric_patterns=metric_patterns,
        metric_names=metric_names,
        metric_compute_methods=metric_compute_methods,
        num_gpus=args.num_gpus,
        task_name=args.task_name,
        suffix=args.metrics_suffix,
        framework=args.framework
    )