Example #1
def main():
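    # Parse the standard accuracy-test arguments (engine file, batch size, sample count, verbosity)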
    args = common_args.parse_args(common_args.ACCURACY_ARGS)
    logging.info("Running accuracy test...")
    run_SSDResNet34_accuracy(args["engine_file"],
                             args["batch_size"],
                             args["num_samples"],
                             verbose=args["verbose"])
Example #2
def main():
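    # The only command-line argument needed here is the directory holding the harness summaries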
    log_dir = common_args.parse_args(["log_dir"])["log_dir"]

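    # Load the per-config performance results written by the perf harness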
    summary_file = os.path.join(log_dir, "perf_harness_summary.json")
    with open(summary_file) as f:
        results = json.load(f)

    print("")
    print(
        "======================= Perf harness results: ======================="
    )
    print("")

    for config_name in results:
        print("{:}:".format(config_name))
        for benchmark in results[config_name]:
            print("    {:}: {:}".format(benchmark,
                                        results[config_name][benchmark]))
        print("")

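    # Load the per-config accuracy results from the same log directory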
    summary_file = os.path.join(log_dir, "accuracy_summary.json")
    with open(summary_file) as f:
        results = json.load(f)

    print("")
    print("======================= Accuracy results: =======================")
    print("")

    for config_name in results:
        print("{:}:".format(config_name))
        for benchmark in results[config_name]:
            print("    {:}: {:}".format(benchmark,
                                        results[config_name][benchmark]))
        print("")
Example #3
def main():
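    # Parse the standard accuracy-test arguments used by the DLRM accuracy harness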
    args = common_args.parse_args(common_args.ACCURACY_ARGS)
    logging.info("Running accuracy test...")
    acc = run_dlrm_accuracy(args["engine_file"],
                            args["batch_size"],
                            args["num_samples"],
                            verbose=args["verbose"])
    logging.info("Accuracy: {:}".format(acc))
Example #4
def apply_overrides(config, keys):
    # Make a copy so we don't modify the original dict
    config = dict(config)
    override_args = common_args.parse_args(keys)
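    # Copy over only the overrides that were actually provided on the command line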
    for key in override_args:
        # Unset values (None) and unset store_true values (False) are both falsy
        if override_args[key]:
            config[key] = override_args[key]
    return config
Example #5
def main():
    log_dir = common_args.parse_args(["log_dir"])["log_dir"]

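    # Gather the performance summary for this run from log_dir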
    results = get_perf_summary(log_dir)
    print("")
    print("======================= Perf harness results: =======================")
    print("")
    for config_name in results:
        print("{:}:".format(config_name))
        for benchmark in results[config_name]:
            print("    {:}: {:}".format(benchmark, results[config_name][benchmark]))
        print("")

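    # Gather the accuracy summary from the same run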
    results = get_acc_summary(log_dir)
    print("")
    print("======================= Accuracy results: =======================")
    print("")
    for config_name in results:
        print("{:}:".format(config_name))
        for benchmark in results[config_name]:
            print("    {:}: {:}".format(benchmark, results[config_name][benchmark]))
        print("")

    # If this is a power run, we should print out the average power
    power_vals = get_power_summary(log_dir)
    if power_vals is not None:
        print("")
        print("======================= Power results: =======================")
        print("")
        for config_name in results:
            print("{:}:".format(config_name))
            for benchmark in results[config_name]:
                if len(power_vals) > 0:
                    avg_power = sum(power_vals) / len(power_vals)
                    print("    {}: avg power under load: {:.2f}W with {} power samples".format(benchmark, avg_power, len(power_vals)))
                else:
                    print("    {}: cannot find any power samples in the test window. Is the timezone setting correct?".format(benchmark))
            print("")
Example #6
def main():
    log_dir = common_args.parse_args(["log_dir"])["log_dir"]

    summary_file = os.path.join(log_dir, "perf_harness_summary.json")
    with open(summary_file) as f:
        results = json.load(f)

    print("")
    print(
        "======================= Perf harness results: ======================="
    )
    print("")

    for config_name in results:
        print("{:}:".format(config_name))
        for benchmark in results[config_name]:
            print("    {:}: {:}".format(benchmark,
                                        results[config_name][benchmark]))
        print("")

    summary_file = os.path.join(log_dir, "accuracy_summary.json")
    with open(summary_file) as f:
        results = json.load(f)

    print("")
    print("======================= Accuracy results: =======================")
    print("")

    for config_name in results:
        print("{:}:".format(config_name))
        for benchmark in results[config_name]:
            print("    {:}: {:}".format(benchmark,
                                        results[config_name][benchmark]))
        print("")

    # If this is a power run, we should print out the average power
    if os.path.exists(os.path.join(log_dir, "spl.txt")):
        print("")
        print("======================= Power results: =======================")
        print("")
        for config_name in results:
            print("{:}:".format(config_name))
            for benchmark in results[config_name]:
                # Get power_begin and power_end from the loadgen detail log
                detail_logs = glob.glob(os.path.join(log_dir, "**",
                                                     "mlperf_log_detail.txt"),
                                        recursive=True)
                if len(detail_logs) == 0:
                    raise RuntimeError(
                        "Could not find detail logs for power run!")
                elif len(detail_logs) > 1:
                    print(
                        "WARNING: Power harness run contains multiple benchmark-scenario runs. This is not advised."
                    )

                # Select the correct detail log
                scenario = config_name.split("-")[-1]
                detail_log_path = None
                for detail_log in detail_logs:
                    components = detail_log.split("/")
                    if scenario == components[-2] and benchmark == components[-3]:
                        detail_log_path = detail_log
                        break

                if detail_log_path is None:
                    raise RuntimeError("Could not find mlperf_log_detail.txt for {}-{}".format(
                        benchmark, scenario))

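                # Convert the loadgen power_begin/power_end entries into datetimes bounding the measurement window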
                power_times = from_loadgen_by_keys(
                    os.path.dirname(detail_log_path),
                    ["power_begin", "power_end"])
                power_begin = from_timestamp(power_times["power_begin"])
                power_end = from_timestamp(power_times["power_end"])

                # Read power metrics from spl.txt
                with open(os.path.join(log_dir, "spl.txt")) as f:
                    lines = f.read().split("\n")

                power_vals = []
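                # Each spl.txt record is a CSV row: field 1 is the timestamp, field 3 the power draw in watts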
                for line in lines:
                    data = line.split(",")
                    if len(data) != 12:
                        continue

                    timestamp = data[1]
                    watts = float(data[3])
                    curr_time = from_timestamp(timestamp)

                    if power_begin <= curr_time <= power_end:
                        power_vals.append(watts)
                if len(power_vals) > 0:
                    avg_power = sum(power_vals) / len(power_vals)
                    print("    {}: avg power under load: {:.2f}W with {} power samples".format(
                        benchmark, avg_power, len(power_vals)))
                else:
                    print("    {}: cannot find any power samples in the test window. Is the timezone setting correct?".format(benchmark))
            print("")
Example #7
                # Run harness
                handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power, compliance=True)

                # Cleanup audit.config
                logging.info("AUDIT HARNESS: Cleaning Up audit.config...")
                auditing.cleanup()
            elif main_args["action"] == "run_audit_verification":
                logging.info("Running compliance verification for test " + main_args['audit_test'])
                handle_audit_verification(audit_test_name=main_args['audit_test'], config=benchmark_conf)
                auditing.cleanup()
            elif main_args["action"] == "calibrate":
                # To generate calibration cache, we only need to run each benchmark once.
                # Use offline config.
                if benchmark_conf["scenario"] == SCENARIOS.Offline:
                    handle_calibrate(benchmark_conf)
            elif main_args["action"] == "generate_conf_files":
                handle_run_harness(benchmark_conf, need_gpu, need_dla, generate_conf_files_only=True)

if __name__ == "__main__":
    mp.set_start_method("spawn")

    # Check for any invalid or misspelled flags.
    common_args.check_args()
    main_args = common_args.parse_args(common_args.MAIN_ARGS)

    # Load System ID
    system_id = get_system_id()
    logging.info("Detected System ID: " + system_id)

    main(main_args, system_id)