Code example #1
def get_system_benchmark_config(config,
                                system_id,
                                enforce_type_equivalence=True):
    config_name = "{}/{}/config.json".format(config["benchmark"],
                                             config["scenario"])

    # Get by value (deepcopy) so that we don't modify the original dict
    benchmark_conf = copy.deepcopy(config.get("default", dict()))
    if "default" not in config:
        logging.warn(
            "{} does not have a 'default' setting.".format(config_name))

    system_conf = traverse_config(
        config_name,
        config,
        system_id, [],
        enforce_type_equivalence=enforce_type_equivalence)
    if system_conf is None:
        return None
    benchmark_conf.update(system_conf)

    # Passthrough for top level values
    benchmark_conf["system_id"] = system_id
    benchmark_conf["scenario"] = config["scenario"]
    benchmark_conf["benchmark"] = config["benchmark"]

    return benchmark_conf
Code example #2
def traverse_config(config_name, full, target, seen):
    if target in seen:
        raise RuntimeError(
            "Error in config '{}': cyclical dependency on {}".format(
                config_name, target))

    target_conf = full.get(target, None)
    if target_conf is None:
        logging.warn("Could not find configuration for {} in {}".format(
            target, config_name))
        return None

    # The 2 keys that define inheritance dependencies are "extends" and "scales"
    extends = []
    if "extends" in target_conf:
        extends = target_conf["extends"]
        del target_conf["extends"]

    scales = dict()
    if "scales" in target_conf:
        scales = target_conf["scales"]
        del target_conf["scales"]

    # extends and scales may not share any common keys
    common_keys = set(extends).intersection(set(scales.keys()))
    if len(common_keys) > 0:
        raise RuntimeError("{}:{} cannot both extend and scale {}".format(
            config_name, target,
            list(common_keys)[0]))

    conf = dict()
    # Apply extended configs
    for platform in extends:
        parent = traverse_config(config_name, full, platform, seen + [target])
        conf.update(parent)

    for platform in scales:
        parent = traverse_config(config_name, full, platform, seen + [target])
        for key in scales[platform]:
            if key not in parent:
                raise RuntimeError(
                    "{}:{} scales {}:{} which does not exist".format(
                        config_name, target, platform, key))
            parent[key] *= scales[platform][key]
        conf.update(parent)

    # Apply target overrides
    conf.update(target_conf)
    return conf
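
A minimal usage sketch (not from the project) showing how the traverse_config above resolves "extends" and "scales". The system names and keys below are invented for illustration: "A100x2" inherits "A100x1" via "scales" and multiplies the scaled key, while "A100x1_HighAccuracy" inherits via "extends" and then applies its own override.

# Hypothetical config dict; names and values are illustrative only.
full_config = {
    "A100x1": {"gpu_batch_size": 64, "server_target_qps": 1000},
    "A100x2": {"scales": {"A100x1": {"server_target_qps": 2}}},
    "A100x1_HighAccuracy": {"extends": ["A100x1"], "gpu_batch_size": 32},
}

print(traverse_config("demo/config.json", full_config, "A100x2", []))
# -> {'gpu_batch_size': 64, 'server_target_qps': 2000}
print(traverse_config("demo/config.json", full_config, "A100x1_HighAccuracy", []))
# -> {'gpu_batch_size': 32, 'server_target_qps': 1000}

Note that this early version pops "extends" and "scales" out of the shared dict in place; code example #8 deepcopies target_conf to avoid mutating the loaded config.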
Code example #3
def get_system_benchmark_config(config, system_id):
    config_name = "{}/{}/config.json".format(config["benchmark"], config["scenario"])

    benchmark_conf = config.get("default", dict())
    if "default" not in config:
        logging.warn("{} does not have a 'default' setting.".format(config_name))

    system_conf = traverse_config(config_name, config, system_id, [])
    if system_conf is None:
        return None
    benchmark_conf.update(system_conf)

    # Passthrough for top level values
    benchmark_conf["system_id"] = system_id
    benchmark_conf["scenario"] = config["scenario"]
    benchmark_conf["benchmark"] = config["benchmark"]

    return benchmark_conf
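
Assuming the traverse_config from code example #2 is in scope, here is a hedged sketch of how get_system_benchmark_config might be called; the benchmark, scenario, system id, and values are made up for illustration.

# Illustrative stand-in for a loaded <benchmark>/<scenario>/config.json plus
# the top-level metadata keys the function expects.
config = {
    "benchmark": "resnet50",
    "scenario": "Offline",
    "default": {"gpu_copy_streams": 2, "run_infer_on_copy_streams": False},
    "T4x8": {"gpu_batch_size": 256, "offline_expected_qps": 42000},
}

conf = get_system_benchmark_config(config, "T4x8")
# conf merges "default" with the "T4x8" entry and passes through the
# top-level values:
# {'gpu_copy_streams': 2, 'run_infer_on_copy_streams': False,
#  'gpu_batch_size': 256, 'offline_expected_qps': 42000,
#  'system_id': 'T4x8', 'scenario': 'Offline', 'benchmark': 'resnet50'}

Because this version takes config.get("default", dict()) by reference, the merge also mutates config["default"]; code example #1 deepcopies it for exactly that reason.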
Code example #4
File: main.py  Project: kllmia/inference_results_v0.7
def main(main_args, system_id):
    # Turn off MPS in case it's turned on.
    turn_off_mps()

    benchmarks = BENCHMARKS.ALL
    if main_args["benchmarks"] is not None:
        benchmarks = main_args["benchmarks"].split(",")
        for i, benchmark in enumerate(benchmarks):
            benchmarks[i] = BENCHMARKS.alias(benchmark)
    scenarios = SCENARIOS.ALL
    if main_args["scenarios"] is not None:
        scenarios = main_args["scenarios"].split(",")
        for i, scenario in enumerate(scenarios):
            scenarios[i] = SCENARIOS.alias(scenario)

    profile = main_args.get("profile", None)
    power = main_args.get("power", False)

    # Automatically detect architecture and scenarios and load configs
    config_files = main_args["configs"]
    if config_files == "" or config_files is None:
        config_files = find_config_files(benchmarks, scenarios)
        if config_files == "":
            logging.warn("Cannot find any valid configs for the specified benchmark-scenario pairs.")
            return

    logging.info("Using config files: {:}".format(str(config_files)))
    configs = load_configs(config_files)

    for config in configs:
        base_benchmark_conf = flatten_config(config, system_id)
        if base_benchmark_conf is None:
            continue

        base_benchmark_conf["config_name"] = "{:}_{:}_{:}".format(
            system_id,
            base_benchmark_conf["benchmark"],
            base_benchmark_conf["scenario"]
        )
        logging.info("Processing config \"{:}\"".format(base_benchmark_conf["config_name"]))

        # Load config_ver / apply overrides
        conf_vers = main_args.get("config_ver", "default").split(",")

        # Build default first. This is because some config_vers only modify harness args, and the engine is the same as
        # default. In this case, we build default first, and copy it instead of rebuilding it.
        if "default" in conf_vers:
            conf_vers = ["default"] + list(set(conf_vers) - {"default"})
        elif "all" in conf_vers:
            conf_vers = ["default"] + list(base_benchmark_conf.get("config_ver", {}).keys())

        for conf_ver in conf_vers:
            benchmark_conf = dict(base_benchmark_conf)  # Copy the config so we don't modify it

            # These fields are canonical names that refer to certain config versions
            benchmark_conf["accuracy_level"] = "99%"
            benchmark_conf["optimization_level"] = "plugin-enabled"
            benchmark_conf["inference_server"] = "lwis"

            """@etcheng
            NOTE: The original plan was to use a syntax like high_accuracy+triton to be able to combine already defined
            config_vers. However, since high_accuracy, triton, and high_accuracy+triton are likely to all have different
            expected QPS values, it makes more sense to keep high_accuracy_triton as a separate, individual config_ver.

            In the future, perhaps we can make an "extends": [ list of strings ] or { dict of config_ver name ->
            config_key } field in config_vers, so that we can define new config_vers that extend or combine previous
            config_vers.
            """

            equiv_to_default = False

            if conf_ver != "default":
                if "config_ver" not in benchmark_conf or conf_ver not in benchmark_conf["config_ver"]:
                    logging.warn(
                        "--config_ver={:} does not exist in config file '{:}'".format(conf_ver, benchmark_conf["config_name"]))
                    continue
                else:
                    if "high_accuracy" in conf_ver:
                        benchmark_conf["accuracy_level"] = "99.9%"
                    if "ootb" in conf_ver:
                        benchmark_conf["optimization_level"] = "ootb"
                    # "inference_server" is set when we run the harness

                    overrides = benchmark_conf["config_ver"][conf_ver]

                    # Check if this config_ver is equivalent to the default engine
                    gen_eng_argset = set(common_args.GENERATE_ENGINE_ARGS)
                    override_argset = set(overrides.keys())
                    equiv_to_default = (len(gen_eng_argset & override_argset) == 0)

                    benchmark_conf.update(overrides)

            # Update the config_ver key to be the actual string name, not the overrides
            benchmark_conf["config_ver"] = conf_ver

            need_gpu = not main_args["no_gpu"]
            need_dla = not main_args["gpu_only"]

            # Override the system_name if it exists
            if "system_name" in main_args:
                benchmark_conf["system_name"] = main_args["system_name"]

            if main_args["action"] == "generate_engines":
                # Turn on MPS if server scenario and if active_sms is specified.
                benchmark_conf = apply_overrides(benchmark_conf, ["active_sms"])
                active_sms = benchmark_conf.get("active_sms", None)

                copy_from_default = ("default" in conf_vers) and equiv_to_default
                if copy_from_default:
                    logging.info(
                        "config_ver={:} only modifies harness args. Re-using default engine.".format(conf_ver))

                _gen_args = [benchmark_conf]
                _gen_kwargs = {
                    "gpu": need_gpu,
                    "dla": need_dla,
                    "copy_from_default": copy_from_default
                }

                if not main_args["no_child_process"]:
                    if config["scenario"] == SCENARIOS.Server and active_sms is not None and active_sms < 100:
                        with ScopedMPS(active_sms):
                            launch_handle_generate_engine(*_gen_args, **_gen_kwargs)
                    else:
                        launch_handle_generate_engine(*_gen_args, **_gen_kwargs)
                else:
                    handle_generate_engine(*_gen_args, **_gen_kwargs)
            elif main_args["action"] == "run_harness":
                # In case there's a leftover audit.config file from a prior compliance run
                # (or any other reason), we need to delete it or we risk silent failure.
                auditing.cleanup()

                handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power)
            elif main_args["action"] == "run_audit_harness":
                logging.info('\n\n\nRunning compliance harness for test ' + main_args['audit_test'] + '\n\n\n')

                # Find the correct audit.config file and move it in current directory
                dest_config = auditing.load(main_args['audit_test'], benchmark_conf['benchmark'])

                # Make sure the log_file override is valid
                os.makedirs("build/compliance_logs", exist_ok=True)

                # Pass audit test name to handle_run_harness via benchmark_conf
                benchmark_conf['audit_test_name'] = main_args['audit_test']

                # Run harness
                handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power, compliance=True)

                # Cleanup audit.config
                logging.info("AUDIT HARNESS: Cleaning Up audit.config...")
                auditing.cleanup()
            elif main_args["action"] == "run_audit_verification":
                logging.info("Running compliance verification for test " + main_args['audit_test'])
                handle_audit_verification(audit_test_name=main_args['audit_test'], config=benchmark_conf)
                auditing.cleanup()
            elif main_args["action"] == "calibrate":
                # To generate calibration cache, we only need to run each benchmark once.
                # Use offline config.
                if benchmark_conf["scenario"] == SCENARIOS.Offline:
                    handle_calibrate(benchmark_conf)
            elif main_args["action"] == "generate_conf_files":
                handle_run_harness(benchmark_conf, need_gpu, need_dla, generate_conf_files_only=True)
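
The equiv_to_default check above reduces to a set intersection: if none of the config_ver's overrides touch an engine-build argument, the default engine can be copied instead of rebuilt. A standalone sketch of that check, with a made-up list standing in for common_args.GENERATE_ENGINE_ARGS (the real list lives in the project):

# Stand-in for common_args.GENERATE_ENGINE_ARGS; the actual entries may differ.
GENERATE_ENGINE_ARGS = ["gpu_batch_size", "dla_batch_size", "precision", "input_dtype"]

def is_equiv_to_default(overrides):
    """Return True when the overrides only change harness args, so the engine
    built for the 'default' config_ver can be reused instead of rebuilt."""
    return len(set(GENERATE_ENGINE_ARGS) & set(overrides.keys())) == 0

print(is_equiv_to_default({"server_target_qps": 3000}))  # True: harness-only override
print(is_equiv_to_default({"gpu_batch_size": 128}))       # False: engine must be rebuilt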
Code example #5
def handle_run_harness(config,
                       gpu=True,
                       dla=True,
                       profile=None,
                       power=False,
                       generate_conf_files_only=False,
                       compliance=False):
    """Run harness for given benchmark and scenario."""

    benchmark_name = config["benchmark"]

    logging.info("Running harness for {:} benchmark in {:} scenario...".format(
        benchmark_name, config["scenario"]))

    arglist = common_args.getScenarioBasedHarnessArgs(config["scenario"],
                                                      benchmark_name)

    config = apply_overrides(config, arglist)

    # Validate arguments
    if not dla:
        config["dla_batch_size"] = None
    if not gpu:
        config["gpu_batch_size"] = None

    # If we only want to generate conf_files, then set flag to true
    if generate_conf_files_only:
        config["generate_conf_files_only"] = True
        profile = None
        power = False

    # MLPINF-829: Disable CUDA graphs when there is a profiler
    if profile is not None:
        logging.warn(
            "Due to MLPINF-829, CUDA graphs result in a CUDA illegal memory "
            "access when run with a profiler on r460 driver. "
            "Force-disabling CUDA graphs.")
        config["use_graphs"] = False

    harness, config = get_harness(config, profile)

    if power:
        try:
            from code.internal.power_measurements import PowerMeasurements
            power_logfile_name = "{}_{}_{}_{}".format(
                config.get("config_name"), config.get("accuracy_level"),
                config.get("optimization_level"),
                config.get("inference_server"))
            power_measurements = PowerMeasurements("{}/{}/{}".format(
                os.getcwd(), "power_measurements", power_logfile_name))
            power_measurements.start()
        except BaseException:
            power_measurements = None

    for key, value in config.items():
        print("{} : {}".format(key, value))
    result = ""

    if compliance:
        # AP: We need to keep the compliance logs separated from accuracy and perf
        # otherwise it messes up the update_results process
        config['log_dir'] = os.path.join('build/compliance_logs',
                                         config['audit_test_name'])
        logging.info(
            'AUDIT HARNESS: Overriding log_dir for compliance run. Set to ' +
            config['log_dir'])

    # Launch the harness
    passed = True
    try:
        result = harness.run_harness()
        logging.info("Result: {:}".format(result))
    except Exception as _:
        traceback.print_exc(file=sys.stdout)
        passed = False
    finally:
        if power and power_measurements is not None:
            power_measurements.stop()
    if not passed:
        raise RuntimeError("Run harness failed!")

    if generate_conf_files_only and result == "Generated conf files":
        return

    # Append result to perf result summary log.
    log_dir = config["log_dir"]
    summary_file = os.path.join(log_dir, "perf_harness_summary.json")
    results = {}
    if os.path.exists(summary_file):
        with open(summary_file) as f:
            results = json.load(f)

    config_name = "{:}-{:}-{:}".format(harness.get_system_name(),
                                       config["config_ver"],
                                       config["scenario"])
    if config_name not in results:
        results[config_name] = {}
    results[config_name][benchmark_name] = result

    with open(summary_file, "w") as f:
        json.dump(results, f)

    # Check accuracy from loadgen logs.
    if not compliance:
        # TEST01 fails the accuracy test because it produces fewer predictions than expected
        accuracy = check_accuracy(
            os.path.join(harness.get_full_log_dir(),
                         "mlperf_log_accuracy.json"), config)
        summary_file = os.path.join(log_dir, "accuracy_summary.json")
        results = {}
        if os.path.exists(summary_file):
            with open(summary_file) as f:
                results = json.load(f)

        if config_name not in results:
            results[config_name] = {}
        results[config_name][benchmark_name] = accuracy

        with open(summary_file, "w") as f:
            json.dump(results, f)
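
The perf and accuracy summaries above follow the same read-modify-write pattern on a small JSON file keyed first by config name, then by benchmark. A self-contained sketch of that pattern; the file path, config name, and result string are illustrative:

import json
import os

def append_result(summary_file, config_name, benchmark_name, result):
    # Read the existing summary if present, otherwise start fresh.
    results = {}
    if os.path.exists(summary_file):
        with open(summary_file) as f:
            results = json.load(f)

    # Nested layout mirrors perf_harness_summary.json:
    # {config_name: {benchmark_name: result}}.
    results.setdefault(config_name, {})[benchmark_name] = result

    log_dir = os.path.dirname(summary_file)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    with open(summary_file, "w") as f:
        json.dump(results, f)

append_result("build/demo_logs/perf_harness_summary.json",
              "DGX-A100-default-Offline", "resnet50",
              "Samples per second: 12345")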
Code example #6
def main(main_args, system):
    """
    Args:
        main_args: Args parsed from user input.
        system: System to use
    """
    system_id = system.get_id()

    # Turn off MPS in case it's turned on.
    turn_off_mps()

    # Get user's benchmarks, else run all.
    benchmarks = BENCHMARKS.ALL
    if main_args["benchmarks"] is not None:
        benchmarks = main_args["benchmarks"].split(",")
        benchmarks = [BENCHMARKS.alias(b) for b in benchmarks]

    # Get user's scenarios, else use all.
    scenarios = SCENARIOS.ALL
    if main_args["scenarios"] is not None:
        scenarios = main_args["scenarios"].split(",")
        scenarios = [SCENARIOS.alias(s) for s in scenarios]

    profile = main_args.get("profile", None)
    power = main_args.get("power", False)

    # Automatically find config file paths
    config_files = main_args["configs"]
    if config_files == "" or config_files is None:
        config_files = find_config_files(benchmarks, scenarios)
        if config_files == "":
            logging.warn(
                "Cannot find any valid configs for the specified benchmark-scenario pairs."
            )
            return

    logging.info("Using config files: {:}".format(str(config_files)))
    configs = load_configs(config_files)

    for config in configs:
        base_benchmark_conf = get_system_benchmark_config(config, system_id)
        if base_benchmark_conf is None:
            continue

        base_benchmark_conf["config_name"] = "{:}_{:}_{:}".format(
            system_id, base_benchmark_conf["benchmark"],
            base_benchmark_conf["scenario"])
        logging.info("Processing config \"{:}\"".format(
            base_benchmark_conf["config_name"]))

        # Load config_ver / apply overrides
        conf_vers = main_args.get("config_ver", "default").split(",")

        # Build default first. This is because some config_vers only modify harness args, and the engine is the same as
        # default. In this case, we build default first, and copy it instead of rebuilding it.
        if "default" in conf_vers:
            conf_vers = ["default"] + list(set(conf_vers) - {"default"})
        elif "all" in conf_vers:
            tmp = ["default"] + list(
                base_benchmark_conf.get("config_ver", {}).keys())
            # As per request, 'all' skips 'maxQ' and 'hetero' config_vers for now; those
            # should only be run when specified directly.
            conf_vers = []
            for s in tmp:
                if "maxq" not in s.lower() and "hetero" not in s.lower():
                    conf_vers.append(s)

        for conf_ver in conf_vers:
            benchmark_conf = dict(
                base_benchmark_conf)  # Copy the config so we don't modify it

            # These fields are canonical names that refer to certain config versions
            benchmark_conf["accuracy_level"] = "99%"
            benchmark_conf["optimization_level"] = "plugin-enabled"
            benchmark_conf["inference_server"] = "lwis"

            equiv_to_default = False

            if conf_ver != "default":
                if "config_ver" not in benchmark_conf or conf_ver not in benchmark_conf[
                        "config_ver"]:
                    logging.warn(
                        "--config_ver={:} does not exist in config file '{:}'".
                        format(conf_ver, benchmark_conf["config_name"]))
                    continue
                else:
                    if "high_accuracy" in conf_ver:
                        benchmark_conf["accuracy_level"] = "99.9%"
                    if "ootb" in conf_ver:
                        benchmark_conf["optimization_level"] = "ootb"
                    # "inference_server" is set when we run the harness

                    overrides = benchmark_conf["config_ver"][conf_ver]

                    # Enforce Triton check
                    if "triton" in conf_ver.lower() and not overrides.get(
                            "use_triton", False):
                        raise RuntimeError(
                            "conf_ver={} references Triton harness, but 'use_triton' is false"
                            .format(conf_ver))

                    # Check if this config_ver is equivalent to the default engine
                    # RNNT has multiple engines, so disable the equiv_to_default.
                    if benchmark_conf["benchmark"] != BENCHMARKS.RNNT:
                        gen_eng_argset = set(common_args.GENERATE_ENGINE_ARGS)
                        override_argset = set(overrides.keys())
                        equiv_to_default = (len(gen_eng_argset
                                                & override_argset) == 0)

                    benchmark_conf.update(overrides)

            # Update the config_ver key to be the actual string name, not the overrides
            benchmark_conf["config_ver"] = conf_ver

            need_gpu = not main_args["no_gpu"]
            need_dla = not main_args["gpu_only"]

            # Override the system_name if it exists
            if "system_name" in main_args:
                benchmark_conf["system_name"] = main_args["system_name"]

            # Check for use_cpu
            if system_id.startswith("Triton_CPU"):
                benchmark_conf["use_cpu"] = True

            # Generate engines.
            if main_args["action"] == "generate_engines":
                # Turn on MPS if server scenario and if active_sms is specified.
                benchmark_conf = apply_overrides(benchmark_conf,
                                                 ["active_sms"])
                active_sms = benchmark_conf.get("active_sms", None)

                copy_from_default = ("default"
                                     in conf_vers) and equiv_to_default
                if copy_from_default:
                    logging.info(
                        "config_ver={:} only modifies harness args. Re-using default engine."
                        .format(conf_ver))

                _gen_args = [benchmark_conf]
                _gen_kwargs = {
                    "gpu": need_gpu,
                    "dla": need_dla,
                    "copy_from_default": copy_from_default
                }

                if not main_args["no_child_process"]:
                    if config[
                            "scenario"] == SCENARIOS.Server and active_sms is not None and active_sms < 100:
                        with ScopedMPS(active_sms):
                            launch_handle_generate_engine(
                                *_gen_args, **_gen_kwargs)
                    else:
                        launch_handle_generate_engine(*_gen_args,
                                                      **_gen_kwargs)
                else:
                    handle_generate_engine(*_gen_args, **_gen_kwargs)

            # Run CPU harness:
            elif main_args["action"] == "run_cpu_harness":
                auditing.cleanup()
                benchmark_conf["use_cpu"] = True
                handle_run_harness(benchmark_conf, False, False, None, power)
            # Run harness.
            elif main_args["action"] == "run_harness":
                # In case there's a leftover audit.config file from a prior compliance run
                # (or any other reason), we need to delete it or we risk silent failure.
                auditing.cleanup()

                handle_run_harness(benchmark_conf, need_gpu, need_dla, profile,
                                   power)
            elif main_args["action"] == "run_audit_harness" or main_args[
                    "action"] == "run_cpu_audit_harness":
                logging.info('\n\n\nRunning compliance harness for test ' +
                             main_args['audit_test'] + '\n\n\n')

                # Find the correct audit.config file and move it in current directory
                dest_config = auditing.load(main_args['audit_test'],
                                            benchmark_conf['benchmark'])

                # Make sure the log_file override is valid
                os.makedirs("build/compliance_logs", exist_ok=True)

                # Pass audit test name to handle_run_harness via benchmark_conf
                benchmark_conf['audit_test_name'] = main_args['audit_test']

                if main_args["action"] == "run_cpu_audit_harness":
                    need_gpu = False
                    need_dla = False
                    profile = None
                    benchmark_conf["use_cpu"] = True

                # Run harness
                handle_run_harness(benchmark_conf,
                                   need_gpu,
                                   need_dla,
                                   profile,
                                   power,
                                   compliance=True)

                # Cleanup audit.config
                logging.info("AUDIT HARNESS: Cleaning Up audit.config...")
                auditing.cleanup()
            elif main_args["action"] == "run_audit_verification":
                logging.info("Running compliance verification for test " +
                             main_args['audit_test'])
                handle_audit_verification(
                    audit_test_name=main_args['audit_test'],
                    config=benchmark_conf)
                auditing.cleanup()
            elif main_args["action"] == "run_cpu_audit_verification":
                logging.info("Running compliance verification for test " +
                             main_args['audit_test'])
                benchmark_conf["use_cpu"] = True
                handle_audit_verification(
                    audit_test_name=main_args['audit_test'],
                    config=benchmark_conf)
                auditing.cleanup()
            elif main_args["action"] == "calibrate":
                # To generate calibration cache, we only need to run each benchmark once.
                # Use offline config.
                if benchmark_conf["scenario"] == SCENARIOS.Offline:
                    handle_calibrate(benchmark_conf)
            elif main_args["action"] == "generate_conf_files":
                handle_run_harness(benchmark_conf,
                                   need_gpu,
                                   need_dla,
                                   generate_conf_files_only=True)
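
The "all" expansion in this newer main() filters out MaxQ and Hetero config_vers unless they are requested explicitly. The same filter in isolation, applied to a hypothetical list of config_ver names:

# Hypothetical config_ver names; only the filtering logic mirrors main() above.
tmp = ["default", "high_accuracy", "maxq", "triton", "hetero_maxq"]
conf_vers = [s for s in tmp if "maxq" not in s.lower() and "hetero" not in s.lower()]
print(conf_vers)  # ['default', 'high_accuracy', 'triton']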
Code example #7
def run_SSDMobileNet_accuracy(engine_file,
                              batch_size,
                              num_images,
                              verbose=False,
                              output_file="build/out/SSDMobileNet/dump.json"):
    logging.info(
        "Running SSDMobileNet functionality test for engine [ {:} ] with batch size {:}"
        .format(engine_file, batch_size))

    runner = EngineRunner(engine_file, verbose=verbose)
    input_dtype, input_format = get_input_format(runner.engine)
    if input_dtype == trt.DataType.FLOAT:
        format_string = "fp32"
    elif input_dtype == trt.DataType.INT8:
        if input_format == trt.TensorFormat.LINEAR:
            format_string = "int8_linear"
        elif input_format == trt.TensorFormat.CHW4:
            format_string = "int8_chw4"
        else:
            raise RuntimeError("Unsupported INT8 input format: {:}".format(input_format))
    else:
        raise RuntimeError("Unsupported input dtype: {:}".format(input_dtype))
    image_dir = os.path.join(
        os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
        "coco/val2017/SSDMobileNet", format_string)
    annotations_path = os.path.join(
        os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
        "coco/annotations/instances_val2017.json")
    val_map = "data_maps/coco/val_map.txt"

    if len(glob(image_dir)) == 0:
        logging.warn("Cannot find data directory in ({:})".format(image_dir))
        pytest.skip("Cannot find data directory ({:})".format(image_dir))

    coco = COCO(annotation_file=annotations_path)

    coco_detections = []
    image_ids = coco.getImgIds()
    num_images = min(num_images, len(image_ids))

    logging.info(
        "Running validation on {:} images. Please wait...".format(num_images))
    batch_idx = 0
    for image_idx in range(0, num_images, batch_size):
        batch_image_ids = image_ids[image_idx:image_idx + batch_size]
        actual_batch_size = len(batch_image_ids)
        batch_images = np.ascontiguousarray(
            np.stack([
                np.load(
                    os.path.join(image_dir,
                                 coco.imgs[id]["file_name"] + ".npy"))
                for id in batch_image_ids
            ]))

        start_time = time.time()
        [outputs] = runner([batch_images], actual_batch_size)
        if verbose:
            logging.info("Batch {:d} >> Inference time:  {:f}".format(
                batch_idx,
                time.time() - start_time))

        batch_detections = outputs.reshape(batch_size,
                                           100 * 7 + 1)[:actual_batch_size]

        for detections, image_id in zip(batch_detections, batch_image_ids):
            keep_count = detections[100 * 7].view('int32')
            image_width = coco.imgs[image_id]["width"]
            image_height = coco.imgs[image_id]["height"]
            for detection in detections[:keep_count * 7].reshape(
                    keep_count, 7):
                score = float(detection[PredictionLayout.CONFIDENCE])
                bbox_coco_fmt = [
                    detection[PredictionLayout.XMIN] * image_width,
                    detection[PredictionLayout.YMIN] * image_height,
                    (detection[PredictionLayout.XMAX] -
                     detection[PredictionLayout.XMIN]) * image_width,
                    (detection[PredictionLayout.YMAX] -
                     detection[PredictionLayout.YMIN]) * image_height,
                ]

                coco_detection = {
                    "image_id": image_id,
                    "category_id": int(detection[PredictionLayout.LABEL]),
                    "bbox": bbox_coco_fmt,
                    "score": score,
                }
                coco_detections.append(coco_detection)

        batch_idx += 1

    output_dir = os.path.dirname(output_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    with open(output_file, "w") as f:
        json.dump(coco_detections, f)

    cocoDt = coco.loadRes(output_file)
    eval = COCOeval(coco, cocoDt, 'bbox')
    eval.params.imgIds = image_ids[:num_images]

    eval.evaluate()
    eval.accumulate()
    eval.summarize()

    map_score = eval.stats[0]

    logging.info("Get mAP score = {:f} Target = {:f}".format(
        map_score, 0.22386))
    return map_score
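
Each image's output above is a flat float32 buffer of 100 * 7 + 1 values: up to 100 detections of 7 fields each, followed by the valid-detection count stored as an int32 reinterpreted bit-for-bit as a float. A hedged sketch of just that decoding step; the field order inside each 7-value row is defined by the project's PredictionLayout enum and is not reproduced here:

import numpy as np

def decode_detections(raw):
    """Decode one image's flat SSD output buffer of shape (100 * 7 + 1,)."""
    raw = np.asarray(raw, dtype=np.float32)
    # The last float carries the number of valid detections in its bit pattern.
    keep_count = int(raw[100 * 7].view('int32'))
    return raw[:keep_count * 7].reshape(keep_count, 7)

# Illustrative buffer with two valid detections and zero padding.
buf = np.zeros(100 * 7 + 1, dtype=np.float32)
buf[:14] = np.arange(14, dtype=np.float32)
buf[700] = np.int32(2).view(np.float32)  # store keep_count = 2 bit-for-bit
print(decode_detections(buf).shape)       # (2, 7)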
Code example #8
def traverse_config(config_name,
                    full,
                    target,
                    seen,
                    enforce_type_equivalence=True):
    if target in seen:
        raise RuntimeError(
            "Error in config '{}': cyclical dependency on {}".format(
                config_name, target))

    target_conf = full.get(target, None)
    if target_conf is None:
        logging.warn("Could not find configuration for {} in {}".format(
            target, config_name))
        return None

    # Do not overwrite existing
    target_conf = copy.deepcopy(target_conf)

    # The 2 keys that define inheritance dependencies are "extends" and "scales"
    extends = []
    if "extends" in target_conf:
        extends = target_conf["extends"]
        del target_conf["extends"]

    scales = dict()
    if "scales" in target_conf:
        scales = target_conf["scales"]
        del target_conf["scales"]

    # extends and scales may not share any common keys
    common_keys = set(extends).intersection(set(scales.keys()))
    if len(common_keys) > 0:
        raise RuntimeError("{}:{} cannot both extend and scale {}".format(
            config_name, target,
            list(common_keys)[0]))

    conf = dict()

    # Apply extended configs
    for platform in extends:
        parent = traverse_config(
            config_name,
            full,
            platform,
            seen + [target],
            enforce_type_equivalence=enforce_type_equivalence)
        update_nested(conf,
                      parent,
                      enforce_type_equivalence=enforce_type_equivalence)

    for platform in scales:
        parent = traverse_config(
            config_name,
            full,
            platform,
            seen + [target],
            enforce_type_equivalence=enforce_type_equivalence)
        for key in scales[platform]:
            if key not in parent:
                raise RuntimeError(
                    "{}:{} scales {}:{} which does not exist".format(
                        config_name, target, platform, key))
            parent[key] *= scales[platform][key]
            if "config_ver" in parent:
                for config_ver in parent["config_ver"]:
                    if key in parent["config_ver"][config_ver]:
                        parent["config_ver"][config_ver][key] *= scales[
                            platform][key]
        update_nested(conf,
                      parent,
                      enforce_type_equivalence=enforce_type_equivalence)

    update_nested(conf,
                  target_conf,
                  enforce_type_equivalence=enforce_type_equivalence)
    return conf
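
traverse_config in this final version relies on an update_nested helper that is not shown in these excerpts. As an assumption about its contract, here is a minimal sketch of a recursive in-place merge that also honors the enforce_type_equivalence flag; the project's real implementation may differ:

def update_nested(dst, src, enforce_type_equivalence=True):
    """Recursively merge src into dst in place (a guess at the helper's contract).

    Nested dicts are merged key by key; other values are overwritten. When
    enforce_type_equivalence is True, overriding an existing key with a value
    of a different type raises, which catches config typos early.
    """
    for key, value in src.items():
        if key in dst and isinstance(dst[key], dict) and isinstance(value, dict):
            update_nested(dst[key], value,
                          enforce_type_equivalence=enforce_type_equivalence)
        else:
            if (enforce_type_equivalence and key in dst
                    and dst[key] is not None and value is not None
                    and type(dst[key]) is not type(value)):
                raise RuntimeError(
                    "Type mismatch for key '{}': {} vs {}".format(
                        key, type(dst[key]), type(value)))
            dst[key] = value
    return dst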