Example #1
    def run(self, args):
        G_LOGGER.start("Starting iterations")

        builder, network, parser = util.unpack_args(
            self.arg_groups[TrtNetworkLoaderArgs].load_network(), 3)

        with contextlib.ExitStack() as stack:
            stack.enter_context(builder)
            stack.enter_context(network)
            if parser:
                stack.enter_context(parser)

            self.setup(args, network)

            num_passed = 0
            num_total = 0

            success = True
            MAX_COUNT = 100000  # We don't want to loop forever. This many iterations ought to be enough for anybody.
            for iteration in range(MAX_COUNT):
                G_LOGGER.start("RUNNING | Iteration {:}".format(iteration + 1))

                self.process_network(network, success)

                # Don't need to keep the engine around in memory - just serialize to disk and free it.
                with self.arg_groups[TrtEngineLoaderArgs].build_engine(
                    (builder, network)) as engine:
                    self.arg_groups[TrtEngineSaveArgs].save_engine(
                        engine,
                        self.arg_groups[ArtifactSorterArgs].iter_artifact)

                success = self.arg_groups[ArtifactSorterArgs].sort_artifacts(
                    iteration + 1)

                num_total += 1
                if success:
                    num_passed += 1

                if self.stop(iteration, success):
                    break
            else:
                G_LOGGER.warning(
                    "Maximum number of iterations reached: {:}.\n"
                    "Iteration has been halted to prevent an infinite loop!".
                    format(MAX_COUNT))

        G_LOGGER.finish(
            "Finished {:} iteration(s) | Passed: {:}/{:} | Pass Rate: {:}%".
            format(iteration + 1, num_passed, num_total,
                   float(num_passed) * 100 / float(num_total)))
Example #2
    def run(self, args):
        try:
            until = int(args.until) - 1
        except:
            until = args.until
            if until not in ["good", "bad"]:
                G_LOGGER.critical(
                    "--until value must be an integer, 'good', or 'bad', but was: {:}"
                    .format(args.until))

        def stop(index, success):
            if until == "good":
                return success
            elif until == "bad":
                return not success

            return index >= until

        G_LOGGER.start("Starting iterations")

        num_passed = 0
        num_total = 0

        success = True
        MAX_COUNT = 100000  # We don't want to loop forever. This many iterations ought to be enough for anybody.
        for iteration in range(MAX_COUNT):
            G_LOGGER.start("RUNNING | Iteration {:}".format(iteration + 1))

            success = self.arg_groups[ArtifactSorterArgs].sort_artifacts(
                iteration + 1)

            num_total += 1
            if success:
                num_passed += 1

            if stop(iteration, success):
                break
        else:
            G_LOGGER.warning(
                "Maximum number of iterations reached: {:}.\n"
                "Iteration has been halted to prevent an infinite loop!".
                format(MAX_COUNT))

        G_LOGGER.finish(
            "Finished {:} iteration(s) | Passed: {:}/{:} | Pass Rate: {:}%".
            format(iteration + 1, num_passed, num_total,
                   float(num_passed) * 100 / float(num_total)))
Example #3
        def execute_runner(runner, loader_cache):
            with runner as active_runner:
                input_metadata = active_runner.get_input_metadata()
                G_LOGGER.info("{:35}\n---- Model Input(s) ----\n{:}".format(active_runner.name, input_metadata),
                              mode=LogMode.ONCE)

                # DataLoaderCache will ensure that the feed_dict does not contain any extra entries
                # based on the provided input_metadata.
                loader_cache.set_input_metadata(input_metadata)

                if warm_up:
                    G_LOGGER.start("{:35} | Running {:} warm-up run(s)".format(active_runner.name, warm_up))
                    try:
                        feed_dict = loader_cache[0]
                    except IndexError:
                        G_LOGGER.warning("{:} warm-up run(s) were requested, but data loader did not supply any data. "
                                         "Skipping warm-up run(s)".format(warm_up))
                    else:
                        G_LOGGER.ultra_verbose("Warm-up Input Buffers:\n{:}".format(util.indent_block(feed_dict)))
                        # First do a few warm-up runs, and don't time them.
                        for _ in range(warm_up):
                            active_runner.infer(feed_dict=feed_dict)
                    G_LOGGER.finish("{:35} | Finished {:} warm-up run(s)".format(active_runner.name, warm_up))

                # Then, actual iterations.
                index = 0
                iteration_results = []

                total_runtime = 0
                for index, feed_dict in enumerate(loader_cache):
                    G_LOGGER.extra_verbose(lambda: "{:35} | Feeding inputs:\n{:}".format(active_runner.name, util.indent_block(feed_dict)))
                    outputs = active_runner.infer(feed_dict=feed_dict)

                    runtime = active_runner.last_inference_time()
                    total_runtime += runtime
                    # Without a deep copy here, outputs will always reference the output of the last run
                    iteration_results.append(IterationResult(outputs=copy.deepcopy(outputs), runtime=runtime, runner_name=active_runner.name))

                    G_LOGGER.info(lambda: "{:35}\n---- Model Output(s) ----\n{:}".format(
                                            active_runner.name, TensorMetadata().from_feed_dict(outputs)),
                                  mode=LogMode.ONCE)
                    G_LOGGER.extra_verbose(lambda: "{:35} | Inference Time: {:.3f} ms | Received outputs:\n{:}".format(
                                                        active_runner.name, runtime * 1000.0, util.indent_block(outputs)))

                total_runtime_ms = total_runtime * 1000.0
                G_LOGGER.finish("{:35} | Completed {:} iteration(s) in {:.4g} ms | Average inference time: {:.4g} ms.".format(active_runner.name, index + 1, total_runtime_ms, total_runtime_ms / float(index + 1)))
                return iteration_results
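
Usage sketch (assumption: a recent Polygraphy release and a placeholder model path "model.onnx"): execute_runner above is an internal helper of Comparator.run, the public entry point that exposes the warm_up and data_loader behavior it implements.

from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.comparator import Comparator, DataLoader

# DataLoader generates synthetic input data; warm_up requests the untimed warm-up runs logged above.
runners = [OnnxrtRunner(SessionFromOnnx("model.onnx"))]
results = Comparator.run(runners, data_loader=DataLoader(iterations=5), warm_up=2)
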
Example #4
        def validate_output(runner_name, output_name, output):
            G_LOGGER.start("{:35} | Validating output: {:} (check_inf={:}, check_nan={:})".format(
                runner_name, output_name, check_inf, check_nan))
            with G_LOGGER.indent():
                comp_util.log_output_stats(output)

                output_valid = True
                if check_nan:
                    output_valid &= is_not_nan(output)
                if check_inf:
                    output_valid &= is_finite(output)

                if output_valid:
                    G_LOGGER.finish("PASSED | Output: {:} is valid".format(output_name))
                else:
                    G_LOGGER.error("FAILED | Errors detected in output: {:}".format(output_name))
                return output_valid
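
For reference, a tiny NumPy-only illustration of the checks that is_not_nan and is_finite (called above) are built on:

import numpy as np

output = np.array([1.0, np.inf, np.nan])
print(np.isfinite(output))             # [ True False False]
print(np.isnan(output))                # [False False  True]
print(bool(np.any(np.isnan(output))))  # True -> this output would fail the NaN check
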
Example #5
        def compare_output(iter_result0, iter_result1):
            """
            Compare the outputs of two runners from a single iteration.

            This function will always iterate over the output names of the first IterationResult,
                and attempt to find corresponding output names in the second.
            If no corresponding output name is found, the output is skipped.
            If all output names are skipped, then this function raises an error.

            Args:
                iter_result0 (IterationResult): The result of the first runner.
                iter_result1 (IterationResult): The result of the second runner.

            Returns:
                OrderedDict[str, OutputCompareResult]:
                        The name of the outputs compared, derived from the first IterationResult,
                        and whether they matched. If an output name is not found, it is omitted from this dictionary.

            Raises:
                PolygraphyException: If all output names are skipped, and thus no outputs are compared.
            """
            def check_dict(dct, dict_name):
                if isinstance(dct, dict):
                    util.check_dict_contains(dct, set(iter_result0.keys()) | set(iter_result1.keys()) | set([""]),
                                             check_missing=False, dict_name=dict_name)


            check_dict(rtol, "the rtol dictionary")
            check_dict(atol, "the atol dictionary")
            check_dict(check_error_stat, "the check_error_stat dictionary")


            # Returns whether the outputs match
            def check_outputs_match(out0, out0_name, out1, out1_name, per_out_rtol, per_out_atol, per_out_err_stat):
                VALID_CHECK_ERROR_STATS = ["max", "mean", "median", "elemwise"]
                if per_out_err_stat not in VALID_CHECK_ERROR_STATS:
                    G_LOGGER.critical("Invalid choice for check_error_stat: {:}.\n"
                                      "Note: Valid choices are: {:}".format(per_out_err_stat, VALID_CHECK_ERROR_STATS))

                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result0.runner_name, out0_name, out0.dtype, out0.shape, util.indent_block(out0)))
                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result1.runner_name, out1_name, out1.dtype, out1.shape, util.indent_block(out1)))

                # Check difference vs. tolerances
                if np.issubdtype(out0.dtype, np.bool_) and np.issubdtype(out1.dtype, np.bool_):
                    absdiff = np.logical_xor(out0, out1)
                else:
                    absdiff = np.abs(out0 - out1)

                absout1 = np.abs(out1)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning)
                    reldiff = absdiff / absout1

                max_absdiff = comp_util.compute_max(absdiff)
                mean_absdiff = comp_util.compute_mean(absdiff)
                median_absdiff = comp_util.compute_median(absdiff)
                max_reldiff = comp_util.compute_max(reldiff)
                mean_reldiff = comp_util.compute_mean(reldiff)
                median_reldiff = comp_util.compute_median(reldiff)

                max_elemwiseabs = "Unknown"
                max_elemwiserel = "Unknown"

                if per_out_err_stat == "mean":
                    failed = mean_absdiff > per_out_atol and (np.isnan(mean_reldiff) or mean_reldiff > per_out_rtol)
                elif per_out_err_stat == "median":
                    failed = median_absdiff > per_out_atol and (np.isnan(median_reldiff) or median_reldiff > per_out_rtol)
                elif per_out_err_stat == "max":
                    failed = max_absdiff > per_out_atol and (np.isnan(max_reldiff) or max_reldiff > per_out_rtol)
                else:
                    assert per_out_err_stat == "elemwise", "This branch should be unreachable unless per_out_err_stat is 'elemwise'"
                    mismatches = (absdiff > per_out_atol) & (reldiff > per_out_rtol)

                    failed = np.any(mismatches)
                    try:
                        # Special because we need to account for tolerances too.
                        max_elemwiseabs = comp_util.compute_max(absdiff[mismatches])
                        max_elemwiserel = comp_util.compute_max(reldiff[mismatches])

                        with G_LOGGER.indent():
                            G_LOGGER.super_verbose("Mismatched indices:\n{:}".format(np.argwhere(mismatches)))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result0.runner_name, out0[mismatches]))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result1.runner_name, out1[mismatches]))
                    except Exception as err:
                        G_LOGGER.warning("Failing to log mismatches.\nNote: Error was: {:}".format(err))

                # Log information about the outputs
                hist_bin_range = (min(comp_util.compute_min(out0), comp_util.compute_min(out1)),
                                  max(comp_util.compute_max(out0), comp_util.compute_max(out1)))
                comp_util.log_output_stats(out0, failed, iter_result0.runner_name + ": " + out0_name, hist_range=hist_bin_range)
                comp_util.log_output_stats(out1, failed, iter_result1.runner_name + ": " + out1_name, hist_range=hist_bin_range)

                G_LOGGER.info("Error Metrics: {:}".format(out0_name))
                with G_LOGGER.indent():
                    def req_tol(mean_diff, median_diff, max_diff, elemwise_diff):
                        return {
                            "mean": mean_diff,
                            "median": median_diff,
                            "max": max_diff,
                            "elemwise": elemwise_diff,
                        }[per_out_err_stat]

                    G_LOGGER.info("Minimum Required Tolerance: {:} error | [abs={:.5g}] OR [rel={:.5g}]".format(
                                    per_out_err_stat,
                                    req_tol(mean_absdiff, median_absdiff, max_absdiff, max_elemwiseabs),
                                    req_tol(mean_reldiff, median_reldiff, max_reldiff, max_elemwiserel)))
                    comp_util.log_output_stats(absdiff, failed, "Absolute Difference")
                    comp_util.log_output_stats(reldiff, failed, "Relative Difference")

                # Finally show summary.
                if failed:
                    G_LOGGER.error("FAILED | Difference exceeds tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))
                else:
                    G_LOGGER.finish("PASSED | Difference is within tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))

                G_LOGGER.extra_verbose("Finished comparing: '{:}' (dtype={:}, shape={:}) [{:}] and '{:}' (dtype={:}, shape={:}) [{:}]"
                                .format(out0_name, out0.dtype, out0.shape, iter_result0.runner_name, out1_name, out1.dtype, out1.shape, iter_result1.runner_name))
                return OutputCompareResult(not failed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff)
                #
                # End: def check_outputs_match
                #

            output_status = OrderedDict() # OrderedDict[str, bool] Maps output names to whether they matched.

            if not check_shapes:
                G_LOGGER.info("Strict shape checking disabled. Will attempt to match output shapes before comparisons")


            def default_find_output_func(output_name, index, iter_result):
                found_name = util.find_in_dict(output_name, iter_result, index)
                if found_name is None:
                    return None
                elif found_name != output_name:
                    exact_match = util.find_in_dict(found_name, iter_result0)
                    if exact_match == found_name:
                        G_LOGGER.verbose("Will not compare {:} with {:}, since the former already has an exact match: {:}".format(
                                            found_name, output_name, exact_match))
                        return None # If the found output is being compared against another output already, skip this non-exact match
                    G_LOGGER.warning("Output names did not match exactly. Assuming {:} output: {:} "
                                    "corresponds to output: {:}".format(
                                        iter_result.runner_name, found_name, output_name))
                return [found_name]


            nonlocal find_output_func
            find_output_func = util.default(find_output_func, default_find_output_func)

            for index, (out0_name, output0) in enumerate(iter_result0.items()):
                out1_names = util.default(find_output_func(out0_name, index, iter_result1), [])

                if len(out1_names) > 1:
                    G_LOGGER.info("Will attempt to compare output: '{:}' [{:}] with multiple outputs: '{:}' [{:}]".format(
                                    out0_name, iter_result0.runner_name, list(out1_names), iter_result1.runner_name))

                for out1_name in out1_names:
                    if out1_name is None or out1_name not in iter_result1:
                        G_LOGGER.warning("For output: '{:}' [{:}], skipping corresponding output: '{:}' [{:}], "
                                         "since the output was not found".format(out0_name, iter_result0.runner_name,
                                                                                 out1_name, iter_result1.runner_name))
                        continue


                    def get_tol(tol_dict, default):
                        if isinstance(tol_dict, numbers.Number):
                            return tol_dict

                        if out0_name in tol_dict:
                            return tol_dict[out0_name]
                        elif "" in tol_dict:
                            return tol_dict[""]
                        return default


                    def get_error_stat():
                        if isinstance(check_error_stat, str):
                            return check_error_stat

                        if out0_name in check_error_stat:
                            return check_error_stat[out0_name]
                        elif "" in check_error_stat:
                            return check_error_stat[""]
                        return default_error_stat


                    per_out_atol = get_tol(atol, default_atol)
                    per_out_rtol = get_tol(rtol, default_rtol)
                    per_out_err_stat = get_error_stat()

                    output1 = iter_result1[out1_name]
                    G_LOGGER.start("Comparing Output: '{:}' (dtype={:}, shape={:}) with '{:}' (dtype={:}, shape={:}) | "
                                   "Tolerance: [abs={:.5g}, rel={:.5g}] | Checking {:} error".format(
                                        out0_name, output0.dtype, output0.shape,
                                        out1_name, output1.dtype, output1.shape,
                                        per_out_atol, per_out_rtol, per_out_err_stat))
                    G_LOGGER.extra_verbose("Note: Comparing {:} vs. {:}".format(iter_result0.runner_name, iter_result1.runner_name))


                    with G_LOGGER.indent():
                        if check_shapes and output0.shape != output1.shape:
                            G_LOGGER.error("Will not compare outputs of different shapes. Note: Output shapes are "
                                           "{:} and {:}.".format(output0.shape, output1.shape))
                            G_LOGGER.error("Note: Use --no-strict-shape-checking or set check_shapes=False to "
                                           "attempt to compare values anyway.", mode=LogMode.ONCE)
                            outputs_match = False
                        else:
                            output1 = util.try_match_shape(output1, output0.shape)
                            output0 = output0.reshape(output1.shape)
                            outputs_match = check_outputs_match(output0, out0_name, output1, out1_name,
                                                                per_out_rtol=per_out_rtol, per_out_atol=per_out_atol,
                                                                per_out_err_stat=per_out_err_stat)

                        output_status[out0_name] = outputs_match
                        if fail_fast and not outputs_match:
                            return output_status


            mismatched_output_names = [name for name, matched in output_status.items() if not matched]
            if mismatched_output_names:
                G_LOGGER.error("FAILED | Mismatched outputs: {:}".format(mismatched_output_names))
            else:
                G_LOGGER.finish("PASSED | All outputs matched | Outputs: {:}".format(list(output_status.keys())))

            # This is useful for catching cases where Polygraphy does something wrong with the runner output buffers
            if not output_status and (bool(iter_result0.keys()) or bool(iter_result1.keys())):
                r0_name = iter_result0.runner_name
                r0_outs = list(iter_result0.keys())
                r1_name = iter_result1.runner_name
                r1_outs = list(iter_result1.keys())
                G_LOGGER.critical("All outputs were skipped, no common outputs found! Note:\n{:} outputs: "
                                  "{:}\n{:} outputs: {:}".format(r0_name, r0_outs, r1_name, r1_outs))

            return output_status
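
As the docstring and check_dict above indicate, atol, rtol, and check_error_stat may each be a single value or a per-output dictionary, with the empty string acting as the default key. A hedged configuration sketch for the enclosing factory (CompareFunc.simple in recent Polygraphy releases; older releases expose it as CompareFunc.basic_compare_func):

from polygraphy.comparator import CompareFunc

# "" supplies the fallback tolerance for any output not listed explicitly.
compare_func = CompareFunc.simple(
    atol={"logits": 1e-3, "": 1e-5},   # "logits" is a hypothetical output name
    rtol={"": 1e-5},
    check_error_stat="median",         # one of: "max", "mean", "median", "elemwise"
)
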
Example #6
    def run(self, args):
        G_LOGGER.start("Starting iterations")

        builder, network, parser = util.unpack_args(
            self.arg_groups[TrtNetworkLoaderArgs].load_network(), 3)

        with contextlib.ExitStack() as stack:
            stack.enter_context(builder)
            stack.enter_context(network)
            if parser:
                stack.enter_context(parser)

            self.setup(args, network)

            num_passed = 0
            num_total = 0

            success = True
            MAX_COUNT = 100000  # We don't want to loop forever. This many iterations ought to be enough for anybody.
            for iteration in range(MAX_COUNT):
                remaining = self.remaining()
                G_LOGGER.start("RUNNING | Iteration {:}{:}".format(
                    iteration + 1,
                    " | Approximately {:} iteration(s) remaining".format(
                        remaining) if remaining is not None else "",
                ))

                self.process_network(network, success)

                try:
                    engine = self.arg_groups[TrtEngineLoaderArgs].build_engine(
                        (builder, network))
                except Exception as err:
                    G_LOGGER.warning(
                        "Failed to create network or engine, continuing to the next iteration.\n"
                        "Note: Error was: {:}".format(err))
                    G_LOGGER.internal_error(
                        "Failed to create network or engine. See warning above for details."
                    )
                    success = False
                else:
                    # Don't need to keep the engine around in memory - just serialize to disk and free it.
                    with engine:
                        self.arg_groups[TrtEngineSaveArgs].save_engine(
                            engine,
                            self.arg_groups[ArtifactSorterArgs].iter_artifact)
                    success = self.arg_groups[
                        ArtifactSorterArgs].sort_artifacts(iteration + 1)

                num_total += 1
                if success:
                    num_passed += 1

                if self.stop(iteration, success):
                    break
            else:
                G_LOGGER.warning(
                    "Maximum number of iterations reached: {:}.\n"
                    "Iteration has been halted to prevent an infinite loop!".
                    format(MAX_COUNT))

        G_LOGGER.finish(
            "Finished {:} iteration(s) | Passed: {:}/{:} | Pass Rate: {:}%".
            format(iteration + 1, num_passed, num_total,
                   float(num_passed) * 100 / float(num_total)))
Example #7
    def validate(run_results, check_inf=None, check_nan=None, fail_fast=None):
        """
        Checks output validity.

        Args:
            run_results (Dict[str, List[IterationResult]]): The result of Comparator.run().
            check_inf (bool): Whether to fail on Infs. Defaults to False.
            check_nan (bool): Whether to fail on NaNs. Defaults to True.
            fail_fast (bool): Whether to fail after the first invalid value. Defaults to False.

        Returns:
            bool: True if all outputs were valid, False otherwise.
        """
        check_inf = util.default(check_inf, False)
        check_nan = util.default(check_nan, True)
        fail_fast = util.default(fail_fast, False)

        def is_finite(output):
            non_finite = np.logical_not(np.isfinite(output))
            if np.any(non_finite):
                G_LOGGER.error(
                    "Inf Detected | One or more non-finite values were encountered in this output"
                )
                G_LOGGER.info(
                    "Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display non-finite values",
                    mode=LogMode.ONCE,
                )
                G_LOGGER.extra_verbose(
                    "Note: non-finite values at:\n{:}".format(non_finite))
                G_LOGGER.extra_verbose("Note: non-finite values:\n{:}".format(
                    output[non_finite]))
                return False
            return True

        def is_not_nan(output):
            nans = np.isnan(output)
            if np.any(nans):
                G_LOGGER.error(
                    "NaN Detected | One or more NaNs were encountered in this output"
                )
                G_LOGGER.info(
                    "Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display locations of NaNs",
                    mode=LogMode.ONCE,
                )
                G_LOGGER.extra_verbose("Note: NaNs at:\n{:}".format(nans))
                return False
            return True

        def validate_output(runner_name, output_name, output):
            G_LOGGER.start(
                "{:35} | Validating output: {:} (check_inf={:}, check_nan={:})"
                .format(runner_name, output_name, check_inf, check_nan))
            with G_LOGGER.indent():
                comp_util.log_output_stats(output)

                output_valid = True
                if check_nan:
                    output_valid &= is_not_nan(output)
                if check_inf:
                    output_valid &= is_finite(output)

                if output_valid:
                    G_LOGGER.finish(
                        "PASSED | Output: {:} is valid".format(output_name))
                else:
                    G_LOGGER.error(
                        "FAILED | Errors detected in output: {:}".format(
                            output_name))
                return output_valid

        all_valid = True
        G_LOGGER.start("Output Validation | Runners: {:}".format(
            list(run_results.keys())))
        with G_LOGGER.indent():
            for runner_name, results in run_results:
                for result in results:
                    for output_name, output in result.items():
                        all_valid &= validate_output(runner_name, output_name,
                                                     output)
                        if fail_fast and not all_valid:
                            return False

            if all_valid:
                G_LOGGER.finish("PASSED | Output Validation")
            else:
                G_LOGGER.error("FAILED | Output Validation")

        return all_valid
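
A minimal usage sketch for this method (assumes a recent Polygraphy release and a placeholder "model.onnx"):

from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.comparator import Comparator

run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx("model.onnx"))])
# check_nan defaults to True; also enable check_inf and stop at the first invalid value.
all_valid = Comparator.validate(run_results, check_inf=True, fail_fast=True)
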
Example #8
    def compare_accuracy(run_results,
                         fail_fast=False,
                         comparisons=None,
                         compare_func=None):
        """
        Args:
            run_results (RunResults): The result of Comparator.run()
            fail_fast (bool): Whether to exit after the first failure
            comparisons (List[Tuple[int, int]]):
                    Comparisons to perform, specified by runner indexes. For example, [(0, 1), (1, 2)]
                    would compare the first runner with the second, and the second with the third.
                    By default, this compares each result to the subsequent one.
            compare_func (Callable(IterationResult, IterationResult) -> OrderedDict[str, bool]):
                    A function that takes in two IterationResults, and returns a dictionary that maps output
                    names to a boolean (or anything convertible to a boolean) indicating whether outputs matched.
                    The order of arguments to this function is guaranteed to be the same as the ordering of the
                    tuples contained in `comparisons`.

        Returns:
            AccuracyResult:
                    A summary of the results of the comparisons. The order of the keys (i.e. runner pairs) is
                    guaranteed to be the same as the order of `comparisons`. For more details, see the AccuracyResult
                    docstring (e.g. help(AccuracyResult)).
        """
        def find_mismatched(match_dict):
            return [
                name for name, matched in match_dict.items()
                if not bool(matched)
            ]

        compare_func = util.default(compare_func, CompareFunc.simple())
        comparisons = util.default(comparisons,
                                   Comparator.default_comparisons(run_results))

        accuracy_result = AccuracyResult()
        for runner0_index, runner1_index in comparisons:
            runner0_name, results0 = run_results[runner0_index]
            runner1_name, results1 = run_results[runner1_index]

            G_LOGGER.start("Accuracy Comparison | {:} vs. {:}".format(
                runner0_name, runner1_name))
            with G_LOGGER.indent():
                runner_pair = (runner0_name, runner1_name)
                accuracy_result[runner_pair] = []

                num_iters = min(len(results0), len(results1))
                for iteration, (result0,
                                result1) in enumerate(zip(results0, results1)):
                    if num_iters > 1:
                        G_LOGGER.info("Iteration: {:}".format(iteration))
                    with contextlib.ExitStack() as stack:
                        if num_iters > 1:
                            stack.enter_context(G_LOGGER.indent())
                        iteration_match_dict = compare_func(result0, result1)
                        accuracy_result[runner_pair].append(
                            iteration_match_dict)

                        mismatched_outputs = find_mismatched(
                            iteration_match_dict)
                        if fail_fast and mismatched_outputs:
                            return accuracy_result

                G_LOGGER.extra_verbose(
                    "Finished comparing {:} with {:}".format(
                        runner0_name,
                        runner1_name,
                    ))

                passed, _, total = accuracy_result.stats(runner_pair)
                pass_rate = accuracy_result.percentage(runner_pair) * 100.0
                if num_iters > 1 or len(comparisons) > 1:
                    msg = "Accuracy Summary | {:} vs. {:} | Passed: {:}/{:} iterations | Pass Rate: {:}%".format(
                        runner0_name, runner1_name, passed, total, pass_rate)
                    if passed == total:
                        G_LOGGER.finish(msg)
                    else:
                        G_LOGGER.error(msg)
        return accuracy_result
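
A hedged end-to-end sketch of how this method is typically driven, comparing ONNX-Runtime against TensorRT for a placeholder "model.onnx" (loader names as exposed by polygraphy.backend.* in recent releases):

from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
from polygraphy.comparator import Comparator, CompareFunc

runners = [
    OnnxrtRunner(SessionFromOnnx("model.onnx")),
    TrtRunner(EngineFromNetwork(NetworkFromOnnxPath("model.onnx"))),
]
run_results = Comparator.run(runners)
# By default each runner is compared with the next; comparisons=[(0, 1)] makes that explicit.
accuracy = Comparator.compare_accuracy(
    run_results,
    comparisons=[(0, 1)],
    compare_func=CompareFunc.simple(atol=1e-4, rtol=1e-4),
)
print(bool(accuracy))  # AccuracyResult is truthy only if every compared output matched
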
Example #9
    def call_impl(self):
        """
        Returns:
            onnx.ModelProto: The new ONNX model with constants folded.
        """
        def run_const_fold_pass(model):
            graph = gs_from_onnx(model)
            del model

            try:
                graph.fold_constants(fold_shapes=self.fold_shapes,
                                     partitioning=self.partitioning)
            except TypeError as err:  # Using an old version of ONNX-GS
                if self.partitioning:
                    G_LOGGER.critical(
                        "This version of ONNX-GraphSurgeon may not support partitioning the graph.\n"
                        "Please upgrade to a newer version of ONNX-GraphSurgeon or disable partitioning.\n"
                        "Note: Error was:\n{:}".format(err))
                if self.fold_shapes:
                    G_LOGGER.critical(
                        "This version of ONNX-GraphSurgeon may not support folding shapes.\n"
                        "Please upgrade to a newer version of ONNX-GraphSurgeon or disable shape folding.\n"
                        "Note: Error was:\n{:}".format(err))

                graph.fold_constants()

            model = gs.export_onnx(graph.cleanup(), do_type_check=False)
            del graph

            if self.fold_shapes and self.do_shape_inference:
                model = infer_shapes(model)
            return model

        if not mod.has_mod(onnxrt):
            G_LOGGER.error(
                "ONNX-Runtime is not installed, so constant folding may be suboptimal or not work at all.\n"
                "Consider installing ONNX-Runtime: {:} -m pip install onnxruntime"
                .format(sys.executable))

        model = self.load()

        prefold_num_nodes = len(model.graph.node)
        postfold_num_nodes = -1
        index = 0

        while (prefold_num_nodes != postfold_num_nodes) and (
                self.num_passes is None or index < self.num_passes):
            prefold_num_nodes = onnx_util.get_num_nodes(model)

            G_LOGGER.start("Folding Constants | Pass {:}".format(index + 1))
            try:
                model = run_const_fold_pass(model)
            except Exception as err:
                if not self.error_ok:
                    raise
                G_LOGGER.warning(
                    "Constant folding pass failed. Skipping subsequent passes.\nNote: Error was:\n{:}"
                    .format(err))
                break
            else:
                postfold_num_nodes = onnx_util.get_num_nodes(model)
                index += 1

                G_LOGGER.finish(
                    "\tTotal Nodes | Original: {:5}, After Folding: {:5} | {:5} Nodes Folded"
                    .format(prefold_num_nodes, postfold_num_nodes,
                            prefold_num_nodes - postfold_num_nodes))

        return model
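
This call_impl belongs to Polygraphy's constant-folding ONNX loader; a usage sketch assuming it is exposed as polygraphy.backend.onnx.FoldConstants (as in recent releases) and a placeholder "model.onnx":

from polygraphy.backend.onnx import FoldConstants, OnnxFromPath, SaveOnnx

# Fold constants (optionally including shape nodes) and save the simplified model.
fold = SaveOnnx(FoldConstants(OnnxFromPath("model.onnx"), fold_shapes=True), path="folded.onnx")
folded_model = fold()  # onnx.ModelProto, as described in the Returns section above
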
Example #10
    def check_network(self, suffix):
        """
        Checks whether the provided network is accurate compared to golden values.

        Returns:
            OrderedDict[str, OutputCompareResult]:
                    A mapping of output names to an object describing whether they matched, and what the
                    required tolerances were.
        """
        from polygraphy.backend.trt import (EngineFromNetwork, ModifyNetwork,
                                            SaveEngine, TrtRunner)
        from polygraphy.comparator import Comparator, CompareFunc

        with G_LOGGER.verbosity(severity=G_LOGGER.severity
                                if self.args.show_output else G_LOGGER.CRITICAL):
            data_loader = self.makers[DataLoaderArgs].get_data_loader()

            # HACK: Override strict types so things actually run in the right precision.
            self.makers[TrtLoaderArgs].strict_types = True
            config = func.invoke(
                self.makers[TrtLoaderArgs].get_trt_config_loader(data_loader),
                self.builder, self.network)

            suffix = "-{:}-{:}".format(suffix, self.precision)
            engine_path = misc.insert_suffix(
                self.makers[TrtRunnerArgs].save_engine, suffix)

            self.builder, self.network, self.parser = func.invoke(
                ModifyNetwork((self.builder, self.network, self.parser),
                              outputs=self.makers[TrtLoaderArgs].outputs))

            engine_loader = SaveEngine(EngineFromNetwork(
                (self.builder, self.network, self.parser), config),
                                       path=engine_path)

            runners = [TrtRunner(engine_loader)]

            results = Comparator.run(runners, data_loader=data_loader)
            if self.makers[ComparatorCompareArgs].validate:
                Comparator.validate(results)
            results.update(self.golden)

            compare_func = CompareFunc.basic_compare_func(
                atol=self.makers[ComparatorCompareArgs].atol,
                rtol=self.makers[ComparatorCompareArgs].rtol,
                check_shapes=not self.makers[ComparatorCompareArgs].no_shape_check)
            accuracy_result = Comparator.compare_accuracy(
                results, compare_func=compare_func)

        # First iteration of first runner pair
        tolerances = list(accuracy_result.values())[0][0]
        for name, req_tol in tolerances.items():
            if bool(req_tol):
                G_LOGGER.finish(
                    "PASSED | Output: {:} | Required Tolerances: {:}".format(
                        name, req_tol))
            else:
                G_LOGGER.error(
                    "FAILED | Output: {:} | Required Tolerances: {:}".format(
                        name, req_tol))
        return accuracy_result
Example #11
    def call_impl(self):
        """
        Returns:
            bytes: The serialized engine that was created.
        """
        # If network is a callable, then we own its return value
        ret, owns_network = util.invoke_if_callable(self._network)
        builder, network, parser = util.unpack_args(ret, num=3)

        if builder is None or network is None:
            G_LOGGER.critical(
                "Expected to recevie a (builder, network) tuple for the `network` parameter, "
                "but received: ({:}, {:})".format(builder, network))

        with contextlib.ExitStack() as stack:
            if owns_network:
                stack.enter_context(builder)
                stack.enter_context(network)
                if parser is not None:
                    stack.enter_context(parser)
            else:
                provided = "Builder and Network" if parser is None else "Builder, Network, and Parser"
                G_LOGGER.verbose(
                    "{:} were provided directly instead of via a Callable. This loader will not assume ownership. "
                    "Please ensure that they are freed.".format(provided))

            config, owns_config = util.invoke_if_callable(
                self._config, builder, network)
            if owns_config:
                stack.enter_context(config)
            else:
                G_LOGGER.verbose(
                    "Builder configuration was provided directly instead of via a Callable. This loader will not assume "
                    "ownership. Please ensure it is freed.")

            try:
                config.int8_calibrator.__enter__  # Polygraphy calibrator frees device buffers on exit.
            except AttributeError:
                pass
            else:
                stack.enter_context(config.int8_calibrator)

            network_log_mode = "full" if G_LOGGER.severity <= G_LOGGER.ULTRA_VERBOSE else "attrs"
            G_LOGGER.super_verbose(
                lambda: ("Displaying TensorRT Network:\n" + trt_util.
                         str_from_network(network, mode=network_log_mode)))

            G_LOGGER.start("Building engine with configuration:\n{:}".format(
                trt_util.str_from_config(config)))

            start_time = time.time()
            try:
                engine_bytes = builder.build_serialized_network(
                    network, config)
            except AttributeError:
                engine = builder.build_engine(network, config)
                if not engine:
                    G_LOGGER.critical(
                        "Invalid Engine. Please ensure the engine was built correctly"
                    )
                stack.enter_context(engine)
                engine_bytes = engine.serialize()
            end_time = time.time()

            if not engine_bytes:
                G_LOGGER.critical(
                    "Invalid Engine. Please ensure the engine was built correctly"
                )

            G_LOGGER.finish(
                "Finished engine building in {:.3f} seconds".format(
                    end_time - start_time))

            try:
                timing_cache = config.get_timing_cache()
            except AttributeError:
                if self.timing_cache_path:
                    trt_util.fail_unavailable(
                        "save_timing_cache in EngineBytesFromNetwork")
            else:
                if timing_cache and self.timing_cache_path:
                    with timing_cache.serialize() as buffer:
                        util.save_file(buffer,
                                       self.timing_cache_path,
                                       description="tactic timing cache")

            return engine_bytes
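
A sketch of driving this loader directly (class and helper names as exposed by polygraphy.backend.trt in recent releases; "model.onnx" is a placeholder):

from polygraphy.backend.trt import (CreateConfig, EngineBytesFromNetwork,
                                    NetworkFromOnnxPath)

build = EngineBytesFromNetwork(NetworkFromOnnxPath("model.onnx"),
                               config=CreateConfig(fp16=True))
serialized_engine = build()  # bytes, per the Returns section above
with open("model.engine", "wb") as f:
    f.write(serialized_engine)
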
Example #12
    def run(self, args):
        supported, nodelists, _ = supports_model(
            self.arg_groups[ModelArgs].model_file)
        if supported:
            G_LOGGER.info(
                "Graph is fully supported by TensorRT; Will not generate subgraphs."
            )
            return

        parent_graph = onnx_backend.gs_from_onnx(
            self.arg_groups[OnnxLoaderArgs].load_onnx())

        def partition(nodelists, offset):
            """
            Partitions a set of subgraphs into supported and unsupported subgraphs.

            Args:
                nodelists (List[Tuple[List[int], bool]]):
                        A list that maps node indices to a boolean indicating whether they
                        are supported by TensorRT.
                offset (int): The value added to each node index to translate it into an
                        index in the parent graph.

            Returns:
                List[List[int]]:
                        A list of subgraphs supported by TensorRT, each described by a list of node indices.
            """
            supported_subgraphs = []
            for (node_indices, supported) in nodelists:
                if supported:
                    supported_subgraphs.append(
                        [index + offset for index in node_indices])
                    continue

                start = node_indices[0] + offset
                end = node_indices[-1] + offset + 1
                subgraph_path = save_subgraph(
                    self.arg_groups[OnnxSaveArgs],
                    parent_graph,
                    start,
                    end,
                    prefix="intermediate_",
                    use_tmp_file=True,
                )
                _, new_nodelists, _ = supports_model(subgraph_path)
                # Recursively partition each unsupported subgraph.
                supported_subgraphs += partition(new_nodelists, start)

            return supported_subgraphs

        supported_subgraphs = partition(nodelists, offset=0)
        unsupported_node_dict = UnsupportedNodeDict()

        def save_unsupported_graph(start, end):
            """
            Saves an unsupported subgraph, determines the error reason and adds it
            to unsupported_node_dict

            Args:
                start (int): The (inclusive) index of the start node.
                end (int): The (exclusive) index of the end node.
            """
            subgraph_path = save_subgraph(self.arg_groups[OnnxSaveArgs],
                                          parent_graph, start, end,
                                          "unsupported")
            _, _, parser = supports_model(subgraph_path)

            err_string = (" | ".join([
                str(parser.get_error(err_idx))
                for err_idx in range(parser.num_errors)
            ]) or "UNKNOWN ERROR")
            unsupported_node_dict.add(parent_graph.nodes[start].op, err_string,
                                      [start, end])

        # Log errors for all the unsupported graphs between supported subgraphs.
        for index, subg_node_idxs in enumerate(supported_subgraphs):
            save_subgraph(
                self.arg_groups[OnnxSaveArgs],
                parent_graph,
                subg_node_idxs[0],
                subg_node_idxs[-1] + 1,
                "supported",
            )

            if index == 0 and subg_node_idxs[0] != 0:
                save_unsupported_graph(0, subg_node_idxs[0])

            if (index == len(supported_subgraphs) - 1
                    and supported_subgraphs[-1][-1] != len(parent_graph.nodes) - 1):
                save_unsupported_graph(subg_node_idxs[-1] + 1,
                                       len(parent_graph.nodes))

            if index < len(supported_subgraphs) - 1:
                next_subg_node_idxs = supported_subgraphs[index + 1]
                save_unsupported_graph(subg_node_idxs[-1] + 1,
                                       next_subg_node_idxs[0])

        summary = gen_results_summary(unsupported_node_dict)

        G_LOGGER.finish(summary)
        util.save_file(summary,
                       os.path.join(self.arg_groups[OnnxSaveArgs].path,
                                    "results.txt"),
                       "w",
                       description="results")
Example #13
    def success_message(self):
        which_layers = "first" if self.direction == "forward" else "last"
        G_LOGGER.finish(
            "To achieve acceptable accuracy, try running the {:} {:} "
            "layer(s) in higher precision".format(which_layers, self.good)
        )
Example #14
            def check_outputs_match(out0, out0_name, out1, out1_name, per_out_rtol, per_out_atol, per_out_err_stat):
                VALID_CHECK_ERROR_STATS = ["max", "mean", "median", "elemwise"]
                if per_out_err_stat not in VALID_CHECK_ERROR_STATS:
                    G_LOGGER.critical("Invalid choice for check_error_stat: {:}.\n"
                                      "Note: Valid choices are: {:}".format(per_out_err_stat, VALID_CHECK_ERROR_STATS))

                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result0.runner_name, out0_name, out0.dtype, out0.shape, util.indent_block(out0)))
                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result1.runner_name, out1_name, out1.dtype, out1.shape, util.indent_block(out1)))

                # Check difference vs. tolerances
                if np.issubdtype(out0.dtype, np.bool_) and np.issubdtype(out1.dtype, np.bool_):
                    absdiff = np.logical_xor(out0, out1)
                else:
                    absdiff = np.abs(out0 - out1)

                absout1 = np.abs(out1)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning)
                    reldiff = absdiff / absout1

                max_absdiff = comp_util.compute_max(absdiff)
                mean_absdiff = comp_util.compute_mean(absdiff)
                median_absdiff = comp_util.compute_median(absdiff)
                max_reldiff = comp_util.compute_max(reldiff)
                mean_reldiff = comp_util.compute_mean(reldiff)
                median_reldiff = comp_util.compute_median(reldiff)

                max_elemwiseabs = "Unknown"
                max_elemwiserel = "Unknown"

                if per_out_err_stat == "mean":
                    failed = mean_absdiff > per_out_atol and (np.isnan(mean_reldiff) or mean_reldiff > per_out_rtol)
                elif per_out_err_stat == "median":
                    failed = median_absdiff > per_out_atol and (np.isnan(median_reldiff) or median_reldiff > per_out_rtol)
                elif per_out_err_stat == "max":
                    failed = max_absdiff > per_out_atol and (np.isnan(max_reldiff) or max_reldiff > per_out_rtol)
                else:
                    assert per_out_err_stat == "elemwise", "This branch should be unreachable unless per_out_err_stat is 'elemwise'"
                    mismatches = (absdiff > per_out_atol) & (reldiff > per_out_rtol)

                    failed = np.any(mismatches)
                    try:
                        # Special because we need to account for tolerances too.
                        max_elemwiseabs = comp_util.compute_max(absdiff[mismatches])
                        max_elemwiserel = comp_util.compute_max(reldiff[mismatches])

                        with G_LOGGER.indent():
                            G_LOGGER.super_verbose("Mismatched indices:\n{:}".format(np.argwhere(mismatches)))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result0.runner_name, out0[mismatches]))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result1.runner_name, out1[mismatches]))
                    except Exception as err:
                        G_LOGGER.warning("Failing to log mismatches.\nNote: Error was: {:}".format(err))

                # Log information about the outputs
                hist_bin_range = (min(comp_util.compute_min(out0), comp_util.compute_min(out1)),
                                  max(comp_util.compute_max(out0), comp_util.compute_max(out1)))
                comp_util.log_output_stats(out0, failed, iter_result0.runner_name + ": " + out0_name, hist_range=hist_bin_range)
                comp_util.log_output_stats(out1, failed, iter_result1.runner_name + ": " + out1_name, hist_range=hist_bin_range)

                G_LOGGER.info("Error Metrics: {:}".format(out0_name))
                with G_LOGGER.indent():
                    def req_tol(mean_diff, median_diff, max_diff, elemwise_diff):
                        return {
                            "mean": mean_diff,
                            "median": median_diff,
                            "max": max_diff,
                            "elemwise": elemwise_diff,
                        }[per_out_err_stat]

                    G_LOGGER.info("Minimum Required Tolerance: {:} error | [abs={:.5g}] OR [rel={:.5g}]".format(
                                    per_out_err_stat,
                                    req_tol(mean_absdiff, median_absdiff, max_absdiff, max_elemwiseabs),
                                    req_tol(mean_reldiff, median_reldiff, max_reldiff, max_elemwiserel)))
                    comp_util.log_output_stats(absdiff, failed, "Absolute Difference")
                    comp_util.log_output_stats(reldiff, failed, "Relative Difference")

                # Finally show summary.
                if failed:
                    G_LOGGER.error("FAILED | Difference exceeds tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))
                else:
                    G_LOGGER.finish("PASSED | Difference is within tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))

                G_LOGGER.extra_verbose("Finished comparing: '{:}' (dtype={:}, shape={:}) [{:}] and '{:}' (dtype={:}, shape={:}) [{:}]"
                                .format(out0_name, out0.dtype, out0.shape, iter_result0.runner_name, out1_name, out1.dtype, out1.shape, iter_result1.runner_name))
                return OutputCompareResult(not failed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff)
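
A small NumPy-only illustration of the "elemwise" branch above: an element counts as a mismatch only when it exceeds both the absolute and the relative tolerance.

import numpy as np

out0 = np.array([1.000, 2.000, 100.0])
out1 = np.array([1.001, 2.500, 100.4])
per_out_atol, per_out_rtol = 0.05, 0.01

absdiff = np.abs(out0 - out1)      # [0.001, 0.5, 0.4]
reldiff = absdiff / np.abs(out1)   # [~0.001, 0.2, ~0.004]
mismatches = (absdiff > per_out_atol) & (reldiff > per_out_rtol)
print(mismatches)                  # [False  True False] -> only the middle element fails
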
Example #15
        def compare_output(iter_result0, iter_result1):
            """
            Compare the outputs of two runners from a single iteration.

            This function will always iterate over the output names of the first IterationResult,
                and attempt to find corresponding output names in the second.
            If no corresponding output name is found, the output is skipped.
            If all output names are skipped, then this function raises an error.

            Args:
                iter_result0 (IterationResult): The result of the first runner.
                iter_result1 (IterationResult): The result of the second runner.

            Returns:
                OrderedDict[str, OutputCompareResult]:
                        The name of the outputs compared, derived from the first IterationResult,
                        and whether they matched. If an output name is not found, it is omitted from this dictionary.

            Raises:
                PolygraphyException: If all output names are skipped, and thus no outputs are compared.
            """
            def check_dict(dct, dict_name):
                if isinstance(dct, dict):
                    util.check_dict_contains(
                        dct,
                        set(iter_result0.keys()) | set(iter_result1.keys())
                        | {""},
                        check_missing=False,
                        dict_name=dict_name,
                    )

            check_dict(rtol, "the rtol dictionary")
            check_dict(atol, "the atol dictionary")
            check_dict(check_error_stat, "the check_error_stat dictionary")

            # OrderedDict[str, bool]: Maps output names to whether they matched.
            output_status = OrderedDict()

            if not check_shapes:
                G_LOGGER.info(
                    "Strict shape checking disabled. Will attempt to match output shapes before comparisons"
                )

            def default_find_output_func(output_name, index, iter_result):
                found_name = util.find_in_dict(output_name, iter_result, index)
                if found_name is None:
                    return None
                elif found_name != output_name:
                    exact_match = util.find_in_dict(found_name, iter_result0)
                    if exact_match == found_name:
                        G_LOGGER.verbose(
                            "Will not compare {:} with {:}, since the former already has an exact match: {:}"
                            .format(found_name, output_name, exact_match))
                        return None  # If the found output is being compared against another output already, skip this non-exact match
                    G_LOGGER.warning(
                        "Output names did not match exactly. Assuming {:} output: {:} "
                        "corresponds to output: {:}".format(
                            iter_result.runner_name, found_name, output_name))
                return [found_name]

            nonlocal find_output_func
            find_output_func = util.default(find_output_func,
                                            default_find_output_func)

            for index, (out0_name, output0) in enumerate(iter_result0.items()):
                out1_names = util.default(
                    find_output_func(out0_name, index, iter_result1), [])

                if len(out1_names) > 1:
                    G_LOGGER.info(
                        "Will attempt to compare output: '{:}' [{:}] with multiple outputs: '{:}' [{:}]"
                        .format(out0_name, iter_result0.runner_name,
                                list(out1_names), iter_result1.runner_name))

                for out1_name in out1_names:
                    if out1_name is None or out1_name not in iter_result1:
                        G_LOGGER.warning(
                            "For output: '{:}' [{:}], skipping corresponding output: '{:}' [{:}], "
                            "since the output was not found".format(
                                out0_name, iter_result0.runner_name, out1_name,
                                iter_result1.runner_name))
                        continue

                    per_out_atol = util.value_or_from_dict(
                        atol, out0_name, default_atol)
                    per_out_rtol = util.value_or_from_dict(
                        rtol, out0_name, default_rtol)
                    per_out_err_stat = util.value_or_from_dict(
                        check_error_stat, out0_name, default_error_stat)

                    output1 = iter_result1[out1_name]
                    G_LOGGER.start(
                        "Comparing Output: '{:}' (dtype={:}, shape={:}) with '{:}' (dtype={:}, shape={:}) | "
                        "Tolerance: [abs={:.5g}, rel={:.5g}] | Checking {:} error"
                        .format(
                            out0_name,
                            output0.dtype,
                            output0.shape,
                            out1_name,
                            output1.dtype,
                            output1.shape,
                            per_out_atol,
                            per_out_rtol,
                            per_out_err_stat,
                        ))
                    G_LOGGER.extra_verbose(
                        "Note: Comparing {:} vs. {:}".format(
                            iter_result0.runner_name,
                            iter_result1.runner_name))

                    with G_LOGGER.indent():
                        if check_shapes and output0.shape != output1.shape:
                            G_LOGGER.error(
                                "Will not compare outputs of different shapes. Note: Output shapes are "
                                "{:} and {:}.".format(output0.shape,
                                                      output1.shape))
                            G_LOGGER.error(
                                "Note: Use --no-shape-check or set check_shapes=False to "
                                "attempt to compare values anyway.",
                                mode=LogMode.ONCE,
                            )
                            outputs_match = False
                        else:
                            output1 = util.try_match_shape(
                                output1, output0.shape)
                            output0 = output0.reshape(output1.shape)
                            outputs_match = check_outputs_match(
                                output0,
                                out0_name,
                                output1,
                                out1_name,
                                per_out_rtol=per_out_rtol,
                                per_out_atol=per_out_atol,
                                per_out_err_stat=per_out_err_stat,
                                runner0_name=iter_result0.runner_name,
                                runner1_name=iter_result1.runner_name,
                            )

                        output_status[out0_name] = outputs_match
                        if fail_fast and not outputs_match:
                            return output_status

            mismatched_output_names = [
                name for name, matched in output_status.items() if not matched
            ]
            if mismatched_output_names:
                G_LOGGER.error("FAILED | Mismatched outputs: {:}".format(
                    mismatched_output_names))
            else:
                G_LOGGER.finish(
                    "PASSED | All outputs matched | Outputs: {:}".format(
                        list(output_status.keys())))

            # This is useful for catching cases where Polygraphy does something wrong with the runner output buffers
            if not output_status and (bool(iter_result0.keys())
                                      or bool(iter_result1.keys())):
                r0_name = iter_result0.runner_name
                r0_outs = list(iter_result0.keys())
                r1_name = iter_result1.runner_name
                r1_outs = list(iter_result1.keys())
                G_LOGGER.critical(
                    "All outputs were skipped, no common outputs found! Note:\n{:} outputs: "
                    "{:}\n{:} outputs: {:}".format(r0_name, r0_outs, r1_name,
                                                   r1_outs))

            return output_status
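
The function above resolves atol, rtol, and the error statistic per output name, falling back to a default when a name is missing from the dictionary; the final example on this page uses an empty-string key as the catch-all entry. A minimal sketch of driving this from the Python API follows; the model path, the output names "scores" and "probs", and the SessionFromOnnx/NetworkFromOnnxPath loaders are illustrative assumptions, not taken from the code above.

# Hedged sketch: per-output tolerances with CompareFunc.simple().
# "model.onnx" and the output names "scores"/"probs" are assumed for illustration.
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
from polygraphy.comparator import Comparator, CompareFunc

runners = [
    OnnxrtRunner(SessionFromOnnx("model.onnx")),
    TrtRunner(EngineFromNetwork(NetworkFromOnnxPath("model.onnx"))),
]

results = Comparator.run(runners)

# The "" key provides the default tolerance; named keys override it for specific outputs.
compare_func = CompareFunc.simple(
    atol={"scores": 1e-3, "": 1e-5},
    rtol={"probs": 1e-2, "": 1e-5},
)
success = bool(Comparator.compare_accuracy(results, compare_func=compare_func))
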
Beispiel #16
0
    def sort_artifacts(self, iteration, suffix=None):
        """
        Run the check command and move artifacts into the correct subdirectory.

        Args:
            iteration (int):
                    The current iteration index. This is used to name artifacts
                    and display logging messages.

            suffix (str):
                    A custom suffix to add to the artifact prior to moving it.
                    This will be applied in addition to the default suffix.
        Returns:
            bool: True if the command succeeded, False otherwise.
        """
        def move_artifacts(subdir, returncode):
            """
            Moves artifacts (args.artifacts) into the specified subdirectory or args.output and
            appends an index and timestamp. Creates parent directories as required.

            Args:
                subdir (str): The destination path as a subdirectory of args.output.
                index (int): The iteration index.
            """
            for art in self.artifacts:
                basename, ext = os.path.splitext(os.path.basename(art))
                if suffix:
                    basename += suffix
                name = "{:}_{:}_{:}_N{:}_ret{:}{:}".format(
                    basename, self.start_date, self.start_time, iteration,
                    returncode, ext)
                dest = os.path.join(self.output, subdir, name)

                if not os.path.exists(art):
                    G_LOGGER.error(
                        "Artifact: {:} does not exist, skipping.\n"
                        "Was the artifact supposed to be generated?".format(
                            art))
                    continue

                if os.path.exists(dest):
                    G_LOGGER.error(
                        "Destination path: {:} already exists.\n"
                        "Refusing to overwrite. This artifact will be skipped!"
                        .format(dest))
                    continue

                G_LOGGER.info("Moving {:} to {:}".format(art, dest))

                dir_path = os.path.dirname(dest)
                if dir_path:
                    dir_path = os.path.realpath(dir_path)
                    os.makedirs(dir_path, exist_ok=True)
                shutil.move(art, dest)
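            # Illustrative result (the artifact name and the exact date/time format are assumed): a
            # "polygraphy_debug.engine" artifact from a failing iteration 3 with return code 1 would
            # be moved to "<output>/bad/polygraphy_debug_<start_date>_<start_time>_N3_ret1.engine",
            # while artifacts from passing iterations land under "<output>/good/".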

        def try_remove(path):
            def func():
                try:
                    os.remove(path)
                except OSError:
                    G_LOGGER.verbose("Could not remove: {:}".format(path))

            return func
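
        # is_success(): when fail_codes is given, an iteration fails only if the return code is
        # listed and, if fail regexes were also provided, at least one of them matches the captured
        # output. Without fail_codes, any regex match marks a failure regardless of return code;
        # with neither configured, any nonzero return code does.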

        def is_success(status):
            has_fail_regex = None
            if self.fail_regexes is not None:
                output = status.stdout.decode() + status.stderr.decode()
                has_fail_regex = any(
                    regex.search(output) is not None
                    for regex in self.fail_regexes)

            if self.fail_codes is not None:
                # If a fail-code is specified, then we should also check has_fail_regex if provided.
                failed = status.returncode in self.fail_codes
                if has_fail_regex is not None:
                    failed &= has_fail_regex
            else:
                # If a fail-code is not specified, we should trigger failures even on 0-status
                # if the fail regex is found.
                failed = status.returncode != 0 if has_fail_regex is None else has_fail_regex
            return not failed

        with contextlib.ExitStack() as stack, G_LOGGER.indent():
            if self.iter_artifact and self.remove_intermediate:
                stack.callback(try_remove(self.iter_artifact))

            if self.iteration_info:
                util.save_json({"iteration": iteration}, self.iteration_info)
                stack.callback(try_remove(self.iteration_info))

            G_LOGGER.info("Running check command: {:}".format(" ".join(
                self.check)))
            status = sp.run(self.check, stdout=sp.PIPE, stderr=sp.PIPE)
            success = is_success(status)

            if self.show_output:
                stderr_log_level = G_LOGGER.WARNING if success else G_LOGGER.ERROR
                G_LOGGER.info(
                    "========== CAPTURED STDOUT ==========\n{:}".format(
                        status.stdout.decode()))
                G_LOGGER.log(
                    "========== CAPTURED STDERR ==========\n{:}".format(
                        status.stderr.decode()),
                    severity=stderr_log_level)

            if success:
                move_artifacts("good", status.returncode)
                G_LOGGER.finish("PASSED | Iteration {:}".format(iteration))
                return True
            else:
                move_artifacts("bad", status.returncode)
                G_LOGGER.error("FAILED | Iteration {:}".format(iteration))
                return False
Beispiel #17
0
# NOTE: The beginning of this script (imports, data_loader, parse_network_from_onnx, and
# build_onnxrt_session) is not shown; the tensor name 'input' below is assumed.
profiles = [
    Profile().add('input',
                  min=[1, 1, 28, 28],
                  opt=[4, 1, 28, 28],
                  max=[16, 1, 28, 28])
]
create_trt_config = CreateTrtConfig(max_workspace_size=1000000000,
                                    profiles=profiles)
build_engine = EngineFromNetwork(parse_network_from_onnx,
                                 config=create_trt_config)
save_engine = SaveEngine(build_engine, path='model-FP32.plan')

# Runners
runners = [
    OnnxrtRunner(build_onnxrt_session),
    TrtRunner(save_engine),
]

# Runner Execution
results = Comparator.run(runners, data_loader=data_loader)

success = True
# Accuracy Comparison
compare_func = CompareFunc.simple(rtol={'': 0.001}, atol={'': 0.001})
success &= bool(Comparator.compare_accuracy(results,
                                            compare_func=compare_func))

# Report Results
cmd_run = ' '.join(sys.argv)
if not success:
    G_LOGGER.critical("FAILED | Command: {}".format(cmd_run))
G_LOGGER.finish("PASSED | Command: {}".format(cmd_run))