Example #1
def is_not_nan(output):
    nans = np.isnan(output)
    if np.any(nans):
        G_LOGGER.error("NaN Detected | One or more NaNs were encountered in this output")
        G_LOGGER.info("Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display locations of NaNs", mode=LogMode.ONCE)
        G_LOGGER.extra_verbose("Note: NaNs at:\n{:}".format(nans))
        return False
    return True
Example #2
def check_onnx_parser_errors(parser, success):
    if parser.num_errors > 0:
        for index in range(parser.num_errors):
            G_LOGGER.error(parser.get_error(index))
        G_LOGGER.critical("Could not parse ONNX correctly")

    if not success:
        G_LOGGER.critical("Failed to parse ONNX model. Does the model file exist and contain a valid ONNX model?")
Example #3
    def check_network(self, suffix):
        """
        Checks whether the provided network is accurate compared to golden values.

        Returns:
            OrderedDict[str, OutputCompareResult]:
                    A mapping of output names to an object describing whether they matched, and what the
                    required tolerances were.
        """
        from polygraphy.comparator import Comparator, CompareFunc, DataLoader
        from polygraphy.backend.trt import EngineFromNetwork, TrtRunner, ModifyNetwork, SaveEngine

        with G_LOGGER.verbosity(severity=G_LOGGER.severity if self.args.show_output
                                else G_LOGGER.CRITICAL):
            data_loader = tool_util.get_data_loader(self.args)

            self.args.strict_types = True  # HACK: Override strict types so things actually run in the right precision.
            config = tool_util.get_trt_config_loader(self.args,
                                                     data_loader)(self.builder,
                                                                  self.network)

            suffix = "-{:}-{:}".format(suffix, self.precision)
            engine_path = misc.insert_suffix(self.args.save_engine, suffix)

            self.builder, self.network, self.parser = ModifyNetwork(
                (self.builder, self.network, self.parser),
                outputs=self.args.trt_outputs)()

            engine_loader = SaveEngine(EngineFromNetwork(
                (self.builder, self.network, self.parser), config),
                                       path=engine_path)

            runners = [TrtRunner(engine_loader)]

            results = Comparator.run(runners, data_loader=data_loader)
            if self.args.validate:
                Comparator.validate(results)
            results.update(self.golden)

            compare_func = CompareFunc.basic_compare_func(
                atol=self.args.atol,
                rtol=self.args.rtol,
                check_shapes=not self.args.no_shape_check)
            accuracy_result = Comparator.compare_accuracy(
                results, compare_func=compare_func)

        # First iteration of first runner pair
        tolerances = list(accuracy_result.values())[0][0]
        for name, req_tol in tolerances.items():
            if bool(req_tol):
                G_LOGGER.success(
                    "PASSED | Output: {:} | Required Tolerances: {:}".format(
                        name, req_tol))
            else:
                G_LOGGER.error(
                    "FAILED | Output: {:} | Required Tolerances: {:}".format(
                        name, req_tol))
        return accuracy_result
Example #4
def is_finite(output):
    non_finite = np.logical_not(np.isfinite(output))
    if np.any(non_finite):
        G_LOGGER.error("Inf Detected | One or more non-finite values were encountered in this output")
        G_LOGGER.info("Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display non-finite values", mode=LogMode.ONCE)
        G_LOGGER.extra_verbose("Note: non-finite values at:\n{:}".format(non_finite))
        G_LOGGER.extra_verbose("Note: non-finite values:\n{:}".format(output[non_finite]))
        return False
    return True
Example #5
    def to_trt(self, builder, network):
        """
        Creates a TensorRT IOptimizationProfile based on the values set in this Profile.

        Args:
            builder (trt.Builder):
                    A TensorRT builder. This will be used to construct the IOptimizationProfile.
            network (trt.INetworkDefinition):
                    The TensorRT network the profile applies to.

        Returns:
            trt.IOptimizationProfile: A TensorRT optimization profile.
        """
        trt_profile = builder.create_optimization_profile()
        unused_keys = set(self.keys())
        available_inputs = set()
        for idx in range(network.num_inputs):
            inp = network.get_input(idx)
            if inp.name in unused_keys:
                unused_keys.remove(inp.name)
            available_inputs.add(inp.name)

            with G_LOGGER.verbosity():  # WAR for spam from TRT
                is_shape_tensor = inp.is_shape_tensor

            if is_shape_tensor:
                if inp.name in self:
                    shapes = self[inp.name]
                    trt_profile.set_shape_input(inp.name, shapes.min,
                                                shapes.opt, shapes.max)
                    G_LOGGER.verbose(
                        "{:} | Setting input shape-tensor value range to: {:}".
                        format(trt_util.str_from_tensor(inp, is_shape_tensor),
                               shapes))
                else:
                    G_LOGGER.warning(
                        "{:} | No values provided. "
                        "Assuming this is not a dynamic shape-tensor.".format(
                            trt_util.str_from_tensor(inp, is_shape_tensor)),
                        mode=LogMode.ONCE,
                    )
            else:
                shapes = self[inp.name]
                trt_profile.set_shape(inp.name, shapes.min, shapes.opt,
                                      shapes.max)
                G_LOGGER.verbose(
                    "{:} | Setting input tensor shapes to: {:}".format(
                        trt_util.str_from_tensor(inp, is_shape_tensor),
                        shapes))

        if unused_keys:
            G_LOGGER.error(
                "Invalid inputs were provided to the optimization profile: {:}\n"
                "Note: Inputs available in the TensorRT network are: {:}".
                format(unused_keys, available_inputs))

        return trt_util.check_profile(trt_profile)
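
A hedged usage sketch for to_trt: the method above only requires that the profile maps network input names to objects exposing .min/.opt/.max, so the ShapeTuple stand-in, the input name, and the shape values below are assumptions for illustration; builder, network, and config are presumed to come from an ordinary TensorRT setup (see the sketch after Example #2).

from collections import namedtuple

# Hypothetical stand-in for the per-input min/opt/max triple.
ShapeTuple = namedtuple("ShapeTuple", ["min", "opt", "max"])

profile = Profile()  # dict-like, as the keys()/__getitem__ usage above suggests
profile["input_ids"] = ShapeTuple(min=(1, 1), opt=(4, 128), max=(8, 256))

trt_profile = profile.to_trt(builder, network)  # builder/network: trt.Builder / trt.INetworkDefinition
config.add_optimization_profile(trt_profile)    # config: trt.IBuilderConfig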
Example #6
    def stop(self, index, success):
        if success:
            self.success_message()
            return True

        if index >= (self.max_layers - 1):
            G_LOGGER.error("Could not find a configuration that satisfied accuracy requirements.")
            return True
        return False
Example #7
    def stop(self, index, success):
        # If good and bad are within 1 layer of each other,
        # then we already have the information we need.
        if abs(self.good - self.bad) <= 1:
            if self.good >= self.max_layers:
                G_LOGGER.error("Could not find a configuration that satisfied accuracy requirements.")
            else:
                self.success_message()
            return True

        if index >= (self.max_layers - 1):
            G_LOGGER.error("Could not find a configuration that satisfied accuracy requirements.")
            return True
        return False
Example #8
        def validate_output(runner_name, output_name, output):
            G_LOGGER.start("{:35} | Validating output: {:} (check_inf={:}, check_nan={:})".format(
                runner_name, output_name, check_inf, check_nan))
            with G_LOGGER.indent():
                comp_util.log_output_stats(output)

                output_valid = True
                if check_nan:
                    output_valid &= is_not_nan(output)
                if check_inf:
                    output_valid &= is_finite(output)

                if output_valid:
                    G_LOGGER.finish("PASSED | Output: {:} is valid".format(output_name))
                else:
                    G_LOGGER.error("FAILED | Errors detected in output: {:}".format(output_name))
                return output_valid
Example #9
File: util.py  Project: clayne/TensorRT
def np_type_from_str(dt_str):
    """
    Converts a string representation of a data type to a NumPy data type.

    Args:
        dt_str (str): The string representation of the data type.

    Returns:
        np.dtype: The NumPy data type.

    Raises:
        KeyError: If the provided string does not correspond to a NumPy data type.
    """
    try:
        return {np.dtype(dtype).name: np.dtype(dtype) for dtype in np.sctypeDict.values()}[dt_str]
    except KeyError:
        G_LOGGER.error("Could not understand data type: {:}. Did you forget to specify a data type? "
                         "Please use one of: {:} or `auto`.".format(dt_str, np_types()))
        raise
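
A short usage sketch for np_type_from_str; the lookup keys are ordinary NumPy dtype names, and an unrecognized string logs the error above and re-raises the KeyError.

import numpy as np

assert np_type_from_str("float32") == np.dtype("float32")
assert np_type_from_str("int64") == np.dtype("int64")

try:
    np_type_from_str("not_a_dtype")  # hypothetical invalid input
except KeyError:
    pass  # the error message was already logged via G_LOGGER.error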
Example #10
    def sort_artifacts(self, iteration, suffix=None):
        """
        Run the check command and move artifacts into the correct subdirectory.

        Args:
            iteration (int):
                    The current iteration index. This is used to name artifacts
                    and display logging messages.

            suffix (str):
                    A custom suffix to add to the artifact prior to moving it.
                    This will be applied in addition to the default suffix.
        Returns:
            bool: True if the command succeeded, False otherwise.
        """
        def move_artifacts(subdir, returncode):
            """
            Moves artifacts (args.artifacts) into the specified subdirectory or args.output and
            appends an index and timestamp. Creates parent directories as required.

            Args:
                subdir (str): The destination path as a subdirectory of args.output.
                index (int): The iteration index.
            """
            for art in self.artifacts:
                basename, ext = os.path.splitext(os.path.basename(art))
                if suffix:
                    basename += suffix
                name = "{:}_{:}_{:}_N{:}_ret{:}{:}".format(
                    basename, self.start_date, self.start_time, iteration,
                    returncode, ext)
                dest = os.path.join(self.output, subdir, name)

                if not os.path.exists(art):
                    G_LOGGER.error(
                        "Artifact: {:} does not exist, skipping.\n"
                        "Was the artifact supposed to be generated?".format(
                            art))
                    continue

                if os.path.exists(dest):
                    G_LOGGER.error(
                        "Destination path: {:} already exists.\n"
                        "Refusing to overwrite. This artifact will be skipped!"
                        .format(dest))
                    continue

                G_LOGGER.info("Moving {:} to {:}".format(art, dest))

                dir_path = os.path.dirname(dest)
                if dir_path:
                    dir_path = os.path.realpath(dir_path)
                    os.makedirs(dir_path, exist_ok=True)
                shutil.move(art, dest)

        def try_remove(path):
            def func():
                try:
                    os.remove(path)
                except:
                    G_LOGGER.verbose("Could not remove: {:}".format(path))

            return func

        def is_success(status):
            has_fail_regex = None
            if self.fail_regexes is not None:
                output = status.stdout.decode() + status.stderr.decode()
                has_fail_regex = any(
                    regex.search(output) is not None
                    for regex in self.fail_regexes)

            if self.fail_codes is not None:
                # If a fail-code is specified, then we should also check has_fail_regex if provided.
                failed = status.returncode in self.fail_codes
                if has_fail_regex is not None:
                    failed &= has_fail_regex
            else:
                # If a fail-code is not specified, we should trigger failures even on 0-status
                # if the fail regex is found.
                failed = status.returncode != 0 if has_fail_regex is None else has_fail_regex
            return not failed

        with contextlib.ExitStack() as stack, G_LOGGER.indent():
            if self.iter_artifact and self.remove_intermediate:
                stack.callback(try_remove(self.iter_artifact))

            if self.iteration_info:
                util.save_json({"iteration": iteration}, self.iteration_info)
                stack.callback(try_remove(self.iteration_info))

            G_LOGGER.info("Running check command: {:}".format(" ".join(
                self.check)))
            status = sp.run(self.check, stdout=sp.PIPE, stderr=sp.PIPE)
            success = is_success(status)

            if self.show_output:
                stderr_log_level = G_LOGGER.WARNING if success else G_LOGGER.ERROR
                G_LOGGER.info(
                    "========== CAPTURED STDOUT ==========\n{:}".format(
                        status.stdout.decode()))
                G_LOGGER.log(
                    "========== CAPTURED STDERR ==========\n{:}".format(
                        status.stderr.decode()),
                    severity=stderr_log_level)

            if success:
                move_artifacts("good", status.returncode)
                G_LOGGER.finish("PASSED | Iteration {:}".format(iteration))
                return True
            else:
                move_artifacts("bad", status.returncode)
                G_LOGGER.error("FAILED | Iteration {:}".format(iteration))
                return False
Example #11
            def check_outputs_match(out0, out0_name, out1, out1_name, per_out_rtol, per_out_atol, per_out_err_stat):
                VALID_CHECK_ERROR_STATS = ["max", "mean", "median", "elemwise"]
                if per_out_err_stat not in VALID_CHECK_ERROR_STATS:
                    G_LOGGER.critical("Invalid choice for check_error_stat: {:}.\n"
                                      "Note: Valid choices are: {:}".format(per_out_err_stat, VALID_CHECK_ERROR_STATS))

                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result0.runner_name, out0_name, out0.dtype, out0.shape, util.indent_block(out0)))
                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result1.runner_name, out1_name, out1.dtype, out1.shape, util.indent_block(out1)))

                # Check difference vs. tolerances
                if np.issubdtype(out0.dtype, np.bool_) and np.issubdtype(out1.dtype, np.bool_):
                    absdiff = np.logical_xor(out0, out1)
                else:
                    absdiff = np.abs(out0 - out1)

                absout1 = np.abs(out1)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning)
                    reldiff = absdiff / absout1

                max_absdiff = comp_util.compute_max(absdiff)
                mean_absdiff = comp_util.compute_mean(absdiff)
                median_absdiff = comp_util.compute_median(absdiff)
                max_reldiff = comp_util.compute_max(reldiff)
                mean_reldiff = comp_util.compute_mean(reldiff)
                median_reldiff = comp_util.compute_median(reldiff)

                max_elemwiseabs = "Unknown"
                max_elemwiserel = "Unknown"

                if per_out_err_stat == "mean":
                    failed = mean_absdiff > per_out_atol and (np.isnan(mean_reldiff) or mean_reldiff > per_out_rtol)
                elif per_out_err_stat == "median":
                    failed = median_absdiff > per_out_atol and (np.isnan(median_reldiff) or median_reldiff > per_out_rtol)
                elif per_out_err_stat == "max":
                    failed = max_absdiff > per_out_atol and (np.isnan(max_reldiff) or max_reldiff > per_out_rtol)
                else:
                    assert per_out_err_stat == "elemwise", "This branch should be unreachable unless per_out_err_stat is 'elemwise'"
                    mismatches = (absdiff > per_out_atol) & (reldiff > per_out_rtol)

                    failed = np.any(mismatches)
                    try:
                        # Special because we need to account for tolerances too.
                        max_elemwiseabs = comp_util.compute_max(absdiff[mismatches])
                        max_elemwiserel = comp_util.compute_max(reldiff[mismatches])

                        with G_LOGGER.indent():
                            G_LOGGER.super_verbose("Mismatched indices:\n{:}".format(np.argwhere(mismatches)))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result0.runner_name, out0[mismatches]))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result1.runner_name, out1[mismatches]))
                    except Exception as err:
                        G_LOGGER.warning("Failing to log mismatches.\nNote: Error was: {:}".format(err))

                # Log information about the outputs
                hist_bin_range = (min(comp_util.compute_min(out0), comp_util.compute_min(out1)),
                                  max(comp_util.compute_max(out0), comp_util.compute_max(out1)))
                comp_util.log_output_stats(out0, failed, iter_result0.runner_name + ": " + out0_name, hist_range=hist_bin_range)
                comp_util.log_output_stats(out1, failed, iter_result1.runner_name + ": " + out1_name, hist_range=hist_bin_range)

                G_LOGGER.info("Error Metrics: {:}".format(out0_name))
                with G_LOGGER.indent():
                    def req_tol(mean_diff, median_diff, max_diff, elemwise_diff):
                        return {
                            "mean": mean_diff,
                            "median": median_diff,
                            "max": max_diff,
                            "elemwise": elemwise_diff,
                        }[per_out_err_stat]

                    G_LOGGER.info("Minimum Required Tolerance: {:} error | [abs={:.5g}] OR [rel={:.5g}]".format(
                                    per_out_err_stat,
                                    req_tol(mean_absdiff, median_absdiff, max_absdiff, max_elemwiseabs),
                                    req_tol(mean_reldiff, median_reldiff, max_reldiff, max_elemwiserel)))
                    comp_util.log_output_stats(absdiff, failed, "Absolute Difference")
                    comp_util.log_output_stats(reldiff, failed, "Relative Difference")

                # Finally show summary.
                if failed:
                    G_LOGGER.error("FAILED | Difference exceeds tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))
                else:
                    G_LOGGER.finish("PASSED | Difference is within tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))

                G_LOGGER.extra_verbose("Finished comparing: '{:}' (dtype={:}, shape={:}) [{:}] and '{:}' (dtype={:}, shape={:}) [{:}]"
                                .format(out0_name, out0.dtype, out0.shape, iter_result0.runner_name, out1_name, out1.dtype, out1.shape, iter_result1.runner_name))
                return OutputCompareResult(not failed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff)
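
The "elemwise" branch above treats an element as mismatched only when it violates both the absolute and the relative tolerance. A self-contained NumPy sketch of that criterion, with made-up values and tolerances:

import numpy as np

out0 = np.array([1.000, 2.000, 3.000])
out1 = np.array([1.001, 2.300, 3.000])
atol, rtol = 0.01, 0.05

absdiff = np.abs(out0 - out1)
reldiff = absdiff / np.abs(out1)

# An element fails only if it exceeds BOTH tolerances, mirroring the elemwise branch.
mismatches = (absdiff > atol) & (reldiff > rtol)
print(np.argwhere(mismatches))  # [[1]]: only the middle element is out of tolerance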
Example #12
        def compare_output(iter_result0, iter_result1):
            """
            Compare the outputs of two runners from a single iteration.

            This function will always iterate over the output names of the first IterationResult,
                and attempt to find corresponding output names in the second.
            If no corresponding output name is found, the output is skipped.
            If all output names are skipped, then this function raises an error.

            Args:
                iter_result0 (IterationResult): The result of the first runner.
                iter_result1 (IterationResult): The result of the second runner.

            Returns:
                OrderedDict[str, OutputCompareResult]:
                        The name of the outputs compared, derived from the first IterationResult,
                        and whether they matched. If an output name is not found, it is omitted from this dictionary.

            Raises:
                PolygraphyException: If all output names are skipped, and thus no outputs are compared.
            """
            def check_dict(dct, dict_name):
                if isinstance(dct, dict):
                    util.check_dict_contains(dct, set(iter_result0.keys()) | set(iter_result1.keys()) | set([""]),
                                             check_missing=False, dict_name=dict_name)


            check_dict(rtol, "the rtol dictionary")
            check_dict(atol, "the atol dictionary")
            check_dict(check_error_stat, "the check_error_stat dictionary")


            # Returns whether the outputs match
            def check_outputs_match(out0, out0_name, out1, out1_name, per_out_rtol, per_out_atol, per_out_err_stat):
                VALID_CHECK_ERROR_STATS = ["max", "mean", "median", "elemwise"]
                if per_out_err_stat not in VALID_CHECK_ERROR_STATS:
                    G_LOGGER.critical("Invalid choice for check_error_stat: {:}.\n"
                                      "Note: Valid choices are: {:}".format(per_out_err_stat, VALID_CHECK_ERROR_STATS))

                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result0.runner_name, out0_name, out0.dtype, out0.shape, util.indent_block(out0)))
                G_LOGGER.super_verbose("{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
                                            iter_result1.runner_name, out1_name, out1.dtype, out1.shape, util.indent_block(out1)))

                # Check difference vs. tolerances
                if np.issubdtype(out0.dtype, np.bool_) and np.issubdtype(out1.dtype, np.bool_):
                    absdiff = np.logical_xor(out0, out1)
                else:
                    absdiff = np.abs(out0 - out1)

                absout1 = np.abs(out1)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning)
                    reldiff = absdiff / absout1

                max_absdiff = comp_util.compute_max(absdiff)
                mean_absdiff = comp_util.compute_mean(absdiff)
                median_absdiff = comp_util.compute_median(absdiff)
                max_reldiff = comp_util.compute_max(reldiff)
                mean_reldiff = comp_util.compute_mean(reldiff)
                median_reldiff = comp_util.compute_median(reldiff)

                max_elemwiseabs = "Unknown"
                max_elemwiserel = "Unknown"

                if per_out_err_stat == "mean":
                    failed = mean_absdiff > per_out_atol and (np.isnan(mean_reldiff) or mean_reldiff > per_out_rtol)
                elif per_out_err_stat == "median":
                    failed = median_absdiff > per_out_atol and (np.isnan(median_reldiff) or median_reldiff > per_out_rtol)
                elif per_out_err_stat == "max":
                    failed = max_absdiff > per_out_atol and (np.isnan(max_reldiff) or max_reldiff > per_out_rtol)
                else:
                    assert per_out_err_stat == "elemwise", "This branch should be unreachable unless per_out_err_stat is 'elemwise'"
                    mismatches = (absdiff > per_out_atol) & (reldiff > per_out_rtol)

                    failed = np.any(mismatches)
                    try:
                        # Special because we need to account for tolerances too.
                        max_elemwiseabs = comp_util.compute_max(absdiff[mismatches])
                        max_elemwiserel = comp_util.compute_max(reldiff[mismatches])

                        with G_LOGGER.indent():
                            G_LOGGER.super_verbose("Mismatched indices:\n{:}".format(np.argwhere(mismatches)))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result0.runner_name, out0[mismatches]))
                            G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(iter_result1.runner_name, out1[mismatches]))
                    except Exception as err:
                        G_LOGGER.warning("Failing to log mismatches.\nNote: Error was: {:}".format(err))

                # Log information about the outputs
                hist_bin_range = (min(comp_util.compute_min(out0), comp_util.compute_min(out1)),
                                  max(comp_util.compute_max(out0), comp_util.compute_max(out1)))
                comp_util.log_output_stats(out0, failed, iter_result0.runner_name + ": " + out0_name, hist_range=hist_bin_range)
                comp_util.log_output_stats(out1, failed, iter_result1.runner_name + ": " + out1_name, hist_range=hist_bin_range)

                G_LOGGER.info("Error Metrics: {:}".format(out0_name))
                with G_LOGGER.indent():
                    def req_tol(mean_diff, median_diff, max_diff, elemwise_diff):
                        return {
                            "mean": mean_diff,
                            "median": median_diff,
                            "max": max_diff,
                            "elemwise": elemwise_diff,
                        }[per_out_err_stat]

                    G_LOGGER.info("Minimum Required Tolerance: {:} error | [abs={:.5g}] OR [rel={:.5g}]".format(
                                    per_out_err_stat,
                                    req_tol(mean_absdiff, median_absdiff, max_absdiff, max_elemwiseabs),
                                    req_tol(mean_reldiff, median_reldiff, max_reldiff, max_elemwiserel)))
                    comp_util.log_output_stats(absdiff, failed, "Absolute Difference")
                    comp_util.log_output_stats(reldiff, failed, "Relative Difference")

                # Finally show summary.
                if failed:
                    G_LOGGER.error("FAILED | Difference exceeds tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))
                else:
                    G_LOGGER.finish("PASSED | Difference is within tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))

                G_LOGGER.extra_verbose("Finished comparing: '{:}' (dtype={:}, shape={:}) [{:}] and '{:}' (dtype={:}, shape={:}) [{:}]"
                                .format(out0_name, out0.dtype, out0.shape, iter_result0.runner_name, out1_name, out1.dtype, out1.shape, iter_result1.runner_name))
                return OutputCompareResult(not failed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff)
                #
                # End: def check_outputs_match
                #

            output_status = OrderedDict() # OrderedDict[str, bool] Maps output names to whether they matched.

            if not check_shapes:
                G_LOGGER.info("Strict shape checking disabled. Will attempt to match output shapes before comparisons")


            def default_find_output_func(output_name, index, iter_result):
                found_name = util.find_in_dict(output_name, iter_result, index)
                if found_name is None:
                    return None
                elif found_name != output_name:
                    exact_match = util.find_in_dict(found_name, iter_result0)
                    if exact_match == found_name:
                        G_LOGGER.verbose("Will not compare {:} with {:}, since the former already has an exact match: {:}".format(
                                            found_name, output_name, exact_match))
                        return None # If the found output is being compared against another output already, skip this non-exact match
                    G_LOGGER.warning("Output names did not match exactly. Assuming {:} output: {:} "
                                    "corresponds to output: {:}".format(
                                        iter_result.runner_name, found_name, output_name))
                return [found_name]


            nonlocal find_output_func
            find_output_func = util.default(find_output_func, default_find_output_func)

            for index, (out0_name, output0) in enumerate(iter_result0.items()):
                out1_names = util.default(find_output_func(out0_name, index, iter_result1), [])

                if len(out1_names) > 1:
                    G_LOGGER.info("Will attempt to compare output: '{:}' [{:}] with multiple outputs: '{:}' [{:}]".format(
                                    out0_name, iter_result0.runner_name, list(out1_names), iter_result1.runner_name))

                for out1_name in out1_names:
                    if out1_name is None or out1_name not in iter_result1:
                        G_LOGGER.warning("For output: '{:}' [{:}], skipping corresponding output: '{:}' [{:}], "
                                         "since the output was not found".format(out0_name, iter_result0.runner_name,
                                                                                 out1_name, iter_result1.runner_name))
                        continue


                    def get_tol(tol_dict, default):
                        if isinstance(tol_dict, numbers.Number):
                            return tol_dict

                        if out0_name in tol_dict:
                            return tol_dict[out0_name]
                        elif "" in tol_dict:
                            return tol_dict[""]
                        return default


                    def get_error_stat():
                        if isinstance(check_error_stat, str):
                            return check_error_stat

                        if out0_name in check_error_stat:
                            return check_error_stat[out0_name]
                        elif "" in check_error_stat:
                            return check_error_stat[""]
                        return default_error_stat


                    per_out_atol = get_tol(atol, default_atol)
                    per_out_rtol = get_tol(rtol, default_rtol)
                    per_out_err_stat = get_error_stat()

                    output1 = iter_result1[out1_name]
                    G_LOGGER.start("Comparing Output: '{:}' (dtype={:}, shape={:}) with '{:}' (dtype={:}, shape={:}) | "
                                   "Tolerance: [abs={:.5g}, rel={:.5g}] | Checking {:} error".format(
                                        out0_name, output0.dtype, output0.shape,
                                        out1_name, output1.dtype, output1.shape,
                                        per_out_atol, per_out_rtol, per_out_err_stat))
                    G_LOGGER.extra_verbose("Note: Comparing {:} vs. {:}".format(iter_result0.runner_name, iter_result1.runner_name))


                    with G_LOGGER.indent():
                        if check_shapes and output0.shape != output1.shape:
                            G_LOGGER.error("Will not compare outputs of different shapes. Note: Output shapes are "
                                           "{:} and {:}.".format(output0.shape, output1.shape))
                            G_LOGGER.error("Note: Use --no-strict-shape-checking or set check_shapes=False to "
                                           "attempt to compare values anyway.", mode=LogMode.ONCE)
                            outputs_match = False
                        else:
                            output1 = util.try_match_shape(output1, output0.shape)
                            output0 = output0.reshape(output1.shape)
                            outputs_match = check_outputs_match(output0, out0_name, output1, out1_name,
                                                                per_out_rtol=per_out_rtol, per_out_atol=per_out_atol,
                                                                per_out_err_stat=per_out_err_stat)

                        output_status[out0_name] = outputs_match
                        if fail_fast and not outputs_match:
                            return output_status


            mismatched_output_names = [name for name, matched in output_status.items() if not matched]
            if mismatched_output_names:
                G_LOGGER.error("FAILED | Mismatched outputs: {:}".format(mismatched_output_names))
            else:
                G_LOGGER.finish("PASSED | All outputs matched | Outputs: {:}".format(list(output_status.keys())))

            # This is useful for catching cases where Polygraphy does something wrong with the runner output buffers
            if not output_status and (bool(iter_result0.keys()) or bool(iter_result1.keys())):
                r0_name = iter_result0.runner_name
                r0_outs = list(iter_result0.keys())
                r1_name = iter_result1.runner_name
                r1_outs = list(iter_result1.keys())
                G_LOGGER.critical("All outputs were skipped, no common outputs found! Note:\n{:} outputs: "
                                  "{:}\n{:} outputs: {:}".format(r0_name, r0_outs, r1_name, r1_outs))

            return output_status
Example #13
    def validate(run_results, check_inf=None, check_nan=None, fail_fast=None):
        """
        Checks output validity.

        Args:
            run_results (Dict[str, List[IterationResult]]): The result of Comparator.run().
            check_inf (bool): Whether to fail on Infs. Defaults to False.
            check_nan (bool): Whether to fail on NaNs. Defaults to True.
            fail_fast (bool): Whether to fail after the first invalid value. Defaults to False.

        Returns:
            bool: True if all outputs were valid, False otherwise.
        """
        check_inf = util.default(check_inf, False)
        check_nan = util.default(check_nan, True)
        fail_fast = util.default(fail_fast, False)

        def is_finite(output):
            non_finite = np.logical_not(np.isfinite(output))
            if np.any(non_finite):
                G_LOGGER.error(
                    "Inf Detected | One or more non-finite values were encountered in this output"
                )
                G_LOGGER.info(
                    "Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display non-finite values",
                    mode=LogMode.ONCE,
                )
                G_LOGGER.extra_verbose(
                    "Note: non-finite values at:\n{:}".format(non_finite))
                G_LOGGER.extra_verbose("Note: non-finite values:\n{:}".format(
                    output[non_finite]))
                return False
            return True

        def is_not_nan(output):
            nans = np.isnan(output)
            if np.any(nans):
                G_LOGGER.error(
                    "NaN Detected | One or more NaNs were encountered in this output"
                )
                G_LOGGER.info(
                    "Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display locations of NaNs",
                    mode=LogMode.ONCE,
                )
                G_LOGGER.extra_verbose("Note: NaNs at:\n{:}".format(nans))
                return False
            return True

        def validate_output(runner_name, output_name, output):
            G_LOGGER.start(
                "{:35} | Validating output: {:} (check_inf={:}, check_nan={:})"
                .format(runner_name, output_name, check_inf, check_nan))
            with G_LOGGER.indent():
                comp_util.log_output_stats(output)

                output_valid = True
                if check_nan:
                    output_valid &= is_not_nan(output)
                if check_inf:
                    output_valid &= is_finite(output)

                if output_valid:
                    G_LOGGER.finish(
                        "PASSED | Output: {:} is valid".format(output_name))
                else:
                    G_LOGGER.error(
                        "FAILED | Errors detected in output: {:}".format(
                            output_name))
                return output_valid

        all_valid = True
        G_LOGGER.start("Output Validation | Runners: {:}".format(
            list(run_results.keys())))
        with G_LOGGER.indent():
            for runner_name, results in run_results:
                for result in results:
                    for output_name, output in result.items():
                        all_valid &= validate_output(runner_name, output_name,
                                                     output)
                        if fail_fast and not all_valid:
                            return False

            if all_valid:
                G_LOGGER.finish("PASSED | Output Validation")
            else:
                G_LOGGER.error("FAILED | Output Validation")

        return all_valid
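
A hedged sketch of driving Comparator.validate from the result of Comparator.run, modeled on the usage shown in Example #3; the runner construction and the ONNX path are assumptions made for illustration.

from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
from polygraphy.comparator import Comparator

runners = [TrtRunner(EngineFromNetwork(NetworkFromOnnxPath("model.onnx")))]  # hypothetical model

results = Comparator.run(runners)
# NaN checking is on by default; check_inf=True also fails on non-finite values.
all_valid = Comparator.validate(results, check_inf=True, fail_fast=True)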
Example #14
    def compare_accuracy(run_results,
                         fail_fast=False,
                         comparisons=None,
                         compare_func=None):
        """
        Args:
            run_results (RunResults): The result of Comparator.run()


            fail_fast (bool): Whether to exit after the first failure
            comparisons (List[Tuple[int, int]]):
                    Comparisons to perform, specified by runner indexes. For example, [(0, 1), (1, 2)]
                    would compare the first runner with the second, and the second with the third.
                    By default, this compares each result to the subsequent one.
            compare_func (Callable(IterationResult, IterationResult) -> OrderedDict[str, bool]):
                    A function that takes in two IterationResults, and returns a dictionary that maps output
                    names to a boolean (or anything convertible to a boolean) indicating whether outputs matched.
                    The order of arguments to this function is guaranteed to be the same as the ordering of the
                    tuples contained in `comparisons`.

        Returns:
            AccuracyResult:
                    A summary of the results of the comparisons. The order of the keys (i.e. runner pairs) is
                    guaranteed to be the same as the order of `comparisons`. For more details, see the AccuracyResult
                    docstring (e.g. help(AccuracyResult)).
        """
        def find_mismatched(match_dict):
            return [
                name for name, matched in match_dict.items()
                if not bool(matched)
            ]

        compare_func = util.default(compare_func, CompareFunc.simple())
        comparisons = util.default(comparisons,
                                   Comparator.default_comparisons(run_results))

        accuracy_result = AccuracyResult()
        for runner0_index, runner1_index in comparisons:
            (runner0_name, results0), (
                runner1_name, results1
            ) = run_results[runner0_index], run_results[runner1_index]

            G_LOGGER.start("Accuracy Comparison | {:} vs. {:}".format(
                runner0_name, runner1_name))
            with G_LOGGER.indent():
                runner_pair = (runner0_name, runner1_name)
                accuracy_result[runner_pair] = []

                num_iters = min(len(results0), len(results1))
                for iteration, (result0,
                                result1) in enumerate(zip(results0, results1)):
                    if num_iters > 1:
                        G_LOGGER.info("Iteration: {:}".format(iteration))
                    with contextlib.ExitStack() as stack:
                        if num_iters > 1:
                            stack.enter_context(G_LOGGER.indent())
                        iteration_match_dict = compare_func(result0, result1)
                        accuracy_result[runner_pair].append(
                            iteration_match_dict)

                        mismatched_outputs = find_mismatched(
                            iteration_match_dict)
                        if fail_fast and mismatched_outputs:
                            return accuracy_result

                G_LOGGER.extra_verbose(
                    "Finished comparing {:} with {:}".format(
                        runner0_name,
                        runner1_name,
                    ))

                passed, _, total = accuracy_result.stats(runner_pair)
                pass_rate = accuracy_result.percentage(runner_pair) * 100.0
                if num_iters > 1 or len(comparisons) > 1:
                    msg = "Accuracy Summary | {:} vs. {:} | Passed: {:}/{:} iterations | Pass Rate: {:}%".format(
                        runner0_name, runner1_name, passed, total, pass_rate)
                    if passed == total:
                        G_LOGGER.finish(msg)
                    else:
                        G_LOGGER.error(msg)
        return accuracy_result
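
A sketch of invoking compare_accuracy, consistent with the call in Example #3; the tolerance values are illustrative, and `results` is assumed to be the RunResults object returned by Comparator.run() (see the sketch after Example #13).

from polygraphy.comparator import Comparator, CompareFunc

compare_func = CompareFunc.basic_compare_func(atol=1e-5, rtol=1e-5)
accuracy_result = Comparator.compare_accuracy(results, compare_func=compare_func)

# AccuracyResult maps (runner0_name, runner1_name) pairs to per-iteration match dicts.
for runner_pair in accuracy_result:
    print(runner_pair, accuracy_result.percentage(runner_pair))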
Example #15
    def call_impl(self):
        """
        Returns:
            onnx.ModelProto: The new ONNX model with constants folded.
        """
        def run_const_fold_pass(model):
            graph = gs_from_onnx(model)
            del model

            try:
                graph.fold_constants(fold_shapes=self.fold_shapes,
                                     partitioning=self.partitioning)
            except TypeError as err:  # Using an old version of ONNX-GS
                if self.partitioning:
                    G_LOGGER.critical(
                        "This version of ONNX-GraphSurgeon may not support partitioning the graph.\n"
                        "Please upgrade to a newer version of ONNX-GraphSurgeon or disable partitioning.\n"
                        "Note: Error was:\n{:}".format(err))
                if self.fold_shapes:
                    G_LOGGER.critical(
                        "This version of ONNX-GraphSurgeon may not support folding shapes.\n"
                        "Please upgrade to a newer version of ONNX-GraphSurgeon or disable shape folding.\n"
                        "Note: Error was:\n{:}".format(err))

                graph.fold_constants()

            model = gs.export_onnx(graph.cleanup(), do_type_check=False)
            del graph

            if self.fold_shapes and self.do_shape_inference:
                model = infer_shapes(model)
            return model

        if not mod.has_mod(onnxrt):
            G_LOGGER.error(
                "ONNX-Runtime is not installed, so constant folding may be suboptimal or not work at all.\n"
                "Consider installing ONNX-Runtime: {:} -m pip install onnxruntime"
                .format(sys.executable))

        model = self.load()

        prefold_num_nodes = len(model.graph.node)
        postfold_num_nodes = -1
        index = 0

        while (prefold_num_nodes != postfold_num_nodes) and (
                self.num_passes is None or index < self.num_passes):
            prefold_num_nodes = onnx_util.get_num_nodes(model)

            G_LOGGER.start("Folding Constants | Pass {:}".format(index + 1))
            try:
                model = run_const_fold_pass(model)
            except Exception as err:
                if not self.error_ok:
                    raise
                G_LOGGER.warning(
                    "Constant folding pass failed. Skipping subsequent passes.\nNote: Error was:\n{:}"
                    .format(err))
                break
            else:
                postfold_num_nodes = onnx_util.get_num_nodes(model)
                index += 1

                G_LOGGER.finish(
                    "\tTotal Nodes | Original: {:5}, After Folding: {:5} | {:5} Nodes Folded"
                    .format(prefold_num_nodes, postfold_num_nodes,
                            prefold_num_nodes - postfold_num_nodes))

        return model
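
The loop above simply re-runs the folding pass until the node count stops shrinking or num_passes is reached. A standalone sketch of that fixed-point pattern with a dummy fold step (purely illustrative, not the ONNX implementation):

def fold_until_fixed_point(model, fold_pass, max_passes=None):
    """Repeatedly applies fold_pass until the node count stops changing."""
    prev_nodes, curr_nodes, index = len(model), -1, 0
    while prev_nodes != curr_nodes and (max_passes is None or index < max_passes):
        prev_nodes = len(model)
        model = fold_pass(model)
        curr_nodes = len(model)
        index += 1
    return model

# Dummy "model": a list of nodes; each pass removes the odd-valued ones.
print(fold_until_fixed_point(list(range(10)), lambda m: [n for n in m if n % 2 == 0]))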
Example #16
        def compare_output(iter_result0, iter_result1):
            """
            Compare the outputs of two runners from a single iteration.

            This function will always iterate over the output names of the first IterationResult,
                and attempt to find corresponding output names in the second.
            If no corresponding output name is found, the output is skipped.
            If all output names are skipped, then this function raises an error.

            Args:
                iter_result0 (IterationResult): The result of the first runner.
                iter_result1 (IterationResult): The result of the second runner.

            Returns:
                OrderedDict[str, OutputCompareResult]:
                        The name of the outputs compared, derived from the first IterationResult,
                        and whether they matched. If an output name is not found, it is omitted from this dictionary.

            Raises:
                PolygraphyException: If all output names are skipped, and thus no outputs are compared.
            """
            def check_dict(dct, dict_name):
                if isinstance(dct, dict):
                    util.check_dict_contains(
                        dct,
                        set(iter_result0.keys()) | set(iter_result1.keys())
                        | {""},
                        check_missing=False,
                        dict_name=dict_name,
                    )

            check_dict(rtol, "the rtol dictionary")
            check_dict(atol, "the atol dictionary")
            check_dict(check_error_stat, "the check_error_stat dictionary")

            output_status = OrderedDict(
            )  # OrderedDict[str, bool] Maps output names to whether they matched.

            if not check_shapes:
                G_LOGGER.info(
                    "Strict shape checking disabled. Will attempt to match output shapes before comparisons"
                )

            def default_find_output_func(output_name, index, iter_result):
                found_name = util.find_in_dict(output_name, iter_result, index)
                if found_name is None:
                    return None
                elif found_name != output_name:
                    exact_match = util.find_in_dict(found_name, iter_result0)
                    if exact_match == found_name:
                        G_LOGGER.verbose(
                            "Will not compare {:} with {:}, since the former already has an exact match: {:}"
                            .format(found_name, output_name, exact_match))
                        return None  # If the found output is being compared against another output already, skip this non-exact match
                    G_LOGGER.warning(
                        "Output names did not match exactly. Assuming {:} output: {:} "
                        "corresponds to output: {:}".format(
                            iter_result.runner_name, found_name, output_name))
                return [found_name]

            nonlocal find_output_func
            find_output_func = util.default(find_output_func,
                                            default_find_output_func)

            for index, (out0_name, output0) in enumerate(iter_result0.items()):
                out1_names = util.default(
                    find_output_func(out0_name, index, iter_result1), [])

                if len(out1_names) > 1:
                    G_LOGGER.info(
                        "Will attempt to compare output: '{:}' [{:}] with multiple outputs: '{:}' [{:}]"
                        .format(out0_name, iter_result0.runner_name,
                                list(out1_names), iter_result1.runner_name))

                for out1_name in out1_names:
                    if out1_name is None or out1_name not in iter_result1:
                        G_LOGGER.warning(
                            "For output: '{:}' [{:}], skipping corresponding output: '{:}' [{:}], "
                            "since the output was not found".format(
                                out0_name, iter_result0.runner_name, out1_name,
                                iter_result1.runner_name))
                        continue

                    per_out_atol = util.value_or_from_dict(
                        atol, out0_name, default_atol)
                    per_out_rtol = util.value_or_from_dict(
                        rtol, out0_name, default_rtol)
                    per_out_err_stat = util.value_or_from_dict(
                        check_error_stat, out0_name, default_error_stat)

                    output1 = iter_result1[out1_name]
                    G_LOGGER.start(
                        "Comparing Output: '{:}' (dtype={:}, shape={:}) with '{:}' (dtype={:}, shape={:}) | "
                        "Tolerance: [abs={:.5g}, rel={:.5g}] | Checking {:} error"
                        .format(
                            out0_name,
                            output0.dtype,
                            output0.shape,
                            out1_name,
                            output1.dtype,
                            output1.shape,
                            per_out_atol,
                            per_out_rtol,
                            per_out_err_stat,
                        ))
                    G_LOGGER.extra_verbose(
                        "Note: Comparing {:} vs. {:}".format(
                            iter_result0.runner_name,
                            iter_result1.runner_name))

                    with G_LOGGER.indent():
                        if check_shapes and output0.shape != output1.shape:
                            G_LOGGER.error(
                                "Will not compare outputs of different shapes. Note: Output shapes are "
                                "{:} and {:}.".format(output0.shape,
                                                      output1.shape))
                            G_LOGGER.error(
                                "Note: Use --no-shape-check or set check_shapes=False to "
                                "attempt to compare values anyway.",
                                mode=LogMode.ONCE,
                            )
                            outputs_match = False
                        else:
                            output1 = util.try_match_shape(
                                output1, output0.shape)
                            output0 = output0.reshape(output1.shape)
                            outputs_match = check_outputs_match(
                                output0,
                                out0_name,
                                output1,
                                out1_name,
                                per_out_rtol=per_out_rtol,
                                per_out_atol=per_out_atol,
                                per_out_err_stat=per_out_err_stat,
                                runner0_name=iter_result0.runner_name,
                                runner1_name=iter_result1.runner_name,
                            )

                        output_status[out0_name] = outputs_match
                        if fail_fast and not outputs_match:
                            return output_status

            mismatched_output_names = [
                name for name, matched in output_status.items() if not matched
            ]
            if mismatched_output_names:
                G_LOGGER.error("FAILED | Mismatched outputs: {:}".format(
                    mismatched_output_names))
            else:
                G_LOGGER.finish(
                    "PASSED | All outputs matched | Outputs: {:}".format(
                        list(output_status.keys())))

            # This is useful for catching cases where Polygraphy does something wrong with the runner output buffers
            if not output_status and (bool(iter_result0.keys())
                                      or bool(iter_result1.keys())):
                r0_name = iter_result0.runner_name
                r0_outs = list(iter_result0.keys())
                r1_name = iter_result1.runner_name
                r1_outs = list(iter_result1.keys())
                G_LOGGER.critical(
                    "All outputs were skipped, no common outputs found! Note:\n{:} outputs: "
                    "{:}\n{:} outputs: {:}".format(r0_name, r0_outs, r1_name,
                                                   r1_outs))

            return output_status
Example #17
    def import_mod():
        from polygraphy import config
        from polygraphy.logger import G_LOGGER, LogMode

        def install_mod(raise_error=True):
            modname = name.split(".")[0]
            pkg = _MODULE_TO_PKG_NAME.get(modname, modname)
            extra_flags = _MODULE_EXTRA_FLAGS.get(modname, [])

            if version == LATEST_VERSION:
                extra_flags.append("--upgrade")
            elif version is not None:
                pkg += version

            cmd = config.INSTALL_CMD + [pkg] + extra_flags
            G_LOGGER.info(
                "{:} is required, but not installed. Attempting to install now.\n"
                "Running: {:}".format(pkg, " ".join(cmd)))
            status = sp.run(cmd)
            if status.returncode != 0:
                G_LOGGER.log(
                    "Could not automatically install required package: {:}. Please install it manually."
                    .format(pkg),
                    severity=G_LOGGER.CRITICAL
                    if raise_error else G_LOGGER.WARNING,
                )

            mod = importlib.import_module(name)
            return mod

        mod = None
        try:
            mod = importlib.import_module(name)
        except:
            if config.AUTOINSTALL_DEPS:
                mod = install_mod()
            else:
                G_LOGGER.error(
                    "Module: {:} is required but could not be imported.\n"
                    "You can try setting POLYGRAPHY_AUTOINSTALL_DEPS=1 in your environment variables "
                    "to allow Polygraphy to automatically install missing packages.\n"
                    "Note that this may cause existing packages to be overwritten - hence, it may be "
                    "desirable to use a Python virtual environment or container. "
                    .format(name))
                raise

        # Auto-upgrade if necessary
        if version is not None and hasattr(
                mod,
                "__version__") and not _version_ok(mod.__version__, version):
            if config.AUTOINSTALL_DEPS:
                G_LOGGER.info(
                    "Note: Package: '{name}' version {cur_ver} is installed, but version {rec_ver} is recommended.\n"
                    "Upgrading...".format(name=name,
                                          cur_ver=mod.__version__,
                                          rec_ver=version))
                mod = install_mod(
                    raise_error=False
                )  # We can try to use the other version if install fails.
            elif version != LATEST_VERSION:
                G_LOGGER.warning(
                    "Package: '{name}' version {cur_ver} is installed, but version {rec_ver} is recommended.\n"
                    "Consider installing the recommended version or setting POLYGRAPHY_AUTOINSTALL_DEPS=1 in your "
                    "environment variables to do so automatically. ".format(
                        name=name, cur_ver=mod.__version__, rec_ver=version),
                    mode=LogMode.ONCE,
                )

        if log:
            G_LOGGER.module_info(mod)

        return mod
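
This closure backs Polygraphy's lazy-import mechanism; a hedged usage sketch, assuming the entry point is exposed as polygraphy.mod.lazy_import (the name/version/log closure variables above suggest those parameters, though the outer signature is not shown here):

from polygraphy import mod

onnx = mod.lazy_import("onnx")  # nothing is imported yet

# The real import (and the auto-install path, when POLYGRAPHY_AUTOINSTALL_DEPS=1)
# happens on first attribute access.
print(onnx.__version__)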
Example #18
def _check_dtype_matches(self, host_buffer):
    if host_buffer.dtype != self.dtype:
        G_LOGGER.error(
            "Host buffer type: {:} does not match the type of this device buffer: {:}. "
            "This may cause CUDA errors!".format(host_buffer.dtype,
                                                 self.dtype))