예제 #1
0
 def check_dict(dct, dict_name):
     # Only dict-valued inputs are validated; any other type is skipped.
     if isinstance(dct, dict):
         # Permitted keys: union of both iteration results, plus the empty
         # string (presumably a wildcard/default entry -- confirm with caller).
         allowed_keys = set(iter_result0.keys()) | set(iter_result1.keys()) | {""}
         util.check_dict_contains(
             dct,
             allowed_keys,
             check_missing=False,
             dict_name=dict_name,
         )
예제 #2
0
    def infer(self, feed_dict, check_inputs=True, *args, **kwargs):
        """
        Performs a single inference pass with the given inputs.

        Any extra positional/keyword arguments are forwarded verbatim to
        ``infer_impl()``; see the concrete runner's documentation for the
        parameters it accepts.

        Args:
            feed_dict (OrderedDict[str, numpy.ndarray]):
                    Maps each input tensor name to the NumPy array to feed it.

            check_inputs (bool):
                    If True (the default), validates that ``feed_dict`` supplies
                    the expected inputs with compatible dtypes and shapes.
                    Disabling the check can reduce per-call overhead.

        Returns:
            OrderedDict[str, numpy.ndarray]:
                    Maps output tensor names to their NumPy arrays.

                    IMPORTANT: Output buffers may be reused between calls, so
                    make a copy (e.g. ``copy.deepcopy(outputs)``) if you need to
                    retain results across multiple inferences.
        """
        # G_LOGGER.critical aborts, so execution stops here for inactive runners.
        if not self.is_active:
            G_LOGGER.critical(
                "{:35} | Must be activated prior to calling infer()".format(
                    self.name))

        if check_inputs:
            expected = self.get_input_metadata()
            G_LOGGER.verbose(
                "Runner input metadata is: {:}".format(expected))

            # Verify the caller supplied every expected input name.
            util.check_dict_contains(feed_dict,
                                     expected.keys(),
                                     dict_name="feed_dict",
                                     log_func=G_LOGGER.critical)

            for tensor_name, array in feed_dict.items():
                meta = expected[tensor_name]

                if not np.issubdtype(array.dtype, meta.dtype):
                    G_LOGGER.critical(
                        "Input tensor: {:} | Received unexpected dtype: {:}.\n"
                        "Note: Expected type: {:}".format(
                            tensor_name, array.dtype, meta.dtype))

                if not util.is_valid_shape_override(array.shape, meta.shape):
                    G_LOGGER.critical(
                        "Input tensor: {:} | Received incompatible shape: {:}.\n"
                        "Note: Expected a shape compatible with: {:}".format(
                            tensor_name, array.shape, meta.shape))

        return self.infer_impl(feed_dict, *args, **kwargs)
예제 #3
0
        def get_batch(self, names):
            # Calibration batch provider: returns one device pointer per input
            # name, or None to signal exhaustion or an error.
            # NOTE(review): looks like a TensorRT IInt8Calibrator callback -- confirm.
            if not self.is_active:
                G_LOGGER.error(
                    "Calibrator must be activated prior to use. Please use a context manager. "
                    "For example:\nwith calibrator:\n\t# Use calibrator here")
                return None

            try:
                batch = next(self.data_loader_iter)
            except StopIteration:
                # Distinguish "the loader never produced anything" from normal exhaustion.
                if not self.num_batches:
                    G_LOGGER.error(
                        "Calibrator data loader provided no data.\nPossible reasons for this include:\n(1) data loader "
                        "has no data to provide\n(2) data loader was a generator, and the calibrator is being "
                        "used multiple times (generators cannot be rewound)")
                return None
            self.num_batches += 1

            if not util.check_dict_contains(batch,
                                            names,
                                            dict_name="calibration data",
                                            log_func=G_LOGGER.error):
                return None

            pointers = []
            for name in names:
                data = batch[name]

                if isinstance(data, cuda.DeviceView):
                    # Already resident on the device; use its pointer directly.
                    pointers.append(data.ptr)
                elif isinstance(data, np.ndarray):
                    # Lazily allocate (and cache) one device buffer per input,
                    # then upload the host array into it.
                    if name not in self.device_buffers:
                        self.device_buffers[name] = cuda.DeviceArray(
                            shape=data.shape, dtype=data.dtype)
                        G_LOGGER.verbose("Allocated: {:}".format(
                            self.device_buffers[name]))

                    pointers.append(self.device_buffers[name].copy_from(data).ptr)
                elif isinstance(data, int):
                    # A raw GPU pointer supplied directly by the data loader.
                    pointers.append(data)
                else:
                    G_LOGGER.error(
                        "Calibration data loader provided an unrecognized type: {:} for input: {:}.\n"
                        "Please provide either a NumPy array, Polygraphy DeviceView, or GPU pointer. "
                        .format(type(data).__name__, name))
                    return None

            return pointers
예제 #4
0
    def __getitem__(self, index):
        """
        Generates random input data.

        May update the DataLoader's `input_metadata` attribute.

        Args:
            index (int):
                    Since this class behaves like an iterable, it takes an index parameter.
                    Generated data is guaranteed to be the same for the same index.

        Returns:
            OrderedDict[str, numpy.ndarray]: A mapping of input names to input numpy buffers.
        """
        if index >= self.iterations:
            raise IndexError()

        G_LOGGER.verbose(
            "Generating data using numpy seed: {:}".format(self.seed + index))
        # Seeding with (seed + index) makes generation deterministic per index,
        # which is what guarantees identical data for repeated accesses.
        rng = np.random.RandomState(self.seed + index)

        # Resolves a possibly-dynamic shape to a fully static one that data can
        # actually be generated for; warns (once) when a substitution is made.
        def get_static_shape(name, shape):
            static_shape = shape
            if util.is_shape_dynamic(shape):
                static_shape = util.override_dynamic_shape(shape)
                if static_shape != shape:
                    if not util.is_valid_shape_override(static_shape, shape):
                        G_LOGGER.critical(
                            "Input tensor: {:} | Cannot override original shape: {:} to {:}"
                            .format(name, shape, static_shape))
                    G_LOGGER.warning(
                        "Input tensor: {:} | Will generate data of shape: {:}.\n"
                        "If this is incorrect, please set input_metadata "
                        "or provide a custom data loader.".format(
                            name, static_shape),
                        mode=LogMode.ONCE,
                    )
            return static_shape

        # Whether the user provided the values for a shape tensor input,
        # rather than the shape of the input.
        # If the shape is 1D, and has a value equal to the rank of the provided default shape, it is
        # likely to be a shape tensor, and so its value, not shape, should be overridden.
        def is_shape_tensor(name, dtype):
            if name not in self.input_metadata or name not in self.user_input_metadata:
                return False

            _, shape = self.input_metadata[name]
            # Heuristic: an integer dtype with a static 1-D shape suggests a shape tensor.
            is_shape = np.issubdtype(dtype, np.integer) and (
                not util.is_shape_dynamic(shape)) and (len(shape) == 1)

            user_shape = self.user_input_metadata[name].shape
            # The user-provided "shape" is interpreted as the tensor's *values*,
            # so its length must match the shape tensor's single dimension.
            is_shape &= len(user_shape) == shape[0]
            is_shape &= not util.is_shape_dynamic(
                user_shape)  # Shape of shape cannot be dynamic.
            return is_shape

        # Produces the NumPy buffer for a single input, honoring per-input
        # value ranges and the shape-tensor special case above.
        def generate_buffer(name, dtype, shape):
            if is_shape_tensor(name, dtype):
                # For shape tensors, the requested shape IS the data.
                buffer = np.array(shape, dtype=dtype)
                G_LOGGER.info(
                    "Assuming {:} is a shape tensor. Setting input values to: {:}. If this is not correct, "
                    "please set it correctly in 'input_metadata' or by providing --input-shapes"
                    .format(name, buffer),
                    mode=LogMode.ONCE,
                )
            elif np.issubdtype(dtype, np.integer) or np.issubdtype(
                    dtype, np.bool_):
                imin, imax = self._get_range(name,
                                             cast_type=int if np.issubdtype(
                                                 dtype, np.integer) else bool)
                G_LOGGER.verbose(
                    "Input tensor: {:} | Generating input data in range: [{:}, {:}]"
                    .format(name, imin, imax),
                    mode=LogMode.ONCE,
                )
                # high is 1 greater than the max int drawn, so the range is inclusive.
                buffer = rng.randint(low=imin,
                                     high=imax + 1,
                                     size=shape,
                                     dtype=dtype)
            else:
                fmin, fmax = self._get_range(name, cast_type=float)
                G_LOGGER.verbose(
                    "Input tensor: {:} | Generating input data in range: [{:}, {:}]"
                    .format(name, fmin, fmax),
                    mode=LogMode.ONCE,
                )
                # Scale uniform [0, 1) samples into [fmin, fmax).
                buffer = (rng.random_sample(size=shape) * (fmax - fmin) +
                          fmin).astype(dtype)

            buffer = np.array(
                buffer
            )  # To handle scalars, since the above functions return a float if shape is ().
            return buffer

        # Fall back to user-supplied metadata when no metadata was set on the loader.
        if self.input_metadata is None and self.user_input_metadata is not None:
            self.input_metadata = self.user_input_metadata

        buffers = OrderedDict()
        for name, (dtype, shape) in self.input_metadata.items():
            # User-provided metadata overrides the defaults, but only when the
            # override is usable (static and shape-compatible).
            if name in self.user_input_metadata:
                user_dtype, user_shape = self.user_input_metadata[name]

                dtype = util.default(user_dtype, dtype)
                is_valid_shape_override = user_shape is not None and util.is_valid_shape_override(
                    user_shape, shape)

                if util.is_shape_dynamic(user_shape):
                    G_LOGGER.warning(
                        "Input tensor: {:} | Provided input shape: {:} is dynamic.\n"
                        "Dynamic shapes cannot be used to generate inference data. "
                        "Will use default shape instead.\n"
                        "To avoid this, please provide a fixed shape to the data loader. "
                        .format(name, user_shape))
                elif not is_valid_shape_override and not is_shape_tensor(
                        name, dtype):
                    G_LOGGER.warning(
                        "Input tensor: {:} | Cannot use provided custom shape: {:} "
                        "to override: {:}. Will use default shape instead.".
                        format(name, user_shape, shape),
                        mode=LogMode.ONCE,
                    )
                else:
                    shape = util.default(user_shape, shape)

            static_shape = get_static_shape(name, shape)
            buffers[name] = generate_buffer(name, dtype, shape=static_shape)

        # Warn about unused metadata: names the user specified that no runner exposes.
        for name in self.user_input_metadata.keys():
            if name not in self.input_metadata:
                msg = "Input tensor: {:} | Metadata was provided, but the input does not exist in one or more runners.".format(
                    name)
                close_match = util.find_in_dict(name, self.input_metadata)
                if close_match:
                    msg += "\nMaybe you meant to set: {:}".format(close_match)
                G_LOGGER.warning(msg)

        # Warn about unused val_range entries (a plain tuple applies to all inputs,
        # so only dict-like val_range is checked against input names).
        if not isinstance(self.val_range, tuple):
            util.check_dict_contains(self.val_range,
                                     list(self.input_metadata.keys()) + [""],
                                     check_missing=False,
                                     dict_name="val_range")

        return buffers