Example #1
        def is_shape_tensor(name, dtype):
            # Heuristic: treat an input as a shape tensor if it is a static, 1D
            # integer tensor whose length equals the rank of the user-provided shape.
            if name not in self.input_metadata or name not in self.user_input_metadata:
                return False

            _, shape = self.input_metadata[name]
            is_shape = np.issubdtype(dtype, np.integer) and (not util.is_shape_dynamic(shape)) and (len(shape) == 1)

            user_shape = self.user_input_metadata[name].shape
            is_shape &= len(user_shape) == shape[0]
            is_shape &= not util.is_shape_dynamic(user_shape)  # The shape of a shape tensor cannot be dynamic.
            return is_shape
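
The same heuristic can be exercised standalone. Below is a minimal sketch with plain dicts standing in for the runner's metadata; `is_shape_dynamic` is a hypothetical stand-in for `util.is_shape_dynamic`, and the tensor names are made up:

import numpy as np

def is_shape_dynamic(shape):
    # Stand-in for util.is_shape_dynamic: a dimension is dynamic if it is None or negative.
    return any(dim is None or dim < 0 for dim in shape)

def is_shape_tensor(name, dtype, input_metadata, user_input_metadata):
    if name not in input_metadata or name not in user_input_metadata:
        return False

    _, shape = input_metadata[name]
    is_shape = np.issubdtype(dtype, np.integer) and not is_shape_dynamic(shape) and len(shape) == 1

    user_shape = user_input_metadata[name]
    is_shape &= len(user_shape) == shape[0]
    is_shape &= not is_shape_dynamic(user_shape)
    return is_shape

# A static 1D int32 input of length 4 whose user-provided "shape" has rank 4
# looks like a shape tensor; a plain data tensor does not.
meta = {"shape_in": (np.int32, (4,)), "data_in": (np.float32, (1, 3))}
user_meta = {"shape_in": (1, 3, 224, 224), "data_in": (1, 3)}
print(is_shape_tensor("shape_in", np.int32, meta, user_meta))  # True
print(is_shape_tensor("data_in", np.float32, meta, user_meta))  # False
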
Example #2
    def _set_shapes_from_feed_dict(self, feed_dict):
        """
        Sets context shapes according to the provided feed_dict.

        Note that ``infer()`` calls this function automatically, so you only need
        to call it yourself if you plan to use this runner's context manually.

        Args:
            feed_dict (OrderedDict[str, numpy.ndarray]):
                    A mapping of input tensor names to corresponding input NumPy arrays.

        Returns:
            Tuple[int, int]: The start and end binding indices of the modified bindings.
        """
        def is_dynamic_shape_input(binding):
            # TensorRT raises an exception when this is called on a binding that is
            # not a shape-tensor input, so a successful call identifies a shape input.
            try:
                self.context.engine.get_profile_shape_input(0, binding)
                return True
            except RuntimeError:
                return False

        start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
        for name, inp in feed_dict.items():
            binding = start_binding + self.context.engine[name]
            # Only set shapes if required.
            # get_shape/get_binding_shape will return what a shape input/data input is currently set to.
            if is_dynamic_shape_input(binding):  # For input shape tensors
                if isinstance(inp, cuda.DeviceView):
                    G_LOGGER.critical(
                        "A DeviceView was provided for input: {:}, but since this is a "
                        "shape tensor, it must reside in host memory. "
                        "Please use a NumPy array instead.".format(name))

                if tuple(self.context.get_shape(binding)) != tuple(inp):
                    G_LOGGER.verbose("Setting shape binding: {:} (index: {:}) to: {:}".format(name, binding, inp))
                    self.context.set_shape_input(binding, inp)

            elif util.is_shape_dynamic(self.context.engine.get_binding_shape(binding)):
                shape = inp.shape
                if tuple(self.context.get_binding_shape(binding)) != tuple(shape):
                    G_LOGGER.verbose("Setting binding: {:} (index: {:}) to shape: {:}".format(name, binding, shape))
                    self.context.set_binding_shape(binding, shape)

        if not self.context.all_binding_shapes_specified:
            G_LOGGER.critical("Some input shapes were not specified.\n"
                              "Note: Network inputs are: {:}".format(self.get_input_metadata()))
        if not self.context.all_shape_inputs_specified:
            G_LOGGER.critical("Some shape inputs were not specified.\n"
                              "Note: Network inputs are: {:}".format(self.get_input_metadata()))

        return start_binding, end_binding
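
The binding arithmetic in the loop is easy to miss: an engine with multiple optimization profiles duplicates its bindings once per profile, so the binding for the active profile is the profile's starting offset plus the name's per-engine index. A rough sketch of that arithmetic with plain Python standing in for the engine (the names and sizes below are illustrative, not from a real engine):

# Hypothetical layout: an engine with 2 optimization profiles and 3 bindings each
# (two inputs and one output). A real engine exposes this via engine[name] and
# trt_util.get_active_profile_bindings(); a plain dict stands in here.
bindings_per_profile = 3
base_index = {"x": 0, "shape_in": 1, "y": 2}  # what engine[name] would return

def binding_for(name, active_profile):
    # Mirrors the loop above: binding = start_binding + engine[name]
    start_binding = active_profile * bindings_per_profile
    return start_binding + base_index[name]

print(binding_for("x", 0))         # 0
print(binding_for("x", 1))         # 3
print(binding_for("shape_in", 1))  # 4
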
Example #3
def get_static_shape(name, shape):
    static_shape = shape
    if util.is_shape_dynamic(shape):
        static_shape = util.override_dynamic_shape(shape)
        # Only warn when the user did not explicitly provide a shape for this input.
        if static_shape != shape and name not in self.user_input_metadata:
            if not util.is_valid_shape_override(static_shape, shape):
                G_LOGGER.critical("Input tensor: {:} | Cannot override original shape: {:} to {:}".format(name, shape, static_shape))
            G_LOGGER.warning("Input tensor: {:} | Will generate data of shape: {:}.\n"
                             "If this is incorrect, please set input_metadata "
                             "or provide a custom data loader.".format(name, static_shape), mode=LogMode.ONCE)
    return static_shape
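
The heavy lifting here is done by `util.override_dynamic_shape`, which substitutes a concrete value for every dynamic dimension. A minimal stand-in, assuming dynamic dimensions are marked as None or -1 and are replaced with 1 (the replacement constant is an assumption for illustration, not pulled from Polygraphy's source):

DEFAULT_SHAPE_VALUE = 1  # Assumed default replacement for dynamic dimensions.

def override_dynamic_shape(shape):
    # Replace each dynamic dimension (None or negative) with the fixed default.
    return tuple(DEFAULT_SHAPE_VALUE if dim is None or dim < 0 else dim for dim in shape)

print(override_dynamic_shape((-1, 3, None, 224)))  # (1, 3, 1, 224)
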
Example #4
    def __getitem__(self, index):
        """
        Generates random input data.

        May update the DataLoader's `input_metadata` attribute.

        Args:
            index (int):
                    Since this class behaves like an iterable, it takes an index parameter.
                    Generated data is guaranteed to be the same for the same index.

        Returns:
            OrderedDict[str, numpy.ndarray]: A mapping of input names to input NumPy buffers.
        """
        if index >= self.iterations:
            raise IndexError()

        G_LOGGER.verbose(
            "Generating data using numpy seed: {:}".format(self.seed + index))
        rng = np.random.RandomState(self.seed + index)

        def get_static_shape(name, shape):
            static_shape = shape
            if util.is_shape_dynamic(shape):
                static_shape = util.override_dynamic_shape(shape)
                if static_shape != shape:
                    if not util.is_valid_shape_override(static_shape, shape):
                        G_LOGGER.critical(
                            "Input tensor: {:} | Cannot override original shape: {:} to {:}".format(name, shape, static_shape))
                    G_LOGGER.warning(
                        "Input tensor: {:} | Will generate data of shape: {:}.\n"
                        "If this is incorrect, please set input_metadata "
                        "or provide a custom data loader.".format(name, static_shape),
                        mode=LogMode.ONCE,
                    )
            return static_shape

        # Whether the user provided the values for a shape tensor input,
        # rather than the shape of the input.
        # If the input is a static 1D integer tensor whose length equals the rank of the
        # user-provided shape, it is likely a shape tensor, so its values, not its shape,
        # should be overridden.
        def is_shape_tensor(name, dtype):
            if name not in self.input_metadata or name not in self.user_input_metadata:
                return False

            _, shape = self.input_metadata[name]
            is_shape = np.issubdtype(dtype, np.integer) and (not util.is_shape_dynamic(shape)) and (len(shape) == 1)

            user_shape = self.user_input_metadata[name].shape
            is_shape &= len(user_shape) == shape[0]
            is_shape &= not util.is_shape_dynamic(user_shape)  # The shape of a shape tensor cannot be dynamic.
            return is_shape

        def generate_buffer(name, dtype, shape):
            if is_shape_tensor(name, dtype):
                buffer = np.array(shape, dtype=dtype)
                G_LOGGER.info(
                    "Assuming {:} is a shape tensor. Setting input values to: {:}. If this is not correct, "
                    "please set it correctly in 'input_metadata' or by providing --input-shapes".format(name, buffer),
                    mode=LogMode.ONCE,
                )
            elif np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
                imin, imax = self._get_range(name, cast_type=int if np.issubdtype(dtype, np.integer) else bool)
                G_LOGGER.verbose(
                    "Input tensor: {:} | Generating input data in range: [{:}, {:}]".format(name, imin, imax),
                    mode=LogMode.ONCE,
                )
                # randint's `high` is exclusive, so add 1 to make imax inclusive.
                buffer = rng.randint(low=imin, high=imax + 1, size=shape, dtype=dtype)
            else:
                fmin, fmax = self._get_range(name, cast_type=float)
                G_LOGGER.verbose(
                    "Input tensor: {:} | Generating input data in range: [{:}, {:}]".format(name, fmin, fmax),
                    mode=LogMode.ONCE,
                )
                buffer = (rng.random_sample(size=shape) * (fmax - fmin) + fmin).astype(dtype)

            # Wrap in np.array to handle scalars, since the functions above return a
            # plain float when shape is ().
            buffer = np.array(buffer)
            return buffer

        if self.input_metadata is None and self.user_input_metadata is not None:
            self.input_metadata = self.user_input_metadata

        buffers = OrderedDict()
        for name, (dtype, shape) in self.input_metadata.items():
            if name in self.user_input_metadata:
                user_dtype, user_shape = self.user_input_metadata[name]

                dtype = util.default(user_dtype, dtype)
                is_valid_shape_override = user_shape is not None and util.is_valid_shape_override(user_shape, shape)

                if util.is_shape_dynamic(user_shape):
                    G_LOGGER.warning(
                        "Input tensor: {:} | Provided input shape: {:} is dynamic.\n"
                        "Dynamic shapes cannot be used to generate inference data. "
                        "Will use default shape instead.\n"
                        "To avoid this, please provide a fixed shape to the data loader.".format(name, user_shape))
                elif not is_valid_shape_override and not is_shape_tensor(name, dtype):
                    G_LOGGER.warning(
                        "Input tensor: {:} | Cannot use provided custom shape: {:} "
                        "to override: {:}. Will use default shape instead.".format(name, user_shape, shape),
                        mode=LogMode.ONCE,
                    )
                else:
                    shape = util.default(user_shape, shape)

            static_shape = get_static_shape(name, shape)
            buffers[name] = generate_buffer(name, dtype, shape=static_shape)

        # Warn about unused metadata
        for name in self.user_input_metadata.keys():
            if name not in self.input_metadata:
                msg = "Input tensor: {:} | Metadata was provided, but the input does not exist in one or more runners.".format(
                    name)
                close_match = util.find_in_dict(name, self.input_metadata)
                if close_match:
                    msg += "\nMaybe you meant to set: {:}".format(close_match)
                G_LOGGER.warning(msg)

        # Warn about unused val_range
        if not isinstance(self.val_range, tuple):
            util.check_dict_contains(self.val_range,
                                     list(self.input_metadata.keys()) + [""],
                                     check_missing=False,
                                     dict_name="val_range")

        return buffers
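
The docstring's guarantee that the same index always yields the same data follows from seeding a fresh RandomState with seed + index on each access, rather than advancing a shared generator. A quick standalone check of that property (the seed, shape, and range below are arbitrary):

import numpy as np

seed = 1  # arbitrary; the DataLoader uses self.seed + index

def generate(index, shape=(2, 3), fmin=0.0, fmax=1.0):
    # A fresh RandomState per index makes item `index` independent of access order.
    rng = np.random.RandomState(seed + index)
    return (rng.random_sample(size=shape) * (fmax - fmin) + fmin).astype(np.float32)

a = generate(5)
b = generate(5)  # same index => identical data, even without touching indices 0-4
print(np.array_equal(a, b))  # True
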