Example 1
from mock import Mock, patch

# Import path is illustrative; the test only requires that
# get_default_context() return whatever mx.cpu() produces.
from sagemaker_mxnet_serving_container.utils import get_default_context

@patch('mxnet.cpu')  # assumed patch target, inferred from the mx_cpu argument
def test_context(mx_cpu):
    mx_context = Mock()
    mx_cpu.return_value = mx_context

    default_context = get_default_context()

    assert default_context == mx_context
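
The test implies that get_default_context() falls back to mx.cpu(). A minimal sketch of such a helper, assuming it prefers a GPU context when one is visible (the GPU check is an assumption, not something the test confirms):

import mxnet as mx

def get_default_context():
    # Assumed behavior: pick the first GPU when one is visible, otherwise
    # fall back to mx.cpu() (the call the test above patches and asserts on).
    if mx.context.num_gpus() > 0:
        return mx.gpu()
    return mx.cpu()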
Example 2
    def default_input_fn(self, input_data, content_type):
        """Take request data and deserialize it into an MXNet NDArray for prediction.
        When an InvokeEndpoint operation is made against an Endpoint running the SageMaker model server,
        the model server receives two pieces of information:

            - The request's content type, for example "application/json"
            - The request data

        The ``input_fn`` is responsible for preprocessing request data before prediction.

        Args:
            input_data (obj): the request data
            content_type (str): the request's content type

        Returns:
            mxnet.nd.array: an MXNet NDArray

        Raises:
            sagemaker_inference.errors.UnsupportedFormatError: if an unsupported content type is used.

        """
        if content_type in self.VALID_CONTENT_TYPES:
            np_array = decoder.decode(input_data, content_type)
            return mx.nd.array(np_array).as_in_context(get_default_context())
        else:
            raise errors.UnsupportedFormatError(content_type)
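
A standalone illustration of the two steps in the happy path, with json.loads standing in for decoder.decode and mx.cpu() standing in for get_default_context() (both stand-ins are assumptions made to keep the snippet runnable):

import json

import mxnet as mx
import numpy as np

# Deserialize the request body, then place the NDArray on the target context.
np_array = np.array(json.loads('[[1.0, 2.0], [3.0, 4.0]]'))
nd = mx.nd.array(np_array).as_in_context(mx.cpu())
print(nd.shape)  # (2, 2)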
Example 3
    def default_input_fn(self, input_data, content_type, model=None):
        """Take request data and deserialize it into an object for prediction.
        When an InvokeEndpoint operation is made against an Endpoint running the SageMaker model server,
        the model server receives two pieces of information:

            - The request's content type, for example "application/json"
            - The request data

        The ``input_fn`` is responsible for preprocessing request data before prediction.

        Args:
            input_data (obj): the request data
            content_type (str): the request's content type
            model (obj): an MXNet Module (required despite the ``None`` default;
                its data shapes drive the reshaping and padding below)

        Returns:
            mxnet.io.NDArrayIter: data ready for prediction.

        Raises:
            sagemaker_inference.errors.UnsupportedFormatError: if an unsupported content type is used.

        """
        if content_type not in self.VALID_CONTENT_TYPES:
            raise errors.UnsupportedFormatError(content_type)

        np_array = decoder.decode(input_data, content_type)
        ndarray = mx.nd.array(np_array).as_in_context(get_default_context())

        # The model is required to have exactly one input
        [data_shape] = model.data_shapes

        # Reshape flattened CSV as specified by the model
        if content_type == content_types.CSV:
            _, target_shape = data_shape
            ndarray = ndarray.reshape(target_shape)

        # data_shape is a (name, shape) tuple; the first dimension of the
        # shape is the model's batch size
        model_batch_size = data_shape[1][0]
        pad_rows = max(0, model_batch_size - ndarray.shape[0])

        # If ndarray has fewer rows than model_batch_size, then pad it with zeros.
        if pad_rows:
            padding_shape = tuple([pad_rows] + list(ndarray.shape[1:]))
            padding = mx.ndarray.zeros(shape=padding_shape)
            ndarray = mx.ndarray.concat(ndarray, padding, dim=0)

        model_input = mx.io.NDArrayIter(ndarray,
                                        batch_size=model_batch_size,
                                        last_batch_handle='pad')

        if pad_rows:
            # Update the getpad method on the model_input data iterator to return the amount of
            # padding. MXNet will ignore the last getpad() rows during Module predict.
            def _getpad():
                return pad_rows

            model_input.getpad = _getpad

        return model_input
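
To make the padding logic concrete, a standalone sketch of the same pad-and-iterate steps for an assumed model batch size of 4 and a 3-row request:

import mxnet as mx

model_batch_size = 4
ndarray = mx.nd.array([[1, 2], [3, 4], [5, 6]])  # 3 rows < batch size of 4

pad_rows = max(0, model_batch_size - ndarray.shape[0])  # 1
padding = mx.nd.zeros((pad_rows,) + ndarray.shape[1:])
padded = mx.nd.concat(ndarray, padding, dim=0)  # shape (4, 2)

model_input = mx.io.NDArrayIter(padded, batch_size=model_batch_size,
                                last_batch_handle='pad')
model_input.getpad = lambda: pad_rows  # predict() then ignores the padded row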
Example 4
    def default_model_fn(self, model_dir, preferred_batch_size=1):
        """Function responsible for loading the model. This implementation is designed to work with
        the default save function provided for MXNet training.

        Args:
            model_dir (str): The directory where model files are stored
            preferred_batch_size (int): preferred batch size of the model's data shape.
                Defaults to 1.

        Returns:
            mxnet.mod.Module: the loaded model.

        """
        for f in DEFAULT_MODEL_FILENAMES.values():
            path = os.path.join(model_dir, f)
            if not os.path.exists(path):
                raise ValueError(
                    'Failed to load model with default model_fn: missing file {}. '
                    'Expected files: {}'.format(
                        f, list(DEFAULT_MODEL_FILENAMES.values())))

        shapes_file = os.path.join(model_dir,
                                   DEFAULT_MODEL_FILENAMES['shapes'])
        # The environment fallback only applies when the caller passes a falsy
        # preferred_batch_size; cast, since environment values are strings.
        preferred_batch_size = preferred_batch_size or int(
            os.environ.get(PREFERRED_BATCH_SIZE_PARAM) or 1)
        data_names, data_shapes = read_data_shapes(shapes_file,
                                                   preferred_batch_size)

        sym, args, aux = mx.model.load_checkpoint(
            os.path.join(model_dir, DEFAULT_MODEL_NAME), 0)

        # Use the Elastic Inference Accelerator context when one is attached.
        if os.environ.get(INFERENCE_ACCELERATOR_PRESENT_ENV) == 'true':
            ctx = mx.eia()
        else:
            ctx = get_default_context()

        mod = mx.mod.Module(symbol=sym,
                            context=ctx,
                            data_names=data_names,
                            label_names=None)
        mod.bind(for_training=False, data_shapes=data_shapes)
        mod.set_params(args, aux, allow_missing=True)

        return mod
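
default_model_fn depends on read_data_shapes to recover the input signature saved at training time. A plausible reconstruction, assuming the shapes file holds a JSON list of {"name": ..., "shape": [...]} entries and that preferred_batch_size overrides the batch dimension (the file format is an assumption, not confirmed by the excerpt):

import json

def read_data_shapes(path, preferred_batch_size=1):
    # Assumed format: [{"name": "data", "shape": [1, 784]}, ...]
    with open(path, 'r') as f:
        signature = json.load(f)

    data_names = []
    data_shapes = []
    for s in signature:
        shape = s['shape']
        if preferred_batch_size:
            shape[0] = preferred_batch_size  # override the batch dimension
        data_names.append(s['name'])
        data_shapes.append((s['name'], tuple(shape)))

    return data_names, data_shapes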