Example #1
    def prepare(cls, model, device='CPU', **kwargs):
        """For running end to end model(used for onnx test backend)

        Parameters
        ----------
        model  : onnx ModelProto object
            loaded onnx graph
        device : 'CPU'
            specifying device to run test on
        kwargs :
            other arguments

        Returns
        -------
        GluonBackendRep : object
            Returns object of GluonBackendRep class which will be in turn
            used to run inference on the input model and return the result for comparison.
        """
        graph = GraphProto()
        if device == 'CPU':
            ctx = mx.cpu()
        else:
            raise NotImplementedError("ONNX tests are run only for CPU context.")

        net = graph.graph_to_gluon(model.graph, ctx)
        return GluonBackendRep(net, device)
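A minimal usage sketch of how this prepare classmethod is typically invoked through the standard onnx.backend test API; the enclosing class name GluonBackend, the model path, and the input shape are assumptions, not part of the original snippet:

    import numpy as np
    import onnx

    # 'GluonBackend' is an assumed name for the Backend subclass that owns
    # the prepare() classmethod above; 'model.onnx' is a placeholder path
    model = onnx.load('model.onnx')                  # onnx ModelProto
    rep = GluonBackend.prepare(model, device='CPU')  # -> GluonBackendRep
    outputs = rep.run([np.random.rand(1, 3, 224, 224).astype('float32')])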
Example #2
    def prepare(cls, model, device='CPU', **kwargs):
        """For running end to end model(used for onnx test backend)

        Parameters
        ----------
        model  : onnx ModelProto object
            loaded onnx graph
        device : 'CPU'
            specifying device to run test on
        kwargs :
            other arguments

        Returns
        -------
        MXNetBackendRep : object
            Returns object of MXNetBackendRep class which will be in turn
            used to run inference on the input model and return the result for comparison.
        """

        graph = GraphProto()
        metadata = graph.get_graph_metadata(model.graph)
        input_data = metadata['input_tensor_data']
        input_shape = [data[1] for data in input_data]
        sym, arg_params, aux_params = MXNetBackend.perform_import_export(model.graph, input_shape)
        return MXNetBackendRep(sym, arg_params, aux_params, device)
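From the data[1] indexing above, input_tensor_data is evidently a list of (input_name, input_shape) pairs; a small sketch of the shape extraction, with a hypothetical metadata entry:

    # hypothetical entry mirroring metadata['input_tensor_data']
    input_data = [('data', (1, 3, 224, 224))]
    input_shape = [data[1] for data in input_data]   # -> [(1, 3, 224, 224)]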
Example #3
    def prepare(cls, model, device='CPU', **kwargs):
        """For running end to end model(used for onnx test backend)

        Parameters
        ----------
        model  : onnx ModelProto object
            loaded onnx graph
        device : 'CPU'
            specifying device to run test on
        kwargs :
            other arguments

        Returns
        -------
        MXNetBackendRep : object
            Returns object of MXNetBackendRep class which will be in turn
            used to run inference on the input model and return the result for comparison.
        """
        backend = kwargs.get('backend', cls.backend)
        operation = kwargs.get('operation', cls.operation)

        graph = GraphProto()
        if device == 'CPU':
            ctx = mx.cpu()
        else:
            raise NotImplementedError(
                "ONNX tests are run only for CPU context.")

        # determine the opset version the model uses
        model_opset_version = max(x.version for x in model.opset_import)

        if backend == 'mxnet':
            sym, arg_params, aux_params = graph.from_onnx(
                model.graph, model_opset_version)
            if operation == 'export':
                metadata = graph.get_graph_metadata(model.graph)
                input_data = metadata['input_tensor_data']
                input_shape = [data[1] for data in input_data]
                sym, arg_params, aux_params = MXNetBackend.perform_import_export(
                    sym, arg_params, aux_params, input_shape)

            return MXNetBackendRep(sym, arg_params, aux_params, device)
        elif backend == 'gluon':
            if operation == 'import':
                net = graph.graph_to_gluon(model.graph, ctx,
                                           model_opset_version)
                return GluonBackendRep(net, device)
            elif operation == 'export':
                raise NotImplementedError(
                    "Gluon->ONNX export not implemented.")
Example #4
    def perform_import_export(sym, arg_params, aux_params, input_shape):
        """ Import ONNX model to mxnet model and then export to ONNX model
            and then import it back to mxnet for verifying the result"""
        graph = GraphProto()

        params = {}
        params.update(arg_params)
        params.update(aux_params)
        # export to the ONNX graph proto format
        converter = MXNetGraph()
        graph_proto = converter.create_onnx_graph_proto(sym, params, in_shape=input_shape,
                                                        in_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')])

        # import back into MXNet to verify the result
        sym, arg_params, aux_params = graph.from_onnx(graph_proto)

        return sym, arg_params, aux_params
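A sketch of how this round-trip helper might be used to check that the parameter set survives export and re-import; the input shape and the loose count assertion are illustrative assumptions:

    # sym, arg_params, aux_params are assumed to come from a prior
    # graph.from_onnx(...) import of the model under test
    sym2, arg2, aux2 = MXNetBackend.perform_import_export(
        sym, arg_params, aux_params, [(1, 3, 224, 224)])
    # the export/import round trip should preserve the overall parameter count
    assert len(arg2) + len(aux2) == len(arg_params) + len(aux_params)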
Example #5
    def perform_import_export(graph_proto, input_shape):
        """ Import ONNX model to mxnet model and then export to ONNX model
            and then import it back to mxnet for verifying the result"""
        graph = GraphProto()

        sym, arg_params, aux_params = graph.from_onnx(graph_proto)

        params = {}
        params.update(arg_params)
        params.update(aux_params)
        # export to the ONNX graph proto format
        converter = MXNetGraph()
        graph_proto = converter.create_onnx_graph_proto(sym, params, in_shape=input_shape,
                                                        in_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')])

        # import back into MXNet to verify the result
        sym, arg_params, aux_params = graph.from_onnx(graph_proto)

        return sym, arg_params, aux_params
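Unlike the previous variant, this one takes the ONNX graph proto itself and performs the initial import internally; its two-argument signature matches the call in Example #2. A hedged sketch with a placeholder shape:

    # model is an onnx ModelProto, as in the prepare() snippets
    sym, arg_params, aux_params = MXNetBackend.perform_import_export(
        model.graph, [(1, 3, 224, 224)])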
Example #6
    def prepare(cls, model, device='CPU', **kwargs):
        """For running end to end model(used for onnx test backend)

        Parameters
        ----------
        model  : onnx ModelProto object
            loaded onnx graph
        device : 'CPU'
            specifying device to run test on
        kwargs :
            other arguments

        Returns
        -------
        MXNetBackendRep : object
            Returns object of MXNetBackendRep class which will be in turn
            used to run inference on the input model and return the result for comparison.
        """
        backend = kwargs.get('backend', cls.backend)
        operation = kwargs.get('operation', cls.operation)

        graph = GraphProto()
        if device == 'CPU':
            ctx = mx.cpu()
        else:
            raise NotImplementedError("ONNX tests are run only for CPU context.")

        if backend == 'mxnet':
            sym, arg_params, aux_params = graph.from_onnx(model.graph)
            if operation == 'export':
                metadata = graph.get_graph_metadata(model.graph)
                input_data = metadata['input_tensor_data']
                input_shape = [data[1] for data in input_data]
                sym, arg_params, aux_params = MXNetBackend.perform_import_export(sym, arg_params, aux_params,
                                                                                 input_shape)

            return MXNetBackendRep(sym, arg_params, aux_params, device)
        elif backend == 'gluon':
            if operation == 'import':
                net = graph.graph_to_gluon(model.graph, ctx)
                return GluonBackendRep(net, device)
            elif operation == 'export':
                raise NotImplementedError("Gluon->ONNX export not implemented.")
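The Gluon export path above is explicitly unsupported; a hypothetical probe of that branch:

    try:
        MXNetBackend.prepare(model, backend='gluon', operation='export')
    except NotImplementedError as err:
        print(err)   # Gluon->ONNX export not implemented.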
Example #7
    def prepare(cls, model, device='CPU', **kwargs):
        """For running end to end model(used for onnx test backend)

        Parameters
        ----------
        model  : onnx ModelProto object
            loaded onnx graph
        device : 'CPU'
            specifying device to run test on
        kwargs :
            other arguments

        Returns
        -------
        MXNetBackendRep : object
            Returns object of MXNetBackendRep class which will be in turn
            used to run inference on the input model and return the result for comparison.
        """
        graph = GraphProto()
        sym, arg_params, aux_params = graph.from_onnx(model.graph)
        return MXNetBackendRep(sym, arg_params, aux_params, device)
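This variant performs no device check, so the device string is passed straight through to MXNetBackendRep. A minimal sketch, assuming a placeholder model path and input shape:

    import numpy as np
    import onnx

    model = onnx.load('model.onnx')
    rep = MXNetBackend.prepare(model)        # defaults to device='CPU'
    outputs = rep.run([np.ones((1, 10), dtype='float32')])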
Example #8
    def prepare(cls, model, device='CPU', **kwargs):
        """For running end to end model(used for onnx test backend)

        Parameters
        ----------
        model  : onnx ModelProto object
            loaded onnx graph
        device : 'CPU'
            specifying device to run test on
        kwargs :
            other arguments

        Returns
        -------
        GluonBackendRep : object
            Returns object of GluonBackendRep class which will be in turn
            used to run inference on the input model and return the result for comparison.
        """
        graph = GraphProto()
        # note: this variant hands the device string straight to graph_to_gluon,
        # rather than converting it to an MXNet context first as in Example #1
        net = graph.graph_to_gluon(model.graph, device)
        return GluonBackendRep(net, device)