Example #1
 def run_node(
     cls,
     node,  # type: NodeProto
     inputs,  # type: Any
     device='CPU',  # type: Text
     outputs_info=None,  # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
     **kwargs  # type: Dict[Text, Any]
 ):  # type: (...) -> Optional[Tuple[Any, ...]]
     '''Validate a single operator node; always returns None for now.

     Args:
         outputs_info: sequence of (dtype, shape) tuples, one per output —
             the first tuple element is the dtype, the second the shape.
             Example usage:
             https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py
     '''
     # TODO Remove Optional from return type
     if 'opset_version' not in kwargs:
         # No opset requested: check against the default context.
         onnx.checker.check_node(node)
         return None
     # Build a one-off checker context pinned to the caller's opset version
     # for the default ('') domain.
     ctx = c_checker.CheckerContext()
     ctx.ir_version = IR_VERSION
     ctx.opset_imports = {'': kwargs['opset_version']}  # type: ignore
     onnx.checker.check_node(node, ctx)
     return None
Example #2
    def test_check_graph_ir_version_3(self):  # type: () -> None
        """Graph checking under an explicit IR-version-3 context."""
        context = C.CheckerContext()
        context.ir_version = 3
        context.opset_imports = {'': onnx.defs.onnx_opset_version()}

        def verify(g):  # type: (GraphProto) -> None
            # Run the graph checker under the IR-version-3 context above.
            checker.check_graph(g, context)

        relu = helper.make_node(
            "Relu", ["X"], ["Y"], name="test")
        x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
        y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])
        graph = helper.make_graph([relu], "test", [x_info], [y_info])

        # A graph without initializers passes.
        verify(graph)

        # An initializer whose name matches no graph input must be rejected.
        graph.initializer.extend([self._sample_float_tensor])
        graph.initializer[0].name = 'no-exist'
        self.assertRaises(checker.ValidationError, verify, graph)

        # Renaming the initializer to an existing input makes it valid again.
        graph.initializer[0].name = 'X'
        verify(graph)
Example #3
    def run_node(cls,
                 node,  # type: NodeProto
                 inputs,  # type: Any
                 device='CPU',  # type: Text
                 outputs_info=None,  # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
                 **kwargs  # type: Dict[Text, Any]
                 ):  # type: (...) -> Optional[Tuple[Any, ...]]
        '''Simple run one operator and return the results.

        Currently unimplemented: prints a marker and raises unconditionally.

        Args:
            outputs_info: a list of tuples, which contains the element type and
            shape of each output. First element of the tuple is the dtype, and
            the second element is the shape. More use case can be found in
            https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py
        '''
        # BUG FIX: this docstring used to sit BELOW the print/raise pair,
        # making it an unreachable string statement rather than a docstring.
        print('##### run_node')
        raise Exception('run_node is not implemented yet')

        # ------------------------------------------------------------------
        # NOTE(review): everything below is unreachable draft code, kept as
        # the sketch of the eventual implementation. Remove the raise above
        # to enable it.
        # ------------------------------------------------------------------
        # TODO Remove Optional from return type
        if 'opset_version' in kwargs:
            special_context = c_checker.CheckerContext()
            special_context.ir_version = IR_VERSION
            special_context.opset_imports = {'': kwargs['opset_version']}  # type: ignore
            onnx.checker.check_node(node, special_context)
        else:
            onnx.checker.check_node(node)

        specs = [{'domain': '', 'version': 15}]  # temporary code, please use special_context
        opset = get_opset(specs)

        output = None

        if node.op_type in opset:
            op = opset[node.op_type]
            output = op(*inputs)

        # NOTE(review): `output` is still None when node.op_type is not in
        # the opset, so this return would raise AttributeError on
        # output.dtype — guard before enabling this path.
        return (output.dtype, output.shape, output)
Example #4
 def __init__(self, opset_ver=OPSET_VER):
   """Build an empty model pinned to *opset_ver* plus a matching checker context."""
   # Opset id for the default ('') ONNX domain at the requested version.
   self.opset_import = make_opsetid("", opset_ver)
   self.model = make_model_gen_version(
       onnx.GraphProto(), opset_imports=[self.opset_import])
   # Counts nodes per op type as they are added.
   self.op_counter = collections.Counter()
   # Checker context mirroring the model's IR version and opset.
   ctx = C.CheckerContext()
   ctx.ir_version = self.model.ir_version
   ctx.opset_imports = {'': opset_ver}
   self.ctx = ctx
Example #5
import functools

from onnx import (ValueInfoProto, AttributeProto, TensorProto, NodeProto,
                  ModelProto, GraphProto, IR_VERSION)
import onnx.onnx_cpp2py_export.checker as C
import onnx.defs
from google.protobuf.message import Message
from typing import TypeVar, Callable, Any, Type, cast, Union, Text
from six import string_types

# TODO: This thing where we reserialize the protobuf back into the
# string, only to deserialize it at the call site, is really goofy.
# Stop doing that.

# Module-wide checker context used by every check_* call that does not
# supply its own context.
# NB: Please don't edit this context!
DEFAULT_CONTEXT = C.CheckerContext()
# Pin the context to the IR version this build of onnx targets.
DEFAULT_CONTEXT.ir_version = IR_VERSION
# TODO: Maybe ONNX-ML should also be defaulted?
# '' is the default ONNX domain; map it to the newest opset this build knows.
DEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}

# Type variable letting decorators below preserve the wrapped signature.
FuncType = TypeVar('FuncType', bound=Callable[..., Any])


# TODO: This really doesn't seem worth the metaprogramming...
def _create_checker(
        proto_type):  # type: (Type[Message]) -> Callable[[FuncType], FuncType]
    def decorator(py_func):  # type: (FuncType) -> FuncType
        @functools.wraps(py_func)
        def checker(proto,
                    ctx=DEFAULT_CONTEXT
                    ):  # type: (Message, C.CheckerContext) -> Any