Example #1
def _jit_graph_to_onnx_model(graph, operator_export_type, opset_version):
    r"""
    This function exports torch::jit::Graph object
    to serialized ONNX ModelProto.
    This function is for testing purpose.
    It only keeps the essential parts for IR graph conversions.
    It also does not interact with actual PyTorch modules nor
    PyTorch tensor inputs.
    """
    from torch.onnx.symbolic_helper import _set_onnx_shape_inference, _set_opset_version
    from torch.onnx.utils import _optimize_graph

    # Shape inference is required because some ops' symbolic functions
    # generate sub-graphs based on inputs' types.
    _set_onnx_shape_inference(True)
    _set_opset_version(opset_version)
    graph = _optimize_graph(graph, operator_export_type, params_dict={})
    proto, _, _, _ = graph._export_onnx(
        {},
        opset_version,
        {},
        False,
        operator_export_type,
        False,
        False,
        {},
        True,
        "",
        {},
    )
    return proto
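
A minimal usage sketch for the helper above (the traced function, the opset number, and the onnx round-trip are illustrative assumptions, not part of the original):

import onnx
import torch

def add(x, y):
    return x + y

# Trace a toy function to obtain a torch::jit::Graph.
traced = torch.jit.trace(add, (torch.randn(2, 3), torch.randn(2, 3)))
proto_bytes = _jit_graph_to_onnx_model(
    traced.graph, torch.onnx.OperatorExportTypes.ONNX, opset_version=13
)
# The helper returns serialized bytes; parse them back for inspection.
model = onnx.load_model_from_string(proto_bytes)
onnx.checker.check_model(model)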
Example #2
 def _model_to_graph(self,
                     model,
                     input,
                     do_constant_folding=True,
                     training=TrainingMode.EVAL,
                     operator_export_type=OperatorExportTypes.ONNX,
                     input_names=None,
                     dynamic_axes=None):
     if training == torch.onnx.TrainingMode.TRAINING:
         model.train()
     elif training == torch.onnx.TrainingMode.EVAL:
         model.eval()
     # Need to disable onnx_shape_inference for this test because it moves const nodes into the initializers.
     _set_onnx_shape_inference(False)
     if dynamic_axes is None:
         dynamic_axes = {}
     utils._validate_dynamic_axes(dynamic_axes, model, None, None)
     graph, params_dict, torch_out = utils._model_to_graph(
         model,
         input,
         do_constant_folding=do_constant_folding,
         _disable_torch_constant_prop=True,
         operator_export_type=operator_export_type,
         training=training,
         input_names=input_names,
         dynamic_axes=dynamic_axes)
     _set_onnx_shape_inference(True)
     return graph, params_dict, torch_out
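
A hypothetical call from a test method of the same class (the model, shapes, and axis names are illustrative):

import torch

model = torch.nn.Linear(4, 2)
x = torch.randn(3, 4, requires_grad=True)
graph, params_dict, torch_out = self._model_to_graph(
    model, (x,),
    input_names=["x"],
    dynamic_axes={"x": [0]},  # mark dim 0 of "x" as dynamic
)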
Example #3
 def _convert(self) -> None:
     prev_opset_version = None
     prev_export_type = None
     prev_shape_inference = None
     try:
         assert not to_utils.is_in_onnx_export()  # type: ignore[no-untyped-call]
         with to_utils.select_model_mode_for_export(self.original_model, self.training):
             to_utils.__IN_ONNX_EXPORT = True
             prev_opset_version = sym_hel._export_onnx_opset_version
             sym_hel._set_opset_version(self.opset_version)  # type: ignore[no-untyped-call]
             prev_export_type = sym_hel._operator_export_type
             sym_hel._set_operator_export_type(self.operator_export_type)  # type: ignore[no-untyped-call]
             prev_shape_inference = sym_hel._onnx_shape_inference
             sym_hel._set_onnx_shape_inference(  # type: ignore[no-untyped-call]
                 False  # TODO(twata): Use `self.onnx_shape_inference`
             )
             self._run_trace()
             self.model: onnx.ModelProto = self.generate_onnx()
     finally:
         to_utils.__IN_ONNX_EXPORT = False
         if prev_opset_version is not None:
             sym_hel._set_opset_version(prev_opset_version)  # type: ignore[no-untyped-call]
         if prev_export_type is not None:
             sym_hel._set_operator_export_type(prev_export_type)  # type: ignore[no-untyped-call]
         if prev_shape_inference is not None:
             sym_hel._set_onnx_shape_inference(prev_shape_inference)  # type: ignore[no-untyped-call]
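
The snapshot-and-restore of the three symbolic_helper globals is the heart of this snippet and can be factored into a context manager. A minimal sketch, using only the sym_hel getters and setters already seen above (the name _export_settings is hypothetical):

import contextlib
from torch.onnx import symbolic_helper as sym_hel

@contextlib.contextmanager
def _export_settings(opset_version, operator_export_type, shape_inference):
    # Snapshot the module-level export globals, apply the overrides,
    # and restore the originals on exit, even if the body raises.
    prev_opset = sym_hel._export_onnx_opset_version
    prev_type = sym_hel._operator_export_type
    prev_shape = sym_hel._onnx_shape_inference
    sym_hel._set_opset_version(opset_version)
    sym_hel._set_operator_export_type(operator_export_type)
    sym_hel._set_onnx_shape_inference(shape_inference)
    try:
        yield
    finally:
        sym_hel._set_opset_version(prev_opset)
        sym_hel._set_operator_export_type(prev_type)
        sym_hel._set_onnx_shape_inference(prev_shape)

With this, the try/finally above reduces to a single with _export_settings(...) block around _run_trace() and generate_onnx().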
Example #4
def run_model_test(self,
                   model,
                   batch_size=2,
                   state_dict=None,
                   input=None,
                   use_gpu=True,
                   rtol=0.001,
                   atol=1e-7,
                   example_outputs=None,
                   do_constant_folding=True,
                   dynamic_axes=None,
                   test_with_inputs=None,
                   input_names=None,
                   output_names=None,
                   fixed_batch_size=False):
    model.eval()

    if input is None:
        input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)

    with torch.no_grad():
        if isinstance(input, torch.Tensor):
            input = (input,)
        # In-place operators would modify the input tensors themselves,
        # so the inputs are deep-copied before every forward call.
        input_copy = copy.deepcopy(input)
        output = model(*input_copy)
        if isinstance(output, torch.Tensor):
            output = (output,)

        _set_opset_version(self.opset_version)
        _set_operator_export_type(OperatorExportTypes.ONNX)
        _set_onnx_shape_inference(True)
        _set_training_mode(False)
        if dynamic_axes is None:
            dynamic_axes = {}
        _validate_dynamic_axes(dynamic_axes, model, input_names, output_names)

        input_copy = copy.deepcopy(input)
        graph, _, _ = utils._model_to_graph(
            model,
            input_copy,
            input_names=input_names,
            output_names=output_names,
            operator_export_type=OperatorExportTypes.ONNX,
            example_outputs=output,
            do_constant_folding=do_constant_folding,
            training=TrainingMode.EVAL,
            use_new_jit_passes=self.use_new_jit_passes,
            dynamic_axes=dynamic_axes)
        verify_inferred_shape(graph)
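
A hypothetical invocation from a test method (the module, shapes, and axis names are illustrative; self.opset_version and self.use_new_jit_passes are assumed to be set by the test fixture):

import torch

class TinyNet(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)

self.run_model_test(
    TinyNet(),
    input=torch.randn(1, 3, 8, 8, requires_grad=True),
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}},
)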
Example #5
def _export_jit_graph_to_onnx_model_proto(graph: torch._C.Graph,
                                          operator_export_type: int):
    from torch.onnx.symbolic_helper import _set_onnx_shape_inference, _set_operator_export_type, _set_opset_version

    _set_onnx_shape_inference(True)
    _set_operator_export_type(operator_export_type)
    torch._C._jit_pass_run_decompositions(graph)
    graph = torch.onnx.utils._optimize_graph(graph,
                                             operator_export_type,
                                             params_dict={})
    proto, _, _, _ = graph._export_onnx(
        {},
        torch.onnx._globals.GLOBALS.export_onnx_opset_version,
        {},
        False,
        operator_export_type,
        False,
        False,
        {},
        True,
        "",
        {},
    )
    return proto
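
A minimal usage sketch, assuming a PyTorch build new enough to ship _jit_pass_run_decompositions and torch.onnx._globals (the scripted function is illustrative):

import torch

@torch.jit.script
def f(x: torch.Tensor) -> torch.Tensor:
    return torch.relu(x) + 1.0

# Relies on the default opset currently stored in torch.onnx._globals.GLOBALS.
proto_bytes = _export_jit_graph_to_onnx_model_proto(
    f.graph, torch._C._onnx.OperatorExportTypes.ONNX
)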
Example #6
 def __init__(self, *args, **kwargs):
     unittest.TestCase.__init__(self, *args, **kwargs)
     self.opset_version = _onnx_main_opset
     _set_onnx_shape_inference(True)
     _set_opset_version(self.opset_version)

 def setUp(self):
     self.opset_version = _constants.onnx_main_opset
     symbolic_helper._set_onnx_shape_inference(True)
     symbolic_helper._set_opset_version(self.opset_version)

 def __init__(self, *args, **kwargs):
     TestCase.__init__(self, *args, **kwargs)
     self.opset_version = _constants.onnx_main_opset
     _set_onnx_shape_inference(True)
     _set_opset_version(self.opset_version)