def _convert(self) -> None:
    """Trace the model and generate the ONNX proto with exporter globals set.

    Temporarily installs this exporter's opset version, operator export type,
    and shape-inference flag into ``sym_hel``'s module-level state, runs the
    trace and ONNX generation, and restores the previous global values in
    ``finally`` so a failed export does not leak settings into later exports.

    Side effects: sets ``self.model`` to the generated ``onnx.ModelProto``.
    """
    # Previous global values; None means "not yet captured", so the
    # finally-block only restores what was actually overwritten.
    prev_opset_version = None
    prev_export_type = None
    prev_shape_inference = None
    try:
        # A nested/concurrent export would corrupt the shared global state.
        assert not to_utils.is_in_onnx_export()  # type: ignore[no-untyped-call]
        with to_utils.select_model_mode_for_export(self.original_model, self.training):
            to_utils.__IN_ONNX_EXPORT = True
            prev_opset_version = sym_hel._export_onnx_opset_version
            sym_hel._set_opset_version(self.opset_version)  # type: ignore[no-untyped-call]
            prev_export_type = sym_hel._operator_export_type
            sym_hel._set_operator_export_type(self.operator_export_type)  # type: ignore[no-untyped-call]
            prev_shape_inference = sym_hel._onnx_shape_inference
            sym_hel._set_onnx_shape_inference(  # type: ignore[no-untyped-call]
                False  # TODO(twata): Use `self.onnx_shape_inference`
            )
            self._run_trace()
            self.model: onnx.ModelProto = self.generate_onnx()
    finally:
        to_utils.__IN_ONNX_EXPORT = False
        if prev_opset_version is not None:
            sym_hel._set_opset_version(prev_opset_version)  # type: ignore[no-untyped-call]
        # BUG FIX: the original guarded this restore with
        # `prev_shape_inference is not None`, which could skip restoring the
        # export type (or pass None to the setter) when the two captures
        # diverge. Guard on the value actually being restored.
        if prev_export_type is not None:
            sym_hel._set_operator_export_type(prev_export_type)  # type: ignore[no-untyped-call]
        if prev_shape_inference is not None:
            sym_hel._set_onnx_shape_inference(prev_shape_inference)  # type: ignore[no-untyped-call]
def is_in_onnx_export():
    r"""Returns True iff :func:`export` is running in the current thread"""
    # Imported lazily to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils as _export_utils

    return _export_utils.is_in_onnx_export()
def is_in_onnx_export():
    r"""Check whether it's in the middle of the ONNX export.

    This function returns True in the middle of torch.onnx.export().
    torch.onnx.export should be executed with single thread.
    """
    # Lazy import keeps module load free of a torch.onnx.utils dependency cycle.
    from torch.onnx import utils

    exporting = utils.is_in_onnx_export()
    return exporting
def is_in_onnx_export():
    """Report whether torch.onnx.export() is currently running.

    Delegates to :func:`torch.onnx.utils.is_in_onnx_export`; the import is
    deferred to call time to avoid importing torch.onnx at module load.
    """
    from torch.onnx import utils as onnx_utils

    return onnx_utils.is_in_onnx_export()