Code example #1
    def __init__(
        self,
        opset_version,
        batch_dim,
        body: onnx.GraphProto,
    ):
        super().__init__()
        self.ops = import_module("onnx2pytorch.convert.operations")
        self.c = import_module("onnx2pytorch.constants")

        self.body = body
        self.batch_dim = batch_dim

        self.input_names = get_inputs_names(body)
        self.output_names = get_outputs_names(body)

        # Creates mapping from node (identified by first output) to submodule
        self.mapping = {}
        for op_id, op_name, op in self.ops.convert_operations(
                body, opset_version, batch_dim):
            setattr(self, op_name, op)
            self.mapping[op_id] = op_name

        # Store initializers as buffers
        for tensor in self.body.initializer:
            buffer_name = self.ops.get_buffer_name(tensor.name)
            self.register_buffer(
                buffer_name,
                torch.from_numpy(numpy_helper.to_array(tensor)),
            )
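
Examples #1, #4, and #5 store ONNX initializers via `register_buffer` rather than as plain attributes. A minimal standalone sketch of why that design choice matters: buffers are serialized in `state_dict()` and moved by `.to(device)`, but are excluded from `parameters()`, so the converted constants never receive gradients. (The `Demo` class below is illustrative only, not part of onnx2pytorch.)

import torch

class Demo(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Buffers travel with .to()/.cuda() and appear in state_dict(),
        # but are not trainable parameters.
        self.register_buffer("const", torch.ones(3))

m = Demo()
assert "const" in m.state_dict()
assert list(m.parameters()) == []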
Code example #2
File: model.py  Project: damon-93/onnx2X
    def __init__(self, onnx_model: onnx.ModelProto, batch_dim=0, debug=False):
        """
        Convert onnx model to pytorch.

        Parameters
        ----------
        onnx_model: onnx.ModelProto
            Loaded onnx model.
        batch_dim: int
            Dimension of the batch.
        
        Returns
        -------
        model: torch.nn.Module
            A converted pytorch model.
        """
        super().__init__()
        self.onnx_model = onnx_model
        self.batch_dim = batch_dim
        self.debug = debug
        self.mapping = {}
        for op_id, op_name, op in convert_operations(onnx_model, batch_dim):
            # Set the attribute value; the attribute may not already exist on the module
            setattr(self, op_name, op)
            self.mapping[op_id] = op_name

        self.init_parameters = InitParameters({
            tensor.name: tensor
            for tensor in self.onnx_model.graph.initializer
        })

        self.input_names = get_inputs_names(onnx_model)
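
For context, a hedged end-to-end usage sketch of the converter shown in examples #2 and #3. It assumes the class is exposed as `onnx2pytorch.ConvertModel` (the import path may vary by fork), and "model.onnx" plus the input shape are placeholders:

import onnx
import torch
from onnx2pytorch import ConvertModel  # import path may differ per project

onnx_model = onnx.load("model.onnx")       # placeholder path
pytorch_model = ConvertModel(onnx_model, batch_dim=0)

dummy_input = torch.randn(1, 3, 224, 224)  # shape depends on the model
with torch.no_grad():
    output = pytorch_model(dummy_input)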
Code example #3
File: model.py  Project: charmere/onnx2pytorch
    def __init__(self,
                 onnx_model: onnx.ModelProto,
                 batch_dim=0,
                 experimental=False,
                 debug=False):
        """
        Convert onnx model to pytorch.

        Parameters
        ----------
        onnx_model: onnx.ModelProto
            Loaded onnx model.
        batch_dim: int
            Dimension of the batch.
        experimental: bool
            Experimental implementation allows batch_size > 1. However,
            batchnorm layers could potentially produce false outputs.

        Returns
        -------
        model: torch.nn.Module
            A converted pytorch model.
        """
        super().__init__()
        self.onnx_model = onnx_model
        self.batch_dim = batch_dim
        self.experimental = experimental
        self.debug = debug
        self.mapping = {}
        for op_id, op_name, op in convert_operations(onnx_model, batch_dim):
            setattr(self, op_name, op)
            self.mapping[op_id] = op_name

        self.init_parameters = InitParameters({
            tensor.name: tensor
            for tensor in self.onnx_model.graph.initializer
        })

        self.input_names = get_inputs_names(onnx_model)

        if experimental:
            warnings.warn(
                "Using experimental implementation that allows 'batch_size > 1'."
                "Batchnorm layers could potentially produce false outputs.")
Code example #4
File: model.py  Project: lightmatter-ai/onnx2pytorch
    def __init__(
        self,
        onnx_model: onnx.ModelProto,
        batch_dim=0,
        experimental=False,
        debug=False,
        enable_pruning=True,
    ):
        """
        Convert onnx model to pytorch.

        Parameters
        ----------
        onnx_model: onnx.ModelProto
            Loaded onnx model.
        batch_dim: int
            Dimension of the batch.
        experimental: bool
            Experimental implementation allows batch_size > 1. However,
            batchnorm layers could potentially produce false outputs.
        enable_pruning: bool
            Track kept/pruned indices between different calls to forward pass.

        Returns
        -------
        model: torch.nn.Module
            A converted pytorch model.
        """
        super().__init__()
        self.onnx_model = onnx_model
        self.batch_dim = batch_dim
        self.experimental = experimental
        self.debug = debug
        self.enable_pruning = enable_pruning

        self.input_names = get_inputs_names(onnx_model.graph)
        self.output_names = get_outputs_names(onnx_model.graph)
        opset_version = onnx_model.opset_import[0].version

        # Create mapping from node (identified by first output) to submodule
        self.mapping = {}
        for op_id, op_name, op in convert_operations(
                onnx_model.graph,
                opset_version,
                batch_dim,
                enable_pruning,
        ):
            setattr(self, op_name, op)
            if isinstance(op, Loop) and debug:
                raise NotImplementedError(
                    "debug-mode with Loop node not implemented.")
            self.mapping[op_id] = op_name

        # Store initializers as buffers
        for tensor in self.onnx_model.graph.initializer:
            buffer_name = get_buffer_name(tensor.name)
            self.register_buffer(
                buffer_name,
                torch.from_numpy(np.copy(numpy_helper.to_array(tensor))),
            )

        # Compute activation dependencies, mapping each node to its dependents
        self.needed_by = defaultdict(set)
        for node in self.onnx_model.graph.node:
            out_op_id = node.output[0]
            for in_op_id in node.input:
                self.needed_by[in_op_id].add(out_op_id)
            if node.op_type == "Loop":
                # Look at nodes in the loop body
                l1 = getattr(self, self.mapping[out_op_id])  # Loop object
                loop_body_l1 = l1.body
                for node_l1 in loop_body_l1.node:
                    for in_op_id in node_l1.input:
                        # Treating node (outer loop) as dependent, not node_l1
                        self.needed_by[in_op_id].add(out_op_id)
                    if node_l1.op_type == "Loop":
                        # Look at nodes in the loop body
                        l2 = getattr(
                            self, l1.mapping[node_l1.output[0]])  # Loop object
                        loop_body_l2 = l2.body
                        for node_l2 in loop_body_l2.node:
                            for in_op_id in node_l2.input:
                                # Treating node (outer loop) as dependent, not node_l2
                                self.needed_by[in_op_id].add(out_op_id)
                            if node_l2.op_type == "Loop":
                                # TODO: make this recursive for nested loops
                                raise NotImplementedError(
                                    "Activation garbage collection not implemented for >2 nested loops."
                                )
        self.needed_by.default_factory = None

        if experimental:
            warnings.warn(
                "Using experimental implementation that allows 'batch_size > 1'."
                "Batchnorm layers could potentially produce false outputs.")
Code example #5
    def __init__(
        self,
        onnx_model: onnx.ModelProto,
        batch_dim=0,
        experimental=False,
        debug=False,
        enable_pruning=False,
    ):
        """
        Convert onnx model to pytorch.

        Parameters
        ----------
        onnx_model: onnx.ModelProto
            Loaded onnx model.
        batch_dim: int
            Dimension of the batch.
        experimental: bool
            Experimental implementation allows batch_size > 1. However,
            batchnorm layers could potentially produce false outputs.
        enable_pruning: bool
            Track kept/pruned indices between different calls to forward pass.

        Returns
        -------
        model: torch.nn.Module
            A converted pytorch model.
        """
        super().__init__()
        self.onnx_model = onnx_model
        self.batch_dim = batch_dim
        self.experimental = experimental
        self.debug = debug
        self.enable_pruning = enable_pruning

        self.input_names = get_inputs_names(onnx_model.graph)
        self.output_names = get_outputs_names(onnx_model.graph)
        opset_version = onnx_model.opset_import[0].version

        # Create mapping from node (identified by first output) to submodule
        self.mapping = {}
        for op_id, op_name, op in convert_operations(
                onnx_model.graph,
                opset_version,
                batch_dim,
                enable_pruning,
        ):
            setattr(self, op_name, op)
            if isinstance(op, Loop) and debug:
                raise NotImplementedError(
                    "debug-mode with Loop node not implemented.")
            self.mapping[op_id] = op_name

        # Store initializers as buffers
        for tensor in self.onnx_model.graph.initializer:
            buffer_name = get_buffer_name(tensor.name)
            self.register_buffer(
                buffer_name,
                torch.from_numpy(numpy_helper.to_array(tensor)),
            )

        # Compute activation dependencies, mapping each node to its dependents
        self.needed_by = compute_activation_dependencies(
            self.onnx_model.graph, self, self.mapping)

        if experimental:
            warnings.warn(
                "Using experimental implementation that allows 'batch_size > 1'."
                "Batchnorm layers could potentially produce false outputs.")