Example #1
def build_mlir_module(op: OpOverload, kwargs: Dict[str, Any]) -> ir.Module:
    """Translate input function into an MLIR module in the `torch` dialect.

    Parameters
    ----------
    op: OpOverload
        A callable from the `torch.ops.aten` namespace that has a `_schema` field.
    kwargs: Dict
        A dictionary of all arguments passed in through `__torch_dispatch__`
        (including int, float, and bool parameters).

    Returns
    -------
    ir.Module
        Translation of the input module into an MLIR module.
    """

    # This assert catches tensor shapes that have size-0 dimensions, such as
    # those produced while evaluating SliceEndSleStartModule_basic and
    # SliceOutOfLowerBoundEndIndexModule_basic. Such size-0 dimensions fail
    # the assert at mlir/lib/IR/BuiltinTypes.cpp, line 887.
    annotations = []
    for arg_name, arg in kwargs.items():
        if isinstance(arg, torch.Tensor):
            assert np.prod(arg.shape) != 0, \
                f"{arg_name} has invalid shape {arg.shape}"
            annotations.append(
                TorchTensorType(shape=tuple(arg.shape), dtype=arg.dtype))
    annotations = tuple(annotations)

    script_fun = build_ts_script_function(op._schema, kwargs)
    num_graph_inputs = len(list(script_fun.graph.inputs()))
    assert len(annotations) == num_graph_inputs, \
        "Number of annotations and number of graph inputs differ."

    mb = ModuleBuilder()
    mb.import_function(script_fun)

    func_op = get_func_op_with_name(mb.module, script_fun.name)
    assert func_op is not None, (
        "Unable to find FuncOp in new module. Make sure the function was "
        "imported correctly into the ModuleBuilder.")

    func_annotation = Annotation(annotations)
    arg_attrs = AnnotationConverter.to_mlir_array_attr(func_annotation,
                                                       mb.context)
    func_op.attributes["arg_attrs"] = arg_attrs

    return mb.module
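
A minimal usage sketch (hypothetical values, assuming the helpers this function depends on are in scope). `torch.ops.aten.mm.default` has a two-tensor schema, so the tensor annotations line up one-to-one with the graph inputs:

import torch

op = torch.ops.aten.mm.default  # aten::mm(Tensor self, Tensor mat2) -> Tensor
kwargs = {"self": torch.ones(2, 3), "mat2": torch.ones(3, 4)}
module = build_mlir_module(op, kwargs)
print(module)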
Example #2
def convert_torchscript_module_to_torch_backend_contract_mlir(program: torch.nn.Module):
    """Perform common lowering from TorchScript to Torch MLIR

    Returns an MLIR module that satisfies the Torch backend contract.
    """
    mb = ModuleBuilder()
    scripted = torch.jit.script(program)
    class_annotator = ClassAnnotator()

    extract_annotations(program, scripted, class_annotator)


    # TODO: Find a way to make each of these calls own its own
    # "debuggable error report" situation.
    original_stderr = sys.stderr
    try:
        sys.stderr = StringIO()
        # Import the TorchScript module to MLIR.
        mb.import_module(scripted._c, class_annotator)
    except Exception as e:
        raise Exception(f"""
PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
Exception:
{e}
Diagnostics:
{sys.stderr.getvalue()}
""") from None
    finally:
        sys.stderr = original_stderr

    run_pipeline_with_repro_report(
        mb.module,
        "torchscript-module-to-torch-backend-pipeline",
        "Lowering TorchScript Object Graph IR -> Torch Backend IR")

    return mb.module
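
A minimal usage sketch, assuming the e2e-test annotation decorators that `extract_annotations` reads are importable (the import path may differ by version; names and shapes are illustrative):

import torch

# Assumed import path for the annotation decorators:
from torch_mlir_e2e_test.torchscript.annotations import annotate_args, export


class AddOne(torch.nn.Module):
    @export
    @annotate_args([
        None,                           # annotation slot for `self`
        ([2, 3], torch.float32, True),  # x: shape, dtype, value semantics
    ])
    def forward(self, x):
        return x + 1


backend_ir = convert_torchscript_module_to_torch_backend_contract_mlir(AddOne())
print(backend_ir)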
Example #3
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.

import torch
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder

# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s

mb = ModuleBuilder()

# Note: The "if without else" case is handled by yielding None from the
# else branch and making all defined values optional, so no special handling
# is needed.
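
# As a hedged illustration of the note above (a hypothetical function that
# is not part of the original test, hence no CHECK lines): an `if` with no
# `else` still scripts and imports cleanly.
@mb.import_function
@torch.jit.script
def prim_If_no_else(b: bool, i: int):
    res = i
    if b:
        res = i + i
    return res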


# CHECK-LABEL: @__torch__.prim_If(
# CHECK-SAME:           %[[B:.*]]: !torch.bool,
# CHECK-SAME:           %[[I:.*]]: !torch.int) -> !torch.int {
@mb.import_function
@torch.jit.script
def prim_If(b: bool, i: int):
    # CHECK:           %[[RES:.*]] = torch.prim.If %[[B]] -> (!torch.int) {
    # CHECK:             %[[ADD:.*]] = torch.aten.add.int %[[I]], %[[I]]
    # CHECK:             torch.prim.If.yield %[[ADD]] : !torch.int
    # CHECK:           } else {
    # CHECK:             %[[MUL:.*]] = torch.aten.mul.int %[[I]], %[[I]]
    # CHECK:             torch.prim.If.yield %[[MUL]] : !torch.int
    # CHECK:           }
    # CHECK:           return %[[RES:.*]] : !torch.int
    if b:
        return i + i
    else:
        return i * i
Example #4
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.

import typing

import torch
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder

# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s

mb = ModuleBuilder()

# CHECK-LABEL:     func.func private @__torch__.TestModule.forward
# CHECK-SAME:        (%[[ARG0:.*]]: !torch.nn.Module<"__torch__.TestModule">, %[[ARG1:.*]]: !torch.tensor) -> !torch.tensor {
# CHECK:             %[[VAL_2:.*]] = constant @__torch__.identity : (!torch.tensor) -> !torch.tensor
# CHECK:             %[[VAL_3:.*]] = call_indirect %[[VAL_2]](%[[ARG1]]) : (!torch.tensor) -> !torch.tensor
# CHECK:             return %[[VAL_3]] : !torch.tensor
# CHECK:           }
# CHECK-LABEL:     func.func private @__torch__.identity
# CHECK-SAME:        (%[[ARG:.*]]: !torch.tensor) -> !torch.tensor {
# CHECK:             return %[[ARG]] : !torch.tensor
# CHECK:           }

# CHECK-LABEL:   torch.class_type @__torch__.TestModule  {
# CHECK:           torch.method "forward", @__torch__.TestModule.forward
# CHECK:         }


def identity(x):
    return x
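

# The original example is truncated here. A hedged completion sketch,
# reconstructed from the CHECK lines above (a module whose `forward` calls
# `identity` through a function constant):
class TestModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return identity(x)


test_module = TestModule()
recursivescriptmodule = torch.jit.script(test_module)
mb.import_module(recursivescriptmodule._c)
print(mb.module)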
Example #5
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.

import torch
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder
from typing import Tuple, Optional, NamedTuple

from utils import create_script_function

# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s

mb = ModuleBuilder()
NT = NamedTuple('NT', [('f1', Optional[torch.Tensor]),
                       ('f2', Optional[torch.Tensor])])


# CHECK-LABEL:   func.func @__torch__.tuple(
# CHECK-SAME:            %[[T0:.*]]: !torch.tensor,
# CHECK-SAME:            %[[T1:.*]]: !torch.tensor) ->
# CHECK-SAME:            !torch.tuple<tensor, tensor> {
# CHECK:           %[[RET:.*]] = torch.prim.TupleConstruct %[[T0]], %[[T1]] :
# CHECK-SAME:            !torch.tensor, !torch.tensor -> !torch.tuple<tensor, tensor>
# CHECK:           return %[[RET]] : !torch.tuple<tensor, tensor>
@mb.import_function
@torch.jit.script
def tuple(t0, t1):
    return t0, t1


# CHECK-LABEL:   func.func @__torch__.tuple_optional(
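# The remaining CHECK lines and the function body are truncated in the
# original; a hedged sketch of `tuple_optional`, inferred from the imports
# and the preceding `tuple` function (the omitted CHECK lines are not guessed):
@mb.import_function
@torch.jit.script
def tuple_optional(
        t0, t1) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
    return t0, t1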
Example #6
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.

import typing

import torch
from torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator, ModuleBuilder
# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s

mb = ModuleBuilder()


class TestModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.exported = 1
        self.not_exported = 2

    def forward(self):
        return self.not_exported_method()

    def not_exported_method(self):
        return


test_module = TestModule()
recursivescriptmodule = torch.jit.script(test_module)

annotator = ClassAnnotator()
class_type = recursivescriptmodule._c._type()
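
# The original example is truncated here. A hedged continuation sketch using
# the ClassAnnotator API as it appears in Examples #2 and #8: export nothing
# by default, then export just `forward`.
annotator.exportNone(class_type)
annotator.exportPath(class_type, ["forward"])
mb.import_module(recursivescriptmodule._c, annotator)
print(mb.module)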
Example #7
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.

import typing

import torch
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder

from utils import create_script_function

# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s

mb = ModuleBuilder()

# CHECK-LABEL:   func.func @__torch__.prim_NumToTensor(
# CHECK-SAME:                           %[[ARG:.*]]: !torch.int) -> !torch.tensor {
# CHECK:           %[[RET:.*]] = torch.prim.NumToTensor.Scalar %[[ARG]] : !torch.int -> !torch.tensor
# CHECK:           return %[[RET]] : !torch.tensor
# CHECK:         }
@mb.import_function
@torch.jit.script
def prim_NumToTensor(i: int):
    return _to_tensor(i)

# CHECK-LABEL:   func.func @__torch__.prim_Print(
# CHECK-SAME:                     %[[ARG:.*]]: !torch.tensor) -> !torch.none {
# CHECK:           %[[STR:.*]] = torch.constant.str "x"
# CHECK:           torch.prim.Print(%[[STR]], %[[ARG]]) : !torch.str, !torch.tensor
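# The original example is truncated here. A hedged sketch of the function
# the prim_Print CHECK block above describes (printing the string "x" and
# the tensor argument):
@mb.import_function
@torch.jit.script
def prim_Print(x):
    print("x", x)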
Example #8
def compile(model: torch.nn.Module,
            example_args: Union[_example_arg, Sequence[_example_arg]],
            output_type: Union[str, "OutputType"] = OutputType.TORCH,
            use_tracing: bool = False,
            verbose: bool = False):
    """Convert a PyTorch model to MLIR.

    Args:
        model: The PyTorch model to convert.
        example_args: A list of example arguments to use when inferring the
            shapes of the arguments to `forward` method of the model.
            A single tensor is treated as a list of a single tensor.
            A TensorPlaceholder object is also allowed in the place of any
            Tensor.
        output_type: The kind of output to produce. See `OutputType` for more
            details.
        use_tracing: If True, use `torch.jit.trace` to convert the model to
            JIT IR rather than `torch.jit.script`.
        verbose: If True, print extra information about the conversion.

    Returns:
        An MLIR module that contains the converted model in the specified
        output type.
    """
    output_type = OutputType.get(output_type)

    # Special case -- many models have just one input, so canonicalize a single
    # tensor to a list of a single tensor to make the API more ergonomic.
    if isinstance(example_args, (torch.Tensor, TensorPlaceholder)):
        example_args = (example_args, )

    # TODO: Don't hardcode "forward". See `torch.onnx.export` and
    # `torch.jit.trace_module` for API inspiration.
    if use_tracing:
        scripted = torch.jit.trace(model, tuple(example_args))
    else:
        scripted = torch.jit.script(model)

    # Convert all concrete inputs to TensorPlaceholder's, for consistency.
    arg_placeholders = []
    for arg in example_args:
        if isinstance(arg, TensorPlaceholder):
            arg_placeholders.append(arg)
        else:
            assert isinstance(arg, torch.Tensor)
            arg_placeholders.append(TensorPlaceholder.like(arg))

    class_annotator = ClassAnnotator()
    # The leading `None` is the annotation slot for the `self` argument.
    forward_annotation = [None]
    for arg in arg_placeholders:
        # Assume that all tensors have value semantics for now.
        forward_annotation.append((arg.shape, arg.dtype, True))
    class_annotator.exportNone(scripted._c._type())
    class_annotator.exportPath(scripted._c._type(), ["forward"])
    class_annotator.annotateArgs(scripted._c._type(), ["forward"],
                                 forward_annotation)

    mb = ModuleBuilder()
    mb.import_module(scripted._c, class_annotator)

    if output_type == OutputType.RAW:
        return mb.module

    run_pipeline_with_repro_report(
        mb.module, "torchscript-module-to-torch-backend-pipeline",
        "Lowering TorchScript IR -> Torch Backend IR")

    if verbose:
        print("\n====================")
        print("Torch Backend IR")
        print(mb.module)

    if output_type == OutputType.TORCH:
        return mb.module

    if output_type == OutputType.TOSA:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-tosa-backend-pipeline",
            "Lowering Torch Backend IR -> TOSA Backend IR")
        if verbose:
            print("\n====================")
            print("TOSA Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.LINALG_ON_TENSORS:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-linalg-on-tensors-backend-pipeline",
            "Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
        if verbose:
            print("\n====================")
            print("LINALG Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.MHLO:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-mhlo-backend-pipeline",
            "Lowering Torch Backend IR -> MHLO Backend IR")
        if verbose:
            print("\n====================")
            print("MHLO Backend IR")
            print(mb.module)
        return mb.module
    raise Exception(f"Unknown OutputType: {output_type}")
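
A minimal usage sketch (the model and shapes are illustrative):

class MatMul(torch.nn.Module):
    def forward(self, lhs, rhs):
        return torch.matmul(lhs, rhs)


compiled = compile(MatMul(),
                   (torch.ones(4, 5), torch.ones(5, 6)),
                   output_type=OutputType.LINALG_ON_TENSORS)
print(compiled)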
Example #9
def compile(model: torch.nn.Module,
            example_args: Union[_example_arg, Sequence[_example_arg]],
            output_type: Union[str, "OutputType"] = OutputType.TORCH,
            use_tracing: bool = False,
            ignore_traced_shapes=False,
            verbose: bool = False):
    """Convert a PyTorch model to MLIR.

    Args:
        model: The PyTorch model to convert.
        example_args: A list of example arguments to use when inferring the
            shapes of the arguments to `forward` method of the model.
            A single tensor is treated as a list of a single tensor.
            A TensorPlaceholder object is also allowed in the place of any
            Tensor.
        output_type: The kind of output to produce. See `OutputType` for more
            details.
        use_tracing: If True, use `torch.jit.trace` to convert the model to
            JIT IR rather than `torch.jit.script`.
        ignore_traced_shapes: If True, ignore the shapes that were observed
            during tracing. This should only be used if one knows that the
            original traced program would result in the same trace (modulo
            shapes) for all shape combinations implied by any
            `TensorPlaceholder`'s used as `example_args`. Strictly speaking,
            this option covers dtypes too, but we just say "shapes" to be
            succinct.
        verbose: If True, print extra information about the conversion.

    Returns:
        An MLIR module that contains the converted model in the specified
        output type.
    """
    output_type = OutputType.get(output_type)
    if ignore_traced_shapes and not use_tracing:
        raise Exception("`ignore_traced_shapes` requires `use_tracing`")

    # Special case -- many models have just one input, so canonicalize a single
    # tensor to a list of a single tensor to make the API more ergonomic.
    if isinstance(example_args, (torch.Tensor, TensorPlaceholder)):
        example_args = (example_args, )

    # TODO: Don't hardcode "forward". See `torch.onnx.export` and
    # `torch.jit.trace_module` for API inspiration.
    if use_tracing:
        example_args_for_trace = []
        for arg in example_args:
            if isinstance(arg, TensorPlaceholder):
                if not ignore_traced_shapes:
                    # To avoid accidental footguns, we require
                    # `ignore_traced_shapes` to be true if we're using
                    # TensorPlaceholder's, as it falls into the same
                    # "hopefully the trace works for different inputs" bucket
                    # of concerns.
                    raise Exception(
                        "TensorPlaceholder can only be used with tracing when `ignore_traced_shapes=True`"
                    )
                # For any dynamic dimensions, replace them with "7" arbitrarily.
                # If a user is using dynamic dimensions with tracing, they are
                # walking on thin ice already -- assume they know what they are
                # doing.
                shape = [s if s != -1 else 7 for s in arg.shape]
                example_args_for_trace.append(
                    torch.ones(*shape, dtype=arg.dtype))
            else:
                example_args_for_trace.append(arg)
        scripted = torch.jit.trace(model, tuple(example_args_for_trace))
    else:
        scripted = torch.jit.script(model)

    # Convert all concrete inputs to TensorPlaceholder's, for consistency.
    arg_placeholders = []
    for arg in example_args:
        if isinstance(arg, TensorPlaceholder):
            arg_placeholders.append(arg)
        else:
            assert isinstance(arg, torch.Tensor)
            arg_placeholders.append(TensorPlaceholder.like(arg))

    class_annotator = ClassAnnotator()
    # The leading `None` is the annotation slot for the `self` argument.
    forward_annotation = [None]
    for arg in arg_placeholders:
        # Assume that all tensors have value semantics for now.
        forward_annotation.append((arg.shape, arg.dtype, True))
    class_annotator.exportNone(scripted._c._type())
    class_annotator.exportPath(scripted._c._type(), ["forward"])
    class_annotator.annotateArgs(scripted._c._type(), ["forward"],
                                 forward_annotation)

    mb = ModuleBuilder()
    import_options = ImportOptions()
    import_options.ignoreExistingTensorShapesAndDtypes = ignore_traced_shapes
    mb.import_module(scripted._c, class_annotator, import_options)

    if output_type == OutputType.RAW:
        return mb.module

    run_pipeline_with_repro_report(
        mb.module, "torchscript-module-to-torch-backend-pipeline",
        "Lowering TorchScript IR -> Torch Backend IR")

    if verbose:
        print("\n====================")
        print("Torch Backend IR")
        print(mb.module)

    if output_type == OutputType.TORCH:
        return mb.module

    if output_type == OutputType.TOSA:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-tosa-backend-pipeline",
            "Lowering Torch Backend IR -> TOSA Backend IR")
        if verbose:
            print("\n====================")
            print("TOSA Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.LINALG_ON_TENSORS:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-linalg-on-tensors-backend-pipeline",
            "Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
        if verbose:
            print("\n====================")
            print("LINALG Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.MHLO:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-mhlo-backend-pipeline",
            "Lowering Torch Backend IR -> MHLO Backend IR")
        if verbose:
            print("\n====================")
            print("MHLO Backend IR")
            print(mb.module)
        return mb.module
    raise Exception(f"Unknown OutputType: {output_type}")
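
A hedged usage sketch of the new flag: tracing a model with a dynamic leading dimension via `TensorPlaceholder` (the model and shapes are illustrative; `-1` marks a dynamic dimension, as handled in the tracing path above):

class Double(torch.nn.Module):
    def forward(self, x):
        return x * 2


placeholder = TensorPlaceholder([-1, 4], dtype=torch.float32)
compiled = compile(Double(),
                   placeholder,
                   use_tracing=True,
                   ignore_traced_shapes=True)
print(compiled)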