Example #1
# Assumed imports for this snippet (module paths follow the torch-mlir
# layout at the time of writing and may differ across versions):
import sys
from io import StringIO

import torch

from torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator, ModuleBuilder
from torch_mlir.compiler_utils import run_pipeline_with_repro_report
from torch_mlir_e2e_test.torchscript.annotations import extract_annotations


def convert_torchscript_module_to_torch_backend_contract_mlir(program: torch.nn.Module):
    """Perform the common lowering from TorchScript to Torch MLIR.

    Returns an MLIR module that satisfies the Torch backend contract.
    """
    mb = ModuleBuilder()
    scripted = torch.jit.script(program)
    class_annotator = ClassAnnotator()

    extract_annotations(program, scripted, class_annotator)

    # TODO: Find a way to make each of these calls own its own
    # "debuggable error report" situation.
    # Capture stderr during the import so any diagnostics can be included in
    # the error report; save the original stream before entering the `try` so
    # that `finally` can always restore it.
    original_stderr = sys.stderr
    try:
        sys.stderr = StringIO()
        # Import the TorchScript module to MLIR
        mb.import_module(scripted._c, class_annotator)
    except Exception as e:
        raise Exception(f"""
PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
Exception:
{e}
Diagnostics:
{sys.stderr.getvalue()}
""") from None
    finally:
        sys.stderr = original_stderr

    run_pipeline_with_repro_report(
        mb.module,
        "torchscript-module-to-torch-backend-pipeline",
        "Lowering TorchScript Object Graph IR -> Torch Backend IR")

    return mb.module
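
A minimal usage sketch of the helper above (the `AddSelfModule` below and its
annotations are illustrative, not from the original; `export` and
`annotate_args` are the e2e-test annotation decorators that
`extract_annotations` consumes, and their import path may vary across
versions):

from torch_mlir_e2e_test.torchscript.annotations import annotate_args, export


class AddSelfModule(torch.nn.Module):  # hypothetical example module
    @export
    @annotate_args([
        None,  # the `self` argument carries no annotation
        ([2, 3], torch.float32, True),  # (shape, dtype, has value semantics)
    ])
    def forward(self, x):
        return x + x


module = convert_torchscript_module_to_torch_backend_contract_mlir(AddSelfModule())
print(module)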
Example #2
# CHECK:             %[[VAL_3:.*]] = call_indirect %[[VAL_2]](%[[ARG1]]) : (!torch.tensor) -> !torch.tensor
# CHECK:             return %[[VAL_3]] : !torch.tensor
# CHECK:           }
# CHECK-LABEL:     func.func private @__torch__.identity
# CHECK-SAME:        (%[[ARG:.*]]: !torch.tensor) -> !torch.tensor {
# CHECK:             return %[[ARG]] : !torch.tensor
# CHECK:           }

# CHECK-LABEL:   torch.class_type @__torch__.TestModule  {
# CHECK:           torch.method "forward", @__torch__.TestModule.forward
# CHECK:         }


# Assumed setup (appears earlier in the original test file; the import path
# may vary across torch-mlir versions):
import torch
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder

mb = ModuleBuilder()


def identity(x):
    return x


class TestModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return identity(x)


test_module = TestModule()
recursivescriptmodule = torch.jit.script(test_module)
# TODO: Automatically handle unpacking Python class RecursiveScriptModule into the underlying ScriptModule.
mb.import_module(recursivescriptmodule._c)
mb.module.operation.print()
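
The `# CHECK` comments in this test are LLVM FileCheck directives: the script
prints the imported IR, and FileCheck matches that output against the
patterns. In the torch-mlir test suite such scripts are driven by a lit RUN
line; a representative header (an assumption, not shown in this snippet)
would be:

# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s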
Example #3
# Assumed setup (appears earlier in the original test file; the import path
# may vary across torch-mlir versions):
import torch
from torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator, ModuleBuilder

mb = ModuleBuilder()


class TestModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.exported = 1
        self.not_exported = 2

    def forward(self):
        return self.not_exported_method()

    def not_exported_method(self):
        return


test_module = TestModule()
recursivescriptmodule = torch.jit.script(test_module)

annotator = ClassAnnotator()
class_type = recursivescriptmodule._c._type()
# CHECK-LABEL:   torch.class_type @__torch__.TestModule  {
# CHECK:           torch.attr "exported" : !torch.int
# CHECK:           torch.attr private "not_exported" : !torch.int
# CHECK:           torch.method "forward", @{{.*}}
# CHECK:           torch.method private "not_exported_method", @{{.*}}
# CHECK:         }
# `exportNone` marks everything on the class as private by default;
# `exportPath` then re-exports the selected attribute/method paths.
annotator.exportNone(class_type)
annotator.exportPath(class_type, ['exported'])
annotator.exportPath(class_type, ['forward'])

# TODO: Automatically handle unpacking Python class RecursiveScriptModule into the underlying ScriptModule.
mb.import_module(recursivescriptmodule._c, annotator)
mb.module.operation.print()
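
The annotator can also attach argument annotations before import. A tiny
illustrative sketch (not in the original): since `forward` here takes no
arguments besides `self`, the annotation list contains only the `None` slot
for `self`, the same convention the `compile` examples below use.

annotator.annotateArgs(class_type, ['forward'], [None])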
Example #4
# Assumed context for this snippet (the function comes from torch-mlir's
# Python API module; `OutputType` and `TensorPlaceholder` are defined
# alongside it there, and the import paths below may vary across versions):
from typing import Sequence, Union

import torch

from torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator, ModuleBuilder
from torch_mlir.compiler_utils import run_pipeline_with_repro_report

# Alias used in the signature below: an example argument is either a
# concrete tensor or a placeholder describing its shape/dtype.
_example_arg = Union["TensorPlaceholder", torch.Tensor]


def compile(model: torch.nn.Module,
            example_args: Union[_example_arg, Sequence[_example_arg]],
            output_type: Union[str, "OutputType"] = OutputType.TORCH,
            use_tracing: bool = False,
            verbose: bool = False):
    """Convert a PyTorch model to MLIR.

    Args:
        model: The PyTorch model to convert.
        example_args: A list of example arguments to use when inferring the
            shapes of the arguments to the `forward` method of the model.
            A single tensor is treated as a list containing that one tensor.
            A TensorPlaceholder object is also allowed in place of any
            Tensor.
        output_type: The kind of output to produce. See `OutputType` for more
            details.
        use_tracing: If True, use `torch.jit.trace` to convert the model to
            JIT IR rather than `torch.jit.script`.
        verbose: If True, print extra information about the conversion.

    Returns:
        An MLIR module that contains the converted model in the specified
        output type.
    """
    output_type = OutputType.get(output_type)

    # Special case -- many models have just one input, so canonicalize a single
    # tensor to a list of a single tensor to make the API more ergonomic.
    if isinstance(example_args, (torch.Tensor, TensorPlaceholder)):
        example_args = (example_args, )

    # TODO: Don't hardcode "forward". See `torch.onnx.export` and
    # `torch.jit.trace_module` for API inspiration.
    if use_tracing:
        scripted = torch.jit.trace(model, tuple(example_args))
    else:
        scripted = torch.jit.script(model)

    # Convert all concrete inputs to TensorPlaceholder's, for consistency.
    arg_placeholders = []
    for arg in example_args:
        if isinstance(arg, TensorPlaceholder):
            arg_placeholders.append(arg)
        else:
            assert isinstance(arg, torch.Tensor)
            arg_placeholders.append(TensorPlaceholder.like(arg))

    class_annotator = ClassAnnotator()
    # The first entry corresponds to the `self` argument of `forward`,
    # which carries no annotation.
    forward_annotation = [None]
    for arg in arg_placeholders:
        # Assume that all tensors have value semantics for now.
        forward_annotation.append((arg.shape, arg.dtype, True))
    class_annotator.exportNone(scripted._c._type())
    class_annotator.exportPath(scripted._c._type(), ["forward"])
    class_annotator.annotateArgs(scripted._c._type(), ["forward"],
                                 forward_annotation)

    mb = ModuleBuilder()
    mb.import_module(scripted._c, class_annotator)

    if output_type == OutputType.RAW:
        return mb.module

    run_pipeline_with_repro_report(
        mb.module, "torchscript-module-to-torch-backend-pipeline",
        "Lowering TorchScript IR -> Torch Backend IR")

    if verbose:
        print("\n====================")
        print("Torch Backend IR")
        print(mb.module)

    if output_type == OutputType.TORCH:
        return mb.module

    if output_type == OutputType.TOSA:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-tosa-backend-pipeline",
            "Lowering Torch Backend IR -> TOSA Backend IR")
        if verbose:
            print("\n====================")
            print("TOSA Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.LINALG_ON_TENSORS:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-linalg-on-tensors-backend-pipeline",
            "Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
        if verbose:
            print("\n====================")
            print("LINALG Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.MHLO:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-mhlo-backend-pipeline",
            "Lowering Torch Backend IR -> MHLO Backend IR")
        if verbose:
            print("\n====================")
            print("MHLO Backend IR")
            print(mb.module)
        return mb.module
    raise Exception(f"Unknown OutputType: {output_type}")
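
A minimal usage sketch of `compile` (the module, shapes, and chosen output
type are illustrative, not from the original):

class AddModule(torch.nn.Module):  # hypothetical example module
    def forward(self, x, y):
        return x + y


# Lower to linalg-on-tensors, printing the IR after each stage.
module = compile(AddModule(),
                 (torch.ones(2, 3), torch.ones(2, 3)),
                 output_type=OutputType.LINALG_ON_TENSORS,
                 verbose=True)
print(module)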
Example #5
# Assumed context for this snippet (a later revision of the same `compile`
# function; `OutputType` and `TensorPlaceholder` are defined alongside it,
# and the import paths below may vary across torch-mlir versions):
from typing import Sequence, Union

import torch

from torch_mlir.dialects.torch.importer.jit_ir import (ClassAnnotator,
                                                        ImportOptions,
                                                        ModuleBuilder)
from torch_mlir.compiler_utils import run_pipeline_with_repro_report

_example_arg = Union["TensorPlaceholder", torch.Tensor]


def compile(model: torch.nn.Module,
            example_args: Union[_example_arg, Sequence[_example_arg]],
            output_type: Union[str, "OutputType"] = OutputType.TORCH,
            use_tracing: bool = False,
            ignore_traced_shapes: bool = False,
            verbose: bool = False):
    """Convert a PyTorch model to MLIR.

    Args:
        model: The PyTorch model to convert.
        example_args: A list of example arguments to use when inferring the
            shapes of the arguments to the `forward` method of the model.
            A single tensor is treated as a list containing that one tensor.
            A TensorPlaceholder object is also allowed in place of any
            Tensor.
        output_type: The kind of output to produce. See `OutputType` for more
            details.
        use_tracing: If True, use `torch.jit.trace` to convert the model to
            JIT IR rather than `torch.jit.script`.
        ignore_traced_shapes: If True, ignore the shapes that were observed
            during tracing. This should only be used if one knows that the
            original traced program would result in the same trace (modulo
            shapes) for all shape combinations implied by any
            `TensorPlaceholder`s used as `example_args`. Strictly speaking,
            this option covers dtypes too, but we just say "shapes" to be
            succinct.
        verbose: If True, print extra information about the conversion.

    Returns:
        An MLIR module that contains the converted model in the specified
        output type.
    """
    output_type = OutputType.get(output_type)
    if ignore_traced_shapes and not use_tracing:
        raise Exception("`ignore_traced_shapes` requires `use_tracing`")

    # Special case -- many models have just one input, so canonicalize a single
    # tensor to a list of a single tensor to make the API more ergonomic.
    if isinstance(example_args, (torch.Tensor, TensorPlaceholder)):
        example_args = (example_args, )

    # TODO: Don't hardcode "forward". See `torch.onnx.export` and
    # `torch.jit.trace_module` for API inspiration.
    if use_tracing:
        example_args_for_trace = []
        for arg in example_args:
            if isinstance(arg, TensorPlaceholder):
                if not ignore_traced_shapes:
                    # To avoid accidental footguns, we require
                    # `ignore_traced_shapes` to be true if we're using
                    # TensorPlaceholder's, as it falls into the same
                    # "hopefully the trace works for different inputs" bucket
                    # of concerns.
                    raise Exception(
                        "TensorPlaceholder can only be used with tracing when `ignore_traced_shapes=True`"
                    )
                # For any dynamic dimensions, replace them with "7" arbitrarily.
                # If a user is using dynamic dimensions with tracing, they are
                # walking on thin ice already -- assume they know what they are
                # doing.
                shape = [s if s != -1 else 7 for s in arg.shape]
                example_args_for_trace.append(
                    torch.ones(*shape, dtype=arg.dtype))
            else:
                example_args_for_trace.append(arg)
        scripted = torch.jit.trace(model, tuple(example_args_for_trace))
    else:
        scripted = torch.jit.script(model)

    # Convert all concrete inputs to TensorPlaceholder's, for consistency.
    arg_placeholders = []
    for arg in example_args:
        if isinstance(arg, TensorPlaceholder):
            arg_placeholders.append(arg)
        else:
            assert isinstance(arg, torch.Tensor)
            arg_placeholders.append(TensorPlaceholder.like(arg))

    class_annotator = ClassAnnotator()
    # The first entry corresponds to the `self` argument of `forward`,
    # which carries no annotation.
    forward_annotation = [None]
    for arg in arg_placeholders:
        # Assume that all tensors have value semantics for now.
        forward_annotation.append((arg.shape, arg.dtype, True))
    class_annotator.exportNone(scripted._c._type())
    class_annotator.exportPath(scripted._c._type(), ["forward"])
    class_annotator.annotateArgs(scripted._c._type(), ["forward"],
                                 forward_annotation)

    mb = ModuleBuilder()
    import_options = ImportOptions()
    import_options.ignoreExistingTensorShapesAndDtypes = ignore_traced_shapes
    mb.import_module(scripted._c, class_annotator, import_options)

    if output_type == OutputType.RAW:
        return mb.module

    run_pipeline_with_repro_report(
        mb.module, "torchscript-module-to-torch-backend-pipeline",
        "Lowering TorchScript IR -> Torch Backend IR")

    if verbose:
        print("\n====================")
        print("Torch Backend IR")
        print(mb.module)

    if output_type == OutputType.TORCH:
        return mb.module

    if output_type == OutputType.TOSA:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-tosa-backend-pipeline",
            "Lowering Torch Backend IR -> TOSA Backend IR")
        if verbose:
            print("\n====================")
            print("TOSA Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.LINALG_ON_TENSORS:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-linalg-on-tensors-backend-pipeline",
            "Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR")
        if verbose:
            print("\n====================")
            print("LINALG Backend IR")
            print(mb.module)
        return mb.module

    if output_type == OutputType.MHLO:
        run_pipeline_with_repro_report(
            mb.module, "torch-backend-to-mhlo-backend-pipeline",
            "Lowering Torch Backend IR -> MHLO Backend IR")
        if verbose:
            print("\n====================")
            print("MHLO Backend IR")
            print(mb.module)
        return mb.module
    raise Exception(f"Unknown OutputType: {output_type}")
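
A minimal usage sketch of the tracing path (the module and placeholder shape
are illustrative, not from the original; constructing the placeholder as
`TensorPlaceholder([-1, 4], torch.float32)`, with -1 marking a dynamic
dimension, is an assumption about its constructor):

class SquareModule(torch.nn.Module):  # hypothetical example module
    def forward(self, x):
        return x * x


# A dynamic batch dimension with tracing requires `ignore_traced_shapes=True`,
# as enforced above; the dynamic dim is replaced by an arbitrary size (7)
# just to drive the trace.
placeholder = TensorPlaceholder([-1, 4], torch.float32)
module = compile(SquareModule(),
                 placeholder,
                 output_type=OutputType.TORCH,
                 use_tracing=True,
                 ignore_traced_shapes=True)
print(module)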