Code example #1
def _mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
    if isinstance(mod, PrimFunc):
        mod = mod.with_attr("global_symbol", "main")
        mod = mod.with_attr("tir.noalias", True)
        mod = IRModule({"main": mod})
    if not isinstance(mod, IRModule):
        raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
    # Make sure the module can be found in ApplyHistoryBest:
    # a different function name would cause structural inequality.
    func_names = mod.get_global_vars()
    if len(func_names) == 1:
        # Unpack only after the length check so that multi-function modules
        # pass through instead of raising ValueError.
        (func_name,) = func_names
        if func_name.name_hint != "main":
            mod = IRModule({"main": mod[func_name]})
    return mod
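
A minimal usage sketch for the helper above. Hedged: `te.create_prim_func` is assumed to be available in the TVM build at hand, and the compute definition is only illustrative:

import tvm
from tvm import te

# A trivial elementwise PrimFunc built from a TE compute definition.
A = te.placeholder((8,), name="A")
B = te.compute((8,), lambda i: A[i] + 1.0, name="B")
func = te.create_prim_func([A, B])

# Wraps the PrimFunc as the "main" function of an IRModule and attaches
# the `global_symbol` and `tir.noalias` attributes.
normalized = _mod(func)
assert normalized.get_global_vars()[0].name_hint == "main"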
Code example #2
def create_executor(kind="debug", mod=None, ctx=None, target="llvm"):
    """Factory function to create an executor.

    Parameters
    ----------
    kind : str
        The type of executor. Available options are `debug` for the
        interpreter, `graph` for the graph executor, and `vm` for the
        virtual machine.

    mod : :py:class:`~tvm.IRModule`
        The Relay module containing a collection of functions.

    ctx : :py:class:`~tvm.runtime.TVMContext`
        The context on which to execute the code.

    target : :py:class:`tvm.Target`
        The compilation target.
    """
    if mod is None:
        mod = IRModule()
    if ctx is not None:
        assert ctx.device_type == _nd.context(str(target), 0).device_type
    else:
        ctx = _nd.context(str(target), 0)

    if isinstance(target, str):
        target = _target.create(target)
    if kind == "debug":
        return _interpreter.Interpreter(mod, ctx, target)
    if kind == "graph":
        return GraphExecutor(mod, ctx, target)
    if kind == "vm":
        return VMExecutor(mod, ctx, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
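
A usage sketch for this ctx-based variant, adapted from the docstring example in code example #6 below; the printed result assumes the add-one expression shown:

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", relay.TensorType([1], "float32"))
f = relay.Function([x], relay.add(x, relay.const(1.0, "float32")))
mod = tvm.IRModule.from_expr(f)

# "debug" selects the interpreter; ctx defaults to the context of `target`.
executor = create_executor(kind="debug", mod=mod, target="llvm")
result = executor.evaluate()(np.array([2.0], dtype="float32"))
# result: array([3.], dtype=float32)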
Code example #3
File: space_generation.py  Project: chenghanpeng/tvm
def check_sketches(
    mod: IRModule,
    sketches: List[Schedule],
    expected_mods: List[IRModule],
    expected_decisions: List[List[Tuple[str, List[int]]]],
    *,
    debug_mask="all",
):
    assert len(expected_mods) == len(expected_decisions)
    assert len(sketches) == len(expected_mods)
    expected_mods = [
        IRModule({"main": m}) if not isinstance(m, IRModule) else m for m in expected_mods
    ]
    sketches = list(sketches)
    for expected_id, (expected_mod, expected_decision) in enumerate(
        zip(expected_mods, expected_decisions)
    ):
        sketch_id = _find_match_sketch_id(
            mod,
            sketches,
            expected_mod,
            expected_decision,
            debug_mask=debug_mask,
        )
        if sketch_id is None:
            raise AssertionError(
                f"Expected sketch #{expected_id} doesn't exist in the generated sketches."
            )
        sketches.pop(sketch_id)
Code example #4
File: state.py  Project: zotanika/incubator-tvm
def _parse_mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
    if isinstance(mod, PrimFunc):
        mod = IRModule({"main": mod})
    if not isinstance(mod, IRModule):
        raise TypeError(
            f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
    return mod
Code example #5
def replace_ir_builder_module(deep_copy=False, realize=False):
    new_func = tvm.script.from_source(tvm.script.asscript(elementwise))
    other_func = tvm.script.from_source(tvm.script.asscript(elementwise))
    mod = IRModule(functions={"main": new_func, "other": other_func})
    s = tir.ScheduleState(mod, debug_mode=True)
    target = tvm.tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="target",
        body=s.mod["main"].body.block.body[1],
        init=None,
        alloc_buffers=None,
        match_buffers=None,
        annotations=None,
    )
    if realize:
        target = tvm.tir.BlockRealize(
            iter_values=[],
            predicate=True,
            block=target,
        )
    if deep_copy:
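        # Round-tripping through __getstate__/__setstate__ serializes the
        # block and reloads it, which yields a deep copy of `target`.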
        target.__setstate__(target.__getstate__())
    gc.collect()
    return s, target
Code example #6
File: build_module.py  Project: zjppoet/tvm
def create_executor(kind="debug", mod=None, device=None, target="llvm", params=None):
    """Factory function to create an executor.

    Example
    -------
    .. code-block:: python

        import tvm.relay
        import numpy as np

        x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
        expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
        tvm.relay.create_executor(
            kind="vm", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr))
        ).evaluate()(np.array([2], dtype="float32"))
        # returns `array([3.], dtype=float32)`

    Parameters
    ----------
    kind : str
        The type of executor. Available options are `debug` for the
        interpreter, `graph` for the graph executor, and `vm` for the virtual
        machine.

    mod : :py:class:`~tvm.IRModule`
        The Relay module containing a collection of functions.

    device : :py:class:`Device`
        The device on which to execute the code.

    target : :py:class:`tvm.Target`
        The compilation target.

    params : dict of str to NDArray
         Input parameters to the graph that do not change
         during inference time.

    Returns
    -------
    executor : :py:class:`~tvm.relay.backend.interpreter.Executor`
    """
    if mod is None:
        mod = IRModule()
    if device is not None:
        assert device.device_type == _nd.device(str(target), 0).device_type
    else:
        device = _nd.device(str(target), 0)

    if params is not None:
        mod = IRModule.from_expr(bind_params_by_name(mod["main"], params))

    if isinstance(target, str):
        target = Target(target)
    if kind == "debug":
        return _interpreter.Interpreter(mod, device, target)
    if kind == "graph":
        return GraphExecutor(mod, device, target)
    if kind == "vm":
        return VMExecutor(mod, device, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
Code example #7
    def __init__(
        self,
        mod: Union[PrimFunc, IRModule],
        debug_mode: Union[bool, int] = False,
    ) -> None:
        """Construct a schedule state from an IRModule or a PrimFunc

        Parameters
        ----------
        mod : Union[PrimFunc, IRModule]
            The IRModule or PrimFunc to be scheduled
        debug_mode : Union[bool, int]
            Do extra correctness checking after the class creation and after
            each call to the Replace method.
            Possible choices of `debug_mode`:
            1) True - Turn on all the checks
            2) False - Turn off all the checks
            3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
        """
        if isinstance(mod, PrimFunc):
            mod = IRModule({"main": mod})
        if isinstance(debug_mode, bool):
            if debug_mode:
                debug_mode = -1
            else:
                debug_mode = 0
        if not isinstance(debug_mode, int):
            raise TypeError(
                f"`debug_mode` should be integer or boolean, but gets: {debug_mode}"
            )
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleState,  # type: ignore # pylint: disable=no-member
            mod,
            debug_mode,
        )
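
A small sketch of the two accepted forms of `debug_mode`, assuming the class above is in scope as `ScheduleState` and that `matmul` is a placeholder PrimFunc defined elsewhere (e.g. via TVMScript):

# debug_mode=True expands to the bitmask -1, i.e. all checks on.
s1 = ScheduleState(matmul, debug_mode=True)

# An explicit integer selects individual checks via ScheduleDebugMask.
s2 = ScheduleState(IRModule({"main": matmul}), debug_mode=0)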
Code example #8
    def __init__(self, shape, dtype):
        self._nodes = {}
        self._params = {}
        self._visited_nodes = set()
        self._ops = {}
        self._shape = shape
        self._dtype = dtype
        self._mod = IRModule({})
Code example #9
def test_tir_schedule_creation():
    # Tests:
    # - Schedule.__init__ for PrimFunc and IRModule
    # - Schedule.mod
    # - Schedule.state
    sch_1 = tir.Schedule(matmul, debug_mode=True)
    sch_2 = tir.Schedule(IRModule({"main": matmul}), debug_mode=True)
    assert sch_1.mod["main"].same_as(sch_2.mod["main"])
    assert sch_1.state.mod["main"].same_as(sch_2.state.mod["main"])
Code example #10
    def __init__(self, shape, dtype, net, model):
        # Network and parameters
        self._NetLayer = self.__getNetLayer(net)
        self._ModelLayer = self.__getModelLayer(model)

        self._params = {}
        self._nodes = {}
        self._LayerList = []
        self._shape = shape
        self._dtype = dtype
        self._mod = IRModule({})
Code example #11
File: build_module.py  Project: inyukwo1/tvm
def create_executor(kind="debug", mod=None, ctx=None, target="llvm"):
    """Factory function to create an executor.

    Example
    -------
    .. code-block:: python

        import tvm.relay
        import numpy as np

        x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
        expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
        tvm.relay.create_executor(
            kind="vm", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr))
        ).evaluate()(np.array([2], dtype="float32"))
        # returns `array([3.], dtype=float32)`

    Parameters
    ----------
    kind : str
        The type of executor. Available options are `debug` for the
        interpreter, `graph` for the graph runtime, and `vm` for the virtual
        machine.

    mod : :py:class:`~tvm.IRModule`
        The Relay module containing a collection of functions.

    ctx : :py:class:`~tvm.runtime.TVMContext`
        The context on which to execute the code.

    target : :py:class:`tvm.Target`
        The compilation target.

    Returns
    -------
    executor : :py:class:`~tvm.relay.backend.interpreter.Executor`
    """
    if mod is None:
        mod = IRModule()
    if ctx is not None:
        assert ctx.device_type == _nd.context(str(target), 0).device_type
    else:
        ctx = _nd.context(str(target), 0)

    if isinstance(target, str):
        target = Target(target)
    if kind == "debug":
        return _interpreter.Interpreter(mod, ctx, target)
    if kind == "graph":
        return GraphExecutor(mod, ctx, target)
    if kind == "vm":
        return VMExecutor(mod, ctx, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
Code example #12
    def __init__(self, source_name: str) -> None:
        self.source_name = source_name
        self.module = IRModule({})  # type: IRModule

        # Adding an empty scope allows naked lets without pain.
        self.var_scopes = deque([deque()])       # type: Scopes[expr.Var]
        self.global_vars = {}                    # type: Scope[expr.GlobalVar]
        self.type_var_scopes = deque([deque()])  # type: Scopes[ty.TypeVar]
        self.global_type_vars = {}               # type: Scope[expr.GlobalVar]
        self.graph_expr = []                     # type: List[expr.Expr]

        super(ParseTreeToRelayIR, self).__init__()
Code example #13
    def __init__(
        self,
        mod: Union[PrimFunc, IRModule],
        *,
        debug_mode: Union[bool, int] = False,
        error_render_level: ERROR_RENDER_LEVEL_CANDIDATES = "detail",
    ) -> None:
        """Construct a concrete TensorIR schedule from an IRModule or a PrimFunc

        Parameters
        ----------
        mod : Union[PrimFunc, IRModule]
            The IRModule or PrimFunc to be scheduled
        debug_mode : Union[bool, int]
            Do extra correctness checking after the class creation and each
            time a scheduling primitive is applied
        error_render_level : str = "detail"
            The level of error rendering. Choices: "detail", "fast", "none".
            "detail": Render a detailed error message, with the TIR and error locations printed
            "fast": Show a simple error message without rendering or string manipulation
            "none": Do not show any error message.

        Note
        ----
        The checks performed include:
        1) VerifySRefTree
        2) VerifyCachedFlags
        """
        if isinstance(mod, PrimFunc):
            mod = IRModule({"main": mod})
        if isinstance(debug_mode, bool):
            if debug_mode:
                debug_mode = -1
            else:
                debug_mode = 0
        if not isinstance(debug_mode, int):
            raise TypeError(
                f"`debug_mode` should be integer or boolean, but gets: {debug_mode}"
            )
        if error_render_level not in Schedule.ERROR_RENDER_LEVEL:
            raise ValueError(
                'error_render_level can be "detail", "fast", or "none", but got: '
                + f"{error_render_level}")
        self.__init_handle_by_constructor__(
            _ffi_api.ConcreteSchedule,  # type: ignore # pylint: disable=no-member
            mod,
            debug_mode,
            Schedule.ERROR_RENDER_LEVEL.get(error_render_level),
        )
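
A usage sketch for the keyword-only arguments above, again with a placeholder `matmul` PrimFunc; the block name "C" is an assumption about that workload:

sch = Schedule(matmul, debug_mode=True, error_render_level="fast")
block = sch.get_block("C")  # any scheduling error here renders in the "fast" style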
Code example #14
def test_conv2d_winograd_cuda():
    mod = conv2d_winograd_cuda
    mod = IRModule({"main": mod})
    context = TuneContext(
        mod=mod,
        target=Target("nvidia/geforce-rtx-3090", host="llvm"),
        task_name="Custom Search Space Task",
        sch_rules=DefaultCUDA._sch_rules(),  # pylint: disable=protected-access
    )
    for sch_rule in context.sch_rules:
        sch_rule.initialize_with_tune_context(context)
    post_order_apply = PostOrderApply()
    post_order_apply.initialize_with_tune_context(context)
    (sch,) = post_order_apply.generate_design_space(mod)
    decisions = dict(
        zip(
            [i for i in sch.trace.insts if i.kind.name.startswith("Sample")],
            [
                # data_pack
                [3, 3],
                [64, 2],
                2,
                # inverse
                [3, 3],
                [2, 64],
                2,
                # bgemm
                [1, 1, 1, 1, 6],
                [1, 1, 1, 3, 2],
                [3, 1, 1, 1, 3],
                [4, 2, 1, 4, 4],
                [32, 1, 4],
                1,
                1,
                # root anno
                2,
                # conv2d
                2,
            ],
        )
    )
    trace = Trace(sch.trace.insts, decisions=decisions)
    sch = Schedule(mod=mod)
    trace.apply_to_schedule(sch, remove_postproc=False)
    answer = sch.mod
    expected = _get_mod()
    tvm.ir.assert_structural_equal(answer, expected)
Code example #15
def all_type_vars(expr, mod=None):
    """Get all type variables from expression/type e

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type

    mod : Optional[tvm.IRModule]
        The global module

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of all type variables in post-DFS order
    """
    use_mod = mod if mod is not None else IRModule()
    return _analysis.all_type_vars(expr, use_mod)
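
A sketch of calling it on a polymorphic identity function over a single type variable `t`:

from tvm import relay

t = relay.TypeVar("t")
x = relay.var("x", t)
ident = relay.Function([x], x, ret_type=t, type_params=[t])

print(all_type_vars(ident))  # a single-element list containing `t`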
Code example #16
def test_conv2d_winograd_cpu():
    mod = conv2d_winograd_cpu
    mod = IRModule({"main": mod})
    context = TuneContext(
        mod=mod,
        target=Target("llvm"),
        task_name="Custom Search Space Task",
        sch_rules=DefaultLLVM._sch_rules(),  # pylint: disable=protected-access
    )
    post_order_apply = PostOrderApply()
    post_order_apply.initialize_with_tune_context(context)
    (sch,) = post_order_apply.generate_design_space(mod)

    decisions = dict(
        zip(
            [
                i for i in sch.trace.insts[:-4]
                if i.kind.name.startswith("Sample")
            ],
            [
                # data_pack
                [9, 1],
                [32, 4],
                # input_tile
                4,
                # data_pad
                -2,
                # inverse
                [1, 9],
                [2, 64],
                # bgemm
                [1, 2, 3, 1],
                [1, 1, 1, 6],
                [1, 1, 1, 9],
                [2, 1, 16, 4],
                [16, 8],
            ],
        ))
    trace = Trace(sch.trace.insts[:-4], decisions=decisions)
    sch = Schedule(mod=mod)
    trace.apply_to_schedule(sch, remove_postproc=False)
    answer = sch.mod
    expected = _get_mod()
    tvm.ir.assert_structural_equal(answer, expected)
Code example #17
    def __init__(self):
        self._nodes = {}
        self._tf_node_map = {}
        self._params = {}
        self._input_shapes = {}
        self._output_shapes = {}
        self._num_rnn_layer = False
        self._loops = {}
        self._branches = {}
        self._mod = IRModule({})
        self._prelude = Prelude(self._mod)
        self._control_flow_node_map = defaultdict(set)
        self._loop_body_order = {}
        self._loop_var_order = {}
        self._lvar2expr = {}
        self._lname_map = {}
        self._sorted_cf_node_names = []
        self._while_loop_name_set = set()
        self._main_graph_proto = self
        self._tensor_array_shapes = {}
        self._tensor_array_shape_nodes = {}
Code example #18
def test_te_workload(workload, flops):
    te_workload = create_te_workload(workload, 0)
    mod = IRModule({"main": te_workload})
    assert float(flops) == estimate_tir_flops(mod)
Code example #19
File: common.py  Project: manupa-arm/incubator-tvm
def fold_constant(node, mod=None):
    if mod is None:
        mod = IRModule()
    return _transform.FoldConstantExpr(node, mod)
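
A sketch of folding a constant-only expression with the helper above:

from tvm import relay

expr = relay.add(relay.const(1.0), relay.const(2.0))
folded = fold_constant(expr)  # a relay.Constant carrying 3.0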
Code example #20
    def __init__(self):
        self.mod = IRModule({})
        self.params = {}
        self.prelude = Prelude(self.mod)
Code example #21
def test_flops_with_let():
    flops = estimate_tir_flops(IRModule({"main": flops_with_let}))
    assert flops == 8
Code example #22
File: prelude.py  Project: zachzzc/incubator-tvm
    def __init__(self, mod=None):
        if mod is None:
            mod = IRModule()
        self.mod = mod
        self.load_prelude()
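
A sketch of the default path above, in which the Prelude creates its own module; that the "List" ADT is registered by the prelude is an assumption about the TVM build in use:

p = Prelude()  # no module given, so a fresh IRModule is created internally
list_var = p.mod.get_global_type_var("List")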