Example #1
def create_executor(kind="debug", mod=None, device=None, target="llvm", params=None):
    """Factory function to create an executor.

    Example
    -------
    .. code-block:: python

        import tvm.relay
        import numpy as np

        x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
        expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
        tvm.relay.create_executor(
            kind="vm", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr))
        ).evaluate()(np.array([2], dtype="float32"))
        # returns `array([3.], dtype=float32)`

    Parameters
    ----------
    kind : str
        The type of executor. Available options are `debug` for the
        interpreter, `graph` for the graph executor, and `vm` for the virtual
        machine.

    mod : :py:class:`~tvm.IRModule`
        The Relay module containing a collection of functions

    device : :py:class:`Device`
        The device to execute the code.

    target : :py:class:`tvm.Target`
        The compilation target

    params : dict of str to NDArray
         Input parameters to the graph that do not change
         during inference time.

    Returns
    -------
    executor : :py:class:`~tvm.relay.backend.interpreter.Executor`
    """
    if mod is None:
        mod = IRModule()
    if device is not None:
        assert device.device_type == _nd.device(str(target), 0).device_type
    else:
        device = _nd.device(str(target), 0)

    if params is not None:
        mod = IRModule.from_expr(bind_params_by_name(mod["main"], params))

    if isinstance(target, str):
        target = Target(target)
    if kind == "debug":
        return _interpreter.Interpreter(mod, device, target)
    if kind == "graph":
        return GraphExecutor(mod, device, target)
    if kind == "vm":
        return VMExecutor(mod, device, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
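
A minimal usage sketch for this factory (assumes a TVM build with the LLVM backend enabled):

import numpy as np
import tvm
from tvm import relay

# Build a one-op module: f(x) = x * 2.
x = relay.var("x", shape=(2,), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], x * relay.const(2.0)))

# kind="debug" runs the interpreter; "graph" and "vm" select the other backends.
executor = relay.create_executor(kind="debug", mod=mod, target="llvm")
print(executor.evaluate()(np.array([1.0, 2.0], dtype="float32")))  # [2. 4.]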
Example #2
def _mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
    if isinstance(mod, PrimFunc):
        mod = mod.with_attr("global_symbol", "main")
        mod = mod.with_attr("tir.noalias", True)
        mod = IRModule({"main": mod})
    if not isinstance(mod, IRModule):
        raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but got: {mod}")
    # Make sure the mod can be found in ApplyHistoryBest:
    # a different function name causes structural inequality.
    func_names = mod.get_global_vars()
    if len(func_names) == 1:
        (func_name,) = func_names
        if func_name.name_hint != "main":
            mod = IRModule({"main": mod[func_name]})
    return mod
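
A sketch of driving this helper with a PrimFunc generated from `te` (assumes a TVM version that provides `te.create_prim_func`):

import tvm
from tvm import te

# A trivial PrimFunc computing B[i] = A[i] + 1.
A = te.placeholder((8,), name="A")
B = te.compute((8,), lambda i: A[i] + 1.0, name="B")
func = te.create_prim_func([A, B])

mod = _mod(func)  # wrapped as IRModule({"main": func})
assert mod.get_global_vars()[0].name_hint == "main"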
Example #3
def from_auto_ml(model, shape=None, dtype="float32", func_name="transform"):
    """
    Import scikit-learn model to Relay.
    """
    try:
        import sklearn  # pylint: disable=unused-import
    except ImportError as e:
        raise ImportError(
            "Unable to import scikit-learn, which is required: {}".format(e))

    if func_name == "transform":
        inexpr_float = _expr.var("input_float", shape=shape, dtype=dtype)
        inexpr_string = _expr.var("input_string", shape=shape, dtype=dtype)
        inexpr = [inexpr_float, inexpr_string]

        first_transformer = model.feature_transformer.steps[0][1]
        if type(first_transformer).__name__ != "ColumnTransformer":
            raise NameError(
                "The first transformer must be a ColumnTransformer, but {} was given"
                .format(type(first_transformer).__name__))

        outexpr = inexpr
        for _, transformer in model.feature_transformer.steps:
            outexpr = sklearn_op_to_relay(transformer, outexpr, shape, dtype,
                                          func_name, None)
    else:
        inexpr = _expr.var("input", shape=shape, dtype=dtype)
        transformer = model.target_transformer
        outexpr = sklearn_op_to_relay(transformer, inexpr, shape, dtype,
                                      func_name, None)

    func = _function.Function(analysis.free_vars(outexpr), outexpr)
    return IRModule.from_expr(func), []
Example #4
    def check(dim, axis, nstep):
        eps = 0.01
        ttype1 = rly.TensorType(tuple(10 for i in range(dim)), dtype)
        ttype2 = rly.TensorType((10, ), dtype)
        x = rly.var("x", ttype1)
        beta = rly.var("beta", ttype2)
        gamma = rly.var("gamma", ttype2)
        moving_var = rly.var("moving_var", ttype2)
        moving_mean = rly.var("moving_mean", ttype2)
        y1, y2 = x, x

        for _ in range(nstep):
            y1, _, _ = rly.nn.batch_norm(y1 + rly.const(1, dtype),
                                         gamma,
                                         beta,
                                         moving_mean,
                                         moving_var,
                                         epsilon=eps,
                                         axis=axis)
            y1 = rly.nn.dropout(y1)
            y2 = simple_bn(y2 + rly.const(1, dtype),
                           gamma,
                           beta,
                           moving_mean,
                           moving_var,
                           epsilon=eps,
                           axis=axis,
                           shape=ttype1.shape)

        mod = IRModule.from_expr(y1)
        simplify = SimplifyInference()
        mod = simplify(mod)
        y1 = mod["main"].body

        assert rly.analysis.graph_equal(y1, y2)
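
A standalone sketch of the pass this test exercises, showing SimplifyInference folding batch_norm at inference time (assumes the standard relay.transform APIs):

import tvm
from tvm import relay

x = relay.var("x", shape=(1, 4), dtype="float32")
gamma, beta = relay.var("gamma", shape=(4,)), relay.var("beta", shape=(4,))
mean, var = relay.var("mean", shape=(4,)), relay.var("var", shape=(4,))
y = relay.nn.batch_norm(x, gamma, beta, mean, var)[0]

mod = tvm.IRModule.from_expr(y)
mod = relay.transform.InferType()(mod)        # SimplifyInference needs type info
mod = relay.transform.SimplifyInference()(mod)
print(mod["main"])  # batch_norm rewritten into multiply/add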
Example #5
def _parse_mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
    if isinstance(mod, PrimFunc):
        mod = IRModule({"main": mod})
    if not isinstance(mod, IRModule):
        raise TypeError(
            f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
    return mod
Example #6
    def from_program(self, program, shape_dict, scope):
        """Construct the TVM relay expression from PaddlePaddle program."""

        self.shape_dict = shape_dict
        if scope is None:
            import paddle

            scope = paddle.fluid.global_scope()
        self.check_unsupported_ops(program)
        self.extract_parameters(program, scope)
        self.ops_to_relay(program)

        output_names = list()
        for block in program.blocks:
            for op in block.ops:
                if op.type == "fetch":
                    output_names.append(op.input("X")[0])

        outputs = [self.nodes[name] for name in output_names]
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)

        free_vars = analysis.free_vars(outputs)
        func = _function.Function(free_vars, outputs)
        mod = IRModule.from_expr(func)
        return mod, self.params
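
The public entry point wrapping this converter is `relay.frontend.from_paddle`; a hedged sketch (the saved-model path and input name are hypothetical, and the paddlepaddle package must be installed):

import paddle
from tvm import relay

layer = paddle.jit.load("./inference_model")  # hypothetical saved model
mod, params = relay.frontend.from_paddle(layer, shape_dict={"inputs": (1, 3, 224, 224)})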
Example #7
def create_executor(kind="debug", mod=None, ctx=None, target="llvm"):
    """Factory function to create an executor.

    Parameters
    ----------
    kind : str
        The type of executor: `debug` for the interpreter, `graph` for the
        graph executor, and `vm` for the virtual machine.

    mod : :py:class:`~tvm.IRModule`
        The Relay module containing collection of functions

    ctx : :py:class:`TVMContext`
        The context to execute the code.

    target : :py:class:`tvm.Target`
        The compilation target
    """
    if mod is None:
        mod = IRModule()
    if ctx is not None:
        assert ctx.device_type == _nd.context(str(target), 0).device_type
    else:
        ctx = _nd.context(str(target), 0)

    if isinstance(target, str):
        target = _target.create(target)
    if kind == "debug":
        return _interpreter.Interpreter(mod, ctx, target)
    if kind == "graph":
        return GraphExecutor(mod, ctx, target)
    if kind == "vm":
        return VMExecutor(mod, ctx, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
Example #8
def replace_ir_builder_module(deep_copy=False, realize=False):
    new_func = tvm.script.from_source(tvm.script.asscript(elementwise))
    other_func = tvm.script.from_source(tvm.script.asscript(elementwise))
    mod = IRModule(functions={"main": new_func, "other": other_func})
    s = tir.ScheduleState(mod, debug_mode=True)
    target = tvm.tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="target",
        body=s.mod["main"].body.block.body[1],
        init=None,
        alloc_buffers=None,
        match_buffers=None,
        annotations=None,
    )
    if realize:
        target = tvm.tir.BlockRealize(
            iter_values=[],
            predicate=True,
            block=target,
        )
    if deep_copy:
        target.__setstate__(target.__getstate__())
    gc.collect()
    return s, target
Example #9
def check_sketches(
    mod: IRModule,
    sketches: List[Schedule],
    expected_mods: List[IRModule],
    expected_decisions: List[List[Tuple[str, List[int]]]],
    *,
    debug_mask="all",
):
    assert len(expected_mods) == len(expected_decisions)
    assert len(sketches) == len(expected_mods)
    expected_mods = [
        IRModule({"main": m}) if not isinstance(m, IRModule) else m for m in expected_mods
    ]
    sketches = list(sketches)
    for expected_id, (expected_mod, expected_decision) in enumerate(
        zip(expected_mods, expected_decisions)
    ):
        sketch_id = _find_match_sketch_id(
            mod,
            sketches,
            expected_mod,
            expected_decision,
            debug_mask=debug_mask,
        )
        if sketch_id is None:
            raise AssertionError(
                f"Expected sketch #{expected_id} doesn't exist in the generated sketches."
            )
        sketches.pop(sketch_id)
Example #10
    def from_translated_layer(self, layer, shape_dict):
        """Construct the TVM relay expression from PaddlePaddle TranslatedLayer."""

        self.shape_dict = shape_dict
        program = layer.program()
        parameters = dict()
        for param in layer.parameters():
            parameters[param.name] = np.array(param.value().get_tensor())
        self.check_unsupported_ops(program)
        self.extract_parameters(program, parameters)

        input_specs = layer._input_spec()
        self.ops_to_relay(program, input_specs)

        output_names = [x.name for x in layer._output_spec()]

        outputs = [self.nodes[name] for name in output_names]
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)

        free_vars = analysis.free_vars(outputs)
        func = _function.Function(free_vars, outputs)
        mod = IRModule.from_expr(func)
        # remove unused parameters
        final_params = dict()
        for var in free_vars:
            if var.name_hint in self.params:
                final_params[var.name_hint] = self.params[var.name_hint]
        self.params = final_params
        return mod, self.params
Example #11
    def __init__(
        self,
        mod: Union[PrimFunc, IRModule],
        debug_mode: Union[bool, int] = False,
    ) -> None:
        """Construct a schedule state from an IRModule or a PrimFunc

        Parameters
        ----------
        mod : Union[PrimFunc, IRModule]
            The IRModule or PrimFunc to be scheduled
        debug_mode : Union[bool, int]
            Do extra correctness checking after the class creation and each time
            after calling the Replace method.
            Possible choices of `debug_mode`:
            1) True - Turn on all the checks
            2) False - Turn off all the checks
            3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
        """
        if isinstance(mod, PrimFunc):
            mod = IRModule({"main": mod})
        if isinstance(debug_mode, bool):
            if debug_mode:
                debug_mode = -1
            else:
                debug_mode = 0
        if not isinstance(debug_mode, int):
            raise TypeError(
                f"`debug_mode` should be integer or boolean, but gets: {debug_mode}"
            )
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleState,  # type: ignore # pylint: disable=no-member
            mod,
            debug_mode,
        )
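
A minimal construction sketch, matching the `debug_mode` API shown above (assumes `te.create_prim_func` is available):

import tvm
from tvm import te, tir

A = te.placeholder((128,), name="A")
B = te.compute((128,), lambda i: A[i] * 2.0, name="B")
func = te.create_prim_func([A, B])

# The PrimFunc is wrapped internally as IRModule({"main": func}).
state = tir.ScheduleState(func, debug_mode=True)
print(state.mod["main"])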
Example #12
    def from_darknet(self):
        """To convert the darknet symbol to relay functions."""
        for i in range(self._net.n):
            layer = self._net.layers[i]
            need_skip, sym = self._preproc_layer(layer, i)
            if need_skip:
                continue

            processed, sym = self._handle_darknet_rnn_layers(i, sym)
            if processed:
                continue

            attr = self._get_darknet_attrs(layer, i)
            op_name = self._get_opname(layer)
            prefix = _get_params_prefix(op_name, i)
            params = self._get_darknet_params(self._net.layers[i], prefix)
            sym = _darknet_convert_symbol(op_name, _as_list(sym), params, attr,
                                          prefix)

            if params:
                self._tvmparams.update(params)
            self._sym_array[i] = sym
            self._make_outlist(sym, prefix, layer, i)

        outputs = _as_list(sym) + self._outs
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
        sym = _function.Function(analysis.free_vars(outputs), outputs)
        return IRModule.from_expr(sym), self._tvmparams
Example #13
def _set_params(mod, input_scale_func, weight_scale_func):
    quantize_op = _op.get("relay.op.annotation.simulated_quantize")
    cfg = quantize.current_qconfig()
    const_params = {}

    def visit_func(expr):
        '''Visitor function for expression traversal.'''
        if isinstance(expr, _expr.Call) and expr.op == quantize_op:
            _, ndom_scale, nclip_min, nclip_max = expr.args
            attrs = expr.attrs
            kind = attrs.kind
            nbit = cfg.get_nbit_by_kind(kind)
            valid_bit = nbit - attrs.sign

            # set scale
            if kind == quantize.QAnnotateKind.WEIGHT:
                assert isinstance(expr.args[0], _expr.Constant)
                scale = weight_scale_func(expr)
            else:
                scale = input_scale_func(expr)

            def _make_const(val):
                return _expr.const(val, 'float32')

            valid_range = 2**valid_bit
            const_params[ndom_scale] = _make_const(scale / valid_range)
            const_params[nclip_min] = _make_const(-(valid_range - 1))
            const_params[nclip_max] = _make_const(valid_range - 1)

    func = mod['main']
    _analysis.post_order_visit(func, visit_func)
    func = _expr.bind(func, const_params)
    return IRModule.from_expr(func)
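
For context, a hedged sketch of the public quantization flow this helper participates in (a toy dense layer; exact behavior depends on the qconfig):

import numpy as np
import tvm
from tvm import relay
from tvm.relay import quantize

x = relay.var("x", shape=(1, 4), dtype="float32")
w = relay.const(np.random.uniform(-1, 1, (4, 4)).astype("float32"))
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.dense(x, w)))

with quantize.qconfig(calibrate_mode="global_scale", global_scale=8.0):
    qmod = quantize.quantize(mod, params={})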
Example #14
        def _interp_wrapper(*args, **kwargs):
            if expr is None:
                args = self._convert_args(self.mod["main"], args, kwargs)
            else:
                args = self._convert_args(expr, args, kwargs)

            relay_args = []
            for arg in args:
                relay_args.append(_arg_to_ast(self.mod, arg))

            # Set the entry function for the module.
            if expr is None:
                pass
            elif isinstance(expr, GlobalVar):
                self.mod["main"] = self.mod[expr]
            else:
                assert isinstance(expr, Function)
                func = Function([], Call(expr, relay_args))
                relay_args = []
                if self.mod:
                    self.mod["main"] = func
                else:
                    self.mod = IRModule.from_expr(func)

            mod = self.optimize()
            opt_expr = Call(mod["main"], relay_args)
            return _intrp(opt_expr)
Example #15
def infer_value(input_val, params, mod=None):
    """A hack for getting the value of an expression by evaluating a
    portion of the relay graph. This is often needed for functions
    whose output shape depends on the value of a tensor.
    """
    # Check that all free variables have associated parameters.
    assert all(
        var.name_hint in params.keys() for var in analysis.free_vars(input_val)
    ), "All inputs to infer must be available in params."
    try:
        # TODO(kevinthesun): Use VM for all cases.
        # pylint: disable=import-outside-toplevel
        from tvm.contrib import graph_runtime

        func = _function.Function(analysis.free_vars(input_val), input_val)
        with tvm.transform.PassContext(opt_level=0):
            lib = tvm.relay.build(func, target="llvm", params=params)
        ctx = tvm.cpu(0)
        m = graph_runtime.GraphModule(lib["default"](ctx))
        m.run()
        return m.get_output(0)
    except Exception:
        if isinstance(mod, IRModule):
            mod["main"] = _function.Function(analysis.free_vars(input_val), input_val)
        else:
            mod = IRModule.from_expr(input_val)
        exc = tvm.relay.create_executor("debug", mod=mod, ctx=tvm.cpu(), target="llvm")
        inputs = []
        for param in mod["main"].params:
            inputs.append(params[param.name_hint])
        result = exc.evaluate()(*inputs)
        return result
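
A usage sketch, assuming the helper above is in scope:

import numpy as np
import tvm
from tvm import relay

# Evaluate x + 1 for a known input; the result is a concrete NDArray.
x = relay.var("x", shape=(2,), dtype="float32")
expr = x + relay.const(np.ones(2, dtype="float32"))
val = infer_value(expr, {"x": tvm.nd.array(np.array([1.0, 2.0], dtype="float32"))})
print(val)  # [2. 3.]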
Example #16
    def __init__(self, shape, dtype):
        self._nodes = {}
        self._params = {}
        self._visited_nodes = set()
        self._ops = {}
        self._shape = shape
        self._dtype = dtype
        self._mod = IRModule({})
Example #17
def infer_type(node, mod=None):
    """A method to infer the type of an intermediate node in the relay graph."""
    new_mod = IRModule.from_expr(node)
    if mod is not None:
        new_mod.update(mod)
    new_mod = _transform.InferType()(new_mod)
    entry = new_mod["main"]
    return entry if isinstance(node, _function.Function) else entry.body
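
A usage sketch, assuming the helper above is in scope:

from tvm import relay

x = relay.var("x", shape=(4,), dtype="float32")
y = relay.nn.relu(x)
print(infer_type(y).checked_type)  # Tensor[(4,), float32]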
Example #18
def test_tir_schedule_creation():
    # Tests:
    # - Schedule.__init__ for PrimFunc and IRModule
    # - Schedule.mod
    # - Schedule.state
    sch_1 = tir.Schedule(matmul, debug_mode=True)
    sch_2 = tir.Schedule(IRModule({"main": matmul}), debug_mode=True)
    assert sch_1.mod["main"].same_as(sch_2.mod["main"])
    assert sch_1.state.mod["main"].same_as(sch_2.state.mod["main"])
Example #19
    def __init__(self, shape, dtype, net, model):
        # Network and parameters
        self._NetLayer = self.__getNetLayer(net)
        self._ModelLayer = self.__getModelLayer(model)

        self._params = {}
        self._nodes = {}
        self._LayerList = []
        self._shape = shape
        self._dtype = dtype
        self._mod = IRModule({})
Example #20
def create_executor(kind="debug", mod=None, ctx=None, target="llvm"):
    """Factory function to create an executor.

    Example
    -------
    .. code-block:: python

        import tvm.relay
        import numpy as np

        x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
        expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
        tvm.relay.create_executor(
            kind="vm", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr))
        ).evaluate()(np.array([2], dtype="float32"))
        # returns `array([3.], dtype=float32)`

    Parameters
    ----------
    kind : str
        The type of executor. Available options are `debug` for the
        interpreter, `graph` for the graph runtime, and `vm` for the virtual
        machine.

    mod : :py:class:`~tvm.IRModule`
        The Relay module containing collection of functions

    ctx : :py:class:`TVMContext`
        The context to execute the code.

    target : :py:class:`tvm.Target`
        The compilation target

    Returns
    -------
    executor : :py:class:`~tvm.relay.backend.interpreter.Executor`
    """
    if mod is None:
        mod = IRModule()
    if ctx is not None:
        assert ctx.device_type == _nd.context(str(target), 0).device_type
    else:
        ctx = _nd.context(str(target), 0)

    if isinstance(target, str):
        target = Target(target)
    if kind == "debug":
        return _interpreter.Interpreter(mod, ctx, target)
    if kind == "graph":
        return GraphExecutor(mod, ctx, target)
    if kind == "vm":
        return VMExecutor(mod, ctx, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
Example #21
    def __init__(self, source_name: str) -> None:
        self.source_name = source_name
        self.module = IRModule({})  # type: IRModule

        # Adding an empty scope allows naked lets without pain.
        self.var_scopes = deque([deque()])       # type: Scopes[expr.Var]
        self.global_vars = {}                    # type: Scope[expr.GlobalVar]
        self.type_var_scopes = deque([deque()])  # type: Scopes[ty.TypeVar]
        self.global_type_vars = {}               # type: Scope[expr.GlobalVar]
        self.graph_expr = []                     # type: List[expr.Expr]

        super(ParseTreeToRelayIR, self).__init__()
Example #22
def optimize(mod, target=None, params=None):
    """Helper function that optimizes a Relay module.

    Parameters
    ----------
    mod : :py:class:`~tvm.IRModule`
        The module to build. Using relay.Function is deprecated.

    target : None, or any multi-target-like object; see Target.canon_multi_target
        For homogeneous compilation, the unique build target.
        For heterogeneous compilation, a dictionary or list of possible build targets.
        Defaults to the current target in the environment if None.

    params : dict of str to NDArray
        Input parameters to the graph that do not change
        during inference time. Used for constant folding.

    Returns
    -------
    mod : :py:class:`~tvm.IRModule`
        The optimized relay module.

    params : dict
        The parameters of the final graph.
    """
    if not isinstance(mod, (IRModule, _function.Function)):
        raise ValueError("Type of input parameter mod must be tvm.IRModule")

    if isinstance(mod, _function.Function):
        if params:
            mod = bind_params_by_name(mod, params)
        mod = IRModule.from_expr(mod)
        warnings.warn(
            "Please use input parameter mod (tvm.IRModule) "
            "instead of deprecated parameter func (tvm.relay.function.Function)",
            DeprecationWarning,
        )

    raw_targets = Target.canon_multi_target_and_host(
        Target.target_or_current(target))

    # If current dispatch context is fallback context (the default root context),
    # then load pre-tuned parameters from TopHub
    if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
        tophub_context = autotvm.tophub.context(raw_targets)
    else:
        tophub_context = autotvm.utils.EmptyContext()

    with tophub_context:
        bld_mod = BuildModule()
        mod, params = bld_mod.optimize(mod, target=raw_targets, params=params)
    return mod, params
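
A minimal call sketch (assumes an LLVM-enabled TVM build):

import tvm
from tvm import relay

x = relay.var("x", shape=(1, 8), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
opt_mod, opt_params = relay.optimize(mod, target="llvm")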
Example #23
def optimize(mod, target=None, params=None):
    """Helper function that optimizes a Relay module.

    Parameters
    ----------
    mod : :py:class:`~tvm.IRModule`
        The module to build. Using relay.Function is deprecated.

    target : str, :any:`tvm.target.Target`, or dict of str (i.e. device/context
    name) to str/tvm.target.Target, optional
        For heterogeneous compilation, it is a dictionary indicating context to
        target mapping. For homogeneous compilation, it is a build target.

    params : dict of str to NDArray
        Input parameters to the graph that do not change
        during inference time. Used for constant folding.

    Returns
    -------
    mod : :py:class:`~tvm.IRModule`
        The optimized relay module.

    params : dict
        The parameters of the final graph.
    """
    if not isinstance(mod, (IRModule, _function.Function)):
        raise ValueError("Type of input parameter mod must be tvm.IRModule")

    if isinstance(mod, _function.Function):
        if params:
            mod = bind_params_by_name(mod, params)
        mod = IRModule.from_expr(mod)
        warnings.warn(
            "Please use input parameter mod (tvm.IRModule) "
            "instead of deprecated parameter func (tvm.relay.function.Function)",
            DeprecationWarning,
        )

    target = _update_target(target)

    # If current dispatch context is fallback context (the default root context),
    # then load pre-tuned parameters from TopHub
    if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
        tophub_context = autotvm.tophub.context(list(target.values()))
    else:
        tophub_context = autotvm.utils.EmptyContext()

    with tophub_context:
        bld_mod = BuildModule()
        mod, params = bld_mod.optimize(mod, target, params)
    return mod, params
Example #24
    def visit_call(self, call):
        # Keep track of our current depth and layer count
        # so we can know whether to skip this layer or not.
        current_depth = self.depth_count
        current_layer = self.valid_op_count - current_depth - 1
        if call.op in self.valid_ops:
            self.depth_count += 1
        # Visit current call operation
        new_fn = self.visit(call.op)
        # Visit current arguments
        args = []
        for arg in call.args:
            args.append(self.visit(arg))
            self.depth_count = current_depth

        # Downcast this op if it's the correct type and not skipped.
        if call.op in self.valid_ops and current_layer not in self.skip_layers:
            # Recast inputs to specified type.
            args = [self.visit(arg) for arg in call.args]
            new_args = list()
            for arg in args:
                new_args.append(relay.cast(arg, dtype=self.dtype))

            # If out_dtype is in the attributes, we need to update it.
            orig_dtype = None
            if call.attrs is not None and "out_dtype" in call.attrs.keys():
                new_attr_dict = {}
                for attr in call.attrs.keys():
                    attr_value = call.attrs[attr]
                    if isinstance(attr_value, tvm.ir.container.Array):
                        attr_value = tuple(attr_value)
                    new_attr_dict[str(attr)] = attr_value
                new_attr_dict["out_dtype"] = self.out_dtype
                attr_type = str(call.attrs).split("(")[0]
                new_attrs = tvm.ir.make_node(attr_type, **new_attr_dict)
                if call.attrs["out_dtype"] != "":
                    orig_dtype = call.attrs["out_dtype"]
            else:
                new_attrs = call.attrs

            if orig_dtype is None:
                # Perform type inference to determine the original type.
                new_mod = IRModule.from_expr(call)
                new_mod = InferType()(new_mod)
                checked_arg = new_mod["main"].body
                orig_dtype = checked_arg.checked_type.dtype
            # Recast the output for compatibility with other graph operations.
            return relay.cast(Call(new_fn, new_args, new_attrs), orig_dtype)

        # Otherwise return the unchanged call.
        return Call(new_fn, args, call.attrs)
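
The public wrapper around this mutator is `relay.transform.recast`; a hedged sketch recasting a conv2d to int8 with int32 accumulation:

from tvm import relay
from tvm.relay.transform import recast

x = relay.var("x", shape=(1, 4, 8, 8), dtype="float32")
w = relay.var("w", shape=(4, 4, 3, 3), dtype="float32")
expr = relay.nn.conv2d(x, w, padding=(1, 1))
out = recast(expr, "int8", "int32", ops=["nn.conv2d"])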
Example #25
    def preprocess_mod(mod, params):
        if not isinstance(mod, (IRModule, _function.Function)):
            raise ValueError("Type of input parameter mod must be tvm.IRModule")

        if isinstance(mod, _function.Function):
            if params:
                mod = bind_params_by_name(mod, params)
            mod = IRModule.from_expr(mod)
            warnings.warn(
                "Please use input parameter mod (tvm.IRModule) "
                "instead of deprecated parameter mod (tvm.relay.function.Function)",
                DeprecationWarning,
            )
        return mod
Example #26
    def __init__(
        self,
        mod: Union[PrimFunc, IRModule],
        *,
        debug_mode: Union[bool, int] = False,
        error_render_level: ERROR_RENDER_LEVEL_CANDIDATES = "detail",
    ) -> None:
        """Construct a concrete TensorIR schedule from an IRModule or a PrimFunc

        Parameters
        ----------
        mod : Union[PrimFunc, IRModule]
            The IRModule or PrimFunc to be scheduled
        debug_mode : Union[bool, int]
            Do extra correctness checking after the class creation and each
            time a scheduling primitive is invoked
        error_render_level : str = "detail"
            The level of error rendering. Choices: "detail", "fast", "none".
            "detail": Render a detailed error message, with the TIR and error locations printed
            "fast: Show a simple error message without rendering or string manipulation
            "none": Do not show any error message.

        Note
        ----
        The checks performed include:
        1) VerifySRefTree
        2) VerifyCachedFlags
        """
        if isinstance(mod, PrimFunc):
            mod = IRModule({"main": mod})
        if isinstance(debug_mode, bool):
            if debug_mode:
                debug_mode = -1
            else:
                debug_mode = 0
        if not isinstance(debug_mode, int):
            raise TypeError(
                f"`debug_mode` should be integer or boolean, but gets: {debug_mode}"
            )
        if error_render_level not in Schedule.ERROR_RENDER_LEVEL:
            raise ValueError(
                'error_render_level can be "detail", "fast", or "none", but got: '
                + f"{error_render_level}")
        self.__init_handle_by_constructor__(
            _ffi_api.ConcreteSchedule,  # type: ignore # pylint: disable=no-member
            mod,
            debug_mode,
            Schedule.ERROR_RENDER_LEVEL.get(error_render_level),
        )
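
A minimal sketch of creating a schedule and applying one primitive, matching the `debug_mode` API above:

import tvm
from tvm import te, tir

A = te.placeholder((128,), name="A")
B = te.compute((128,), lambda i: A[i] + 1.0, name="B")
sch = tir.Schedule(te.create_prim_func([A, B]), debug_mode=True)

block = sch.get_block("B")
(i,) = sch.get_loops(block)
sch.split(i, factors=[None, 32])  # errors render per error_render_level
print(sch.mod["main"])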
Example #27
def test_conv2d_winograd_cuda():
    mod = conv2d_winograd_cuda
    mod = IRModule({"main": mod})
    context = TuneContext(
        mod=mod,
        target=Target("nvidia/geforce-rtx-3090", host="llvm"),
        task_name="Custom Search Space Task",
        sch_rules=DefaultCUDA._sch_rules(),  # pylint: disable=protected-access
    )
    for sch_rule in context.sch_rules:
        sch_rule.initialize_with_tune_context(context)
    post_order_apply = PostOrderApply()
    post_order_apply.initialize_with_tune_context(context)
    (sch,) = post_order_apply.generate_design_space(mod)
    decisions = dict(
        zip(
            [i for i in sch.trace.insts if i.kind.name.startswith("Sample")],
            [
                # data_pack
                [3, 3],
                [64, 2],
                2,
                # inverse
                [3, 3],
                [2, 64],
                2,
                # bgemm
                [1, 1, 1, 1, 6],
                [1, 1, 1, 3, 2],
                [3, 1, 1, 1, 3],
                [4, 2, 1, 4, 4],
                [32, 1, 4],
                1,
                1,
                # root anno
                2,
                # conv2d
                2,
            ],
        )
    )
    trace = Trace(sch.trace.insts, decisions=decisions)
    sch = Schedule(mod=mod)
    trace.apply_to_schedule(sch, remove_postproc=False)
    answer = sch.mod
    expected = _get_mod()
    tvm.ir.assert_structural_equal(answer, expected)
Example #28
def from_sklearn(model, shape=None, dtype="float32", func_name="transform", columns=None):
    """
    Import scikit-learn model to Relay.
    """
    try:
        import sklearn  # pylint: disable=unused-import
    except ImportError as e:
        raise ImportError("Unable to import scikit-learn which is required {}".format(e))

    if type(model).__name__ == "ColumnTransformer":
        raise NameError("ColumnTransformer is not supported for single op compilation.")

    inexpr = _expr.var("input", shape=shape, dtype=dtype)
    outexpr = sklearn_op_to_relay(model, inexpr, shape, dtype, func_name, columns)

    func = _function.Function(analysis.free_vars(outexpr), outexpr)
    return IRModule.from_expr(func), []
Example #29
def infer_type(node, mod=None):
    """A method to infer the type of an intermediate node in the relay graph."""
    if isinstance(mod, IRModule):
        mod["main"] = _function.Function(tvm.relay.analysis.free_vars(node), node)
        mod = _transform.InferType()(mod)
        entry = mod["main"]
        ret = entry.body
    else:
        new_mod = IRModule.from_expr(node)
        if mod is not None:
            new_mod.update(mod)

        new_mod = _transform.InferType()(new_mod)
        entry = new_mod["main"]
        ret = entry if isinstance(node, _function.Function) else entry.body

    return ret
Example #30
def all_type_vars(expr, mod=None):
    """Get all type variables from expression/type e

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type

    mod : Optional[tvm.IRModule]
        The global module

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of all type variables in post-DFS order
    """
    use_mod = mod if mod is not None else IRModule()
    return _analysis.all_type_vars(expr, use_mod)
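
A usage sketch, assuming the function above is in scope:

from tvm import relay

# An identity function polymorphic over type variable `t`.
t = relay.TypeVar("t")
x = relay.var("x", t)
f = relay.Function([x], x, ret_type=t, type_params=[t])
print(all_type_vars(f))  # e.g. [TypeVar(t)]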