Example #1
0
    def after():
        """Build the expected post-annotation module: every operator is
        bracketed by compiler_begin / compiler_end markers for ``target``."""
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")

        # Annotated equality check that feeds the If condition.
        begin_eq1 = relay.annotation.compiler_begin(eq1, target)
        begin_eq2 = relay.annotation.compiler_begin(eq2, target)
        end_cond = relay.annotation.compiler_end(
            relay.equal(begin_eq1, begin_eq2), target
        )

        # True branch: annotated tanh(data).
        end_true = relay.annotation.compiler_end(
            relay.tanh(relay.annotation.compiler_begin(data, target)), target
        )

        # False branch: annotated sigmoid(data).
        end_false = relay.annotation.compiler_end(
            relay.sigmoid(relay.annotation.compiler_begin(data, target)), target
        )

        # erf over the If result, also annotated.
        branch = relay.If(end_cond, end_true, end_false)
        end_erf = relay.annotation.compiler_end(
            relay.erf(relay.annotation.compiler_begin(branch, target)), target
        )
        func = relay.Function([data, eq1, eq2], end_erf)
        return tvm.IRModule.from_expr(func)
Example #2
0
def _erf_legalize(attrs, inputs, arg_types):
    """Legalizes ERF op if needed.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of the current erf op.
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized.
    arg_types : list of types
        List of input and output types.

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no legalization is needed.
    """
    # Extract the input expression and its type.
    data = inputs[0]
    data_tensor = arg_types[0]
    data_dtype = data_tensor.dtype
    # erf is only supported for float32: cast in, compute, cast back out.
    if data_dtype != "float32":
        data = relay.cast(data, "float32")
        output = relay.erf(data)
        return relay.cast(output, data_dtype)

    # Already float32 -- returning None tells the legalize pass "no change".
    return None
Example #3
0
    def get_func_with_control_flow():
        """Return a function computing erf over an If node: zeros(...) on
        the true branch, a conv2d on the false branch."""
        data = relay.var("data", shape=(1, 3, 224, 224))
        weight = relay.var("weight", shape=(32, 3, 3, 3))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")

        condition = relay.equal(eq1, eq2)
        zeros_branch = relay.zeros(shape=(1, 32, 222, 222), dtype="float32")
        conv_branch = relay.nn.conv2d(data, weight, kernel_size=(3, 3), channels=32)
        result = relay.erf(relay.If(condition, zeros_branch, conv_branch))
        return relay.Function([data, weight, eq1, eq2], result)
Example #4
0
def test_erf():
    """FastMath should rewrite erf into fast_erf, both when applied
    directly and when requested through relay.optimize's PassContext."""
    inp = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
    mod = tvm.IRModule.from_expr(relay.Function([inp], relay.erf(inp)))

    # Direct application of the pass.
    assert "fast_erf" in FastMath()(mod).astext()

    # Check that FastMath option works for relay.build.
    with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
        fast_mod = relay.optimize(mod, target="llvm", params=None)
    assert "fast_erf" in fast_mod[0].astext()
Example #5
0
    def before():
        """Build the un-annotated input module: erf over an If node that
        chooses between tanh(data) and sigmoid(data)."""
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")

        branch = relay.If(
            relay.equal(eq1, eq2), relay.tanh(data), relay.sigmoid(data)
        )
        func = relay.Function([data, eq1, eq2], relay.erf(branch))
        return tvm.IRModule.from_expr(func)
Example #6
0
def test_tensorrt_not_compatible():
    """EnableTrt must leave the function un-annotated when the graph
    contains an op (erf) that TensorRT cannot offload."""
    if should_skip():
        return
    dtype = 'float32'
    xshape = (1, 32, 14, 14)
    inp = relay.var('x', shape=(xshape), dtype=dtype)
    body = relay.nn.relu(relay.erf(relay.add(inp, inp)))
    mod = relay.Module()
    mod['main'] = relay.Function([inp], body)
    mod = relay.tensorrt.EnableTrt(mod)
    # Absent attrs means the TensorRT partitioning was (correctly) skipped.
    assert not mod['main'].attrs
Example #7
0
def test_tensorrt_not_compatible():
    """Partition an add -> erf -> relu graph for TensorRT and run it on
    both the graph and vm executors.

    erf is not TensorRT-compatible, so it must fall back to regular TVM
    codegen while the module still compiles and executes.
    """
    if skip_codegen_test():
        return
    dtype = "float32"
    xshape = (1, 32, 14, 14)
    x_data = np.random.uniform(-1, 1, xshape).astype(dtype)

    x = relay.var("x", shape=(xshape), dtype=dtype)
    y = relay.add(x, x)
    z = relay.erf(y)
    out = relay.nn.relu(z)
    f = relay.Function([x], out)
    mod = tvm.IRModule()
    mod["main"] = f
    mod, config = tensorrt.partition_for_tensorrt(mod)
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3, config={"relay.ext.tensorrt.options": config}):
            # Renamed from `exec`, which shadowed the builtin exec().
            executor = relay.create_executor(mode, mod=mod, ctx=tvm.gpu(0), target="cuda")
            if not skip_runtime_test():
                results = executor.evaluate()(x_data)
Example #8
0
def test_tensorrt_not_compatible():
    """Build and run a TensorRT-partitioned add -> erf -> relu graph via
    graph_runtime; erf is unsupported, so it falls back to TVM codegen."""
    if skip_codegen_test():
        return
    dtype = "float32"
    xshape = (1, 32, 14, 14)

    inp = relay.var("x", shape=(xshape), dtype=dtype)
    body = relay.nn.relu(relay.erf(relay.add(inp, inp)))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], body)
    mod, config = tensorrt.partition_for_tensorrt(mod)

    with tvm.transform.PassContext(opt_level=3, config={"relay.ext.tensorrt.options": config}):
        graph, lib, params = relay.build(mod, "cuda")
    if skip_runtime_test():
        return

    runtime = graph_runtime.create(graph, lib, ctx=tvm.gpu(0))
    x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
    runtime.run(x=x_data)
    results = [runtime.get_output(i).asnumpy() for i in range(runtime.get_num_outputs())]