Example #1
def relay_lstm_cell(batch_size, input_size, hidden_size):
    # based on https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM
    state_tensor_type = relay.TensorType((batch_size, hidden_size))
    state_tuple_type = relay.TupleType([state_tensor_type, state_tensor_type])

    inp = relay.var("input", shape=(batch_size, input_size))
    state = relay.Var("state", type_annotation=state_tuple_type)

    w_ih = relay.var("w_ih", shape=(4 * hidden_size, input_size))
    w_hh = relay.var("w_hh", shape=(4 * hidden_size, hidden_size))
    b_ih = relay.var("b_ih", shape=(4 * hidden_size, ))
    b_hh = relay.var("b_hh", shape=(4 * hidden_size, ))

    hidden = relay.TupleGetItem(state, 0)
    cell_state = relay.TupleGetItem(state, 1)

    # PyTorch packs the i2h and h2h weights and biases together so we will match that here
    w_i_splits = relay.split(w_ih, 4, 0)
    w_h_splits = relay.split(w_hh, 4, 0)
    b_i_splits = relay.split(b_ih, 4, 0)
    b_h_splits = relay.split(b_hh, 4, 0)
    w_ii, w_if, w_ig, w_io = w_i_splits[0], w_i_splits[1], w_i_splits[2], w_i_splits[3]
    w_hi, w_hf, w_hg, w_ho = w_h_splits[0], w_h_splits[1], w_h_splits[2], w_h_splits[3]
    b_ii, b_if, b_ig, b_io = b_i_splits[0], b_i_splits[1], b_i_splits[2], b_i_splits[3]
    b_hi, b_hf, b_hg, b_ho = b_h_splits[0], b_h_splits[1], b_h_splits[2], b_h_splits[3]

    def weighted_value(weight, value, bias):
        return relay.transpose(
            relay.nn.dense(weight, value) +
            relay.reshape(bias, (hidden_size, 1)))

    i_t = relay.sigmoid(
        weighted_value(w_ii, inp, b_ii) + weighted_value(w_hi, hidden, b_hi))
    f_t = relay.sigmoid(
        weighted_value(w_if, inp, b_if) + weighted_value(w_hf, hidden, b_hf))
    g_t = relay.tanh(
        weighted_value(w_ig, inp, b_ig) + weighted_value(w_hg, hidden, b_hg))
    o_t = relay.sigmoid(
        weighted_value(w_io, inp, b_io) + weighted_value(w_ho, hidden, b_ho))
    c_t = f_t * cell_state + i_t * g_t
    h_t = o_t * relay.tanh(c_t)

    h_var = relay.Var("h")
    c_var = relay.Var("c")
    return relay.Function(
        [inp, state, w_ih, w_hh, b_ih, b_hh],
        relay.Let(
            h_var, h_t,
            relay.Let(c_var, c_t,
                      relay.Tuple([h_var, relay.Tuple([h_var, c_var])]))),
        ret_type=relay.TupleType([state_tensor_type, state_tuple_type]))
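A minimal usage sketch (assumed, not part of the original test): the returned function can be wrapped in an IRModule and run through InferType to confirm the declared return type.

import tvm
from tvm import relay

cell = relay_lstm_cell(batch_size=1, input_size=8, hidden_size=16)
mod = tvm.IRModule.from_expr(cell)
mod = relay.transform.InferType()(mod)
print(mod["main"].ret_type)  # tuple of (output, (hidden, cell)) tensor types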
Example #2
def test_conv2d_residual_block():
    d_shape = (16, 16, 32, 32)
    w_shape = (16, 16, 3, 3)
    padding = (1, 1)

    bias_add, residual_input = get_conv2d_nchw_bias_residual(
        d_shape, w_shape, padding)

    for func, tol in [
        (relay.nn.relu(bias_add + residual_input), 1e-5),
        (relay.nn.relu(bias_add) + residual_input, 1e-5),
        (relay.sigmoid(bias_add) * residual_input, 1e-5),
        (relay.nn.relu(silu(bias_add) * residual_input), 1e-5),
        # HardSwish requires a higher tolerance because the residual block
        # epilogue is vectorized in CUTLASS.
        # TODO(masahi): Investigate this issue
        (relay.nn.relu(hardswish(bias_add) + residual_input), 1e-3),
    ]:
        verify_conv2d(func,
                      func,
                      d_shape,
                      w_shape,
                      sm=80,
                      atol=tol,
                      rtol=tol,
                      run_benchmark=False)
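The silu and hardswish helpers referenced above are defined elsewhere in the test file; silu appears verbatim in Example #27, and a plausible sketch of hardswish, assuming the standard x * relu6(x + 3) / 6 form, is:

def silu(x):
    # SiLU (swish): x * sigmoid(x)
    return x * relay.sigmoid(x)

def hardswish(x, out_dtype="float16"):
    # HardSwish: x * relu6(x + 3) / 6
    return x * (
        relay.clip(x + relay.const(3, dtype=out_dtype), a_min=0.0, a_max=6.0)
        / relay.const(6, dtype=out_dtype)
    )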
Example #3
    def after():
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")

        cb_1 = relay.annotation.compiler_begin(eq1, target)
        cb_2 = relay.annotation.compiler_begin(eq2, target)

        equality_condition = relay.equal(cb_1, cb_2)
        ce_1 = relay.annotation.compiler_end(equality_condition, target)

        # true branch
        cb_3 = relay.annotation.compiler_begin(data, target)
        true_branch = relay.tanh(cb_3)
        ce_2 = relay.annotation.compiler_end(true_branch, target)

        # false branch
        cb_4 = relay.annotation.compiler_begin(data, target)
        false_branch = relay.sigmoid(cb_4)
        ce_3 = relay.annotation.compiler_end(false_branch, target)

        if_condition = relay.If(ce_1, ce_2, ce_3)
        cb_5 = relay.annotation.compiler_begin(if_condition, target)
        erf_out = relay.erf(cb_5)
        ce_4 = relay.annotation.compiler_end(erf_out, target)
        func = relay.Function([data, eq1, eq2], ce_4)
        mod = tvm.IRModule.from_expr(func)
        return mod
Example #4
def before(dim):
    X = relay.var("X", shape=(1, dim))
    W = relay.var("W", shape=(3 * dim, dim))
    matmul = relay.nn.dense(X, W)
    splitted = relay.split(matmul, indices_or_sections=3, axis=1)
    out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
    return relay.Function([X, W], out)
Example #5
def test_batch_matmul_rewrite():
    data = relay.var("data", shape=(1, 4, 16, 16))
    data2 = relay.sigmoid(relay.var("data", shape=(4, 16, 64)))
    out = relay.nn.conv2d(data,
                          relay.var("weight"),
                          kernel_size=(3, 3),
                          padding=(1, 1),
                          channels=8)

    out = relay.nn.batch_flatten(out)
    out = relay.reshape(out, [1, 32, 64])
    out = relay.nn.batch_matmul(out, data2)

    qmod = quantize_and_build(out)

    def _check_batch_matmul(node):
        if isinstance(node, Call):

            if node.op.name in ["nn.batch_matmul", "nn.conv2d"]:
                assert node.checked_type.dtype == "int32"
            elif node.op.name == "nn.batch_flatten":
                assert node.checked_type.dtype == "int8"

    # check if batch_matmul is quantized
    relay.analysis.post_order_visit(qmod["main"], _check_batch_matmul)
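The quantize_and_build helper used by this and the later quantization examples is defined elsewhere in the test file; a plausible sketch, assuming the standard relay.quantize flow:

from tvm import relay
from tvm.relay import testing

def quantize_and_build(out, skip_conv_layers=()):
    # Wrap the expression into a workload, quantize it, and make sure it compiles.
    func = relay.Function(relay.analysis.free_vars(out), out)
    mod, params = testing.create_workload(func)
    with relay.quantize.qconfig(skip_conv_layers=list(skip_conv_layers)):
        qmod = relay.quantize.quantize(mod, params)
    relay.build(qmod, "llvm")
    return qmod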
Example #6
File: test_dnnl.py Project: wenxcs/tvm
def get_conv2d_transpose(
        x_shape=(1, 32, 8, 8),
        k_shape=(32, 16, 3, 3),
        groups=1,
        padding=(0, 0),
        strides=(1, 1),
        activation=None,
        dtype="float32",
):
    x = relay.var("x", shape=x_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=k_shape, dtype=dtype)
    out = relay.nn.conv2d_transpose(
        x,
        kernel,
        channels=k_shape[1] * groups,
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
    )
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]

    if activation == "relu":
        return relay.nn.relu(out), dic, param_lst
    elif activation == "tanh":
        return relay.tanh(out), dic, param_lst
    elif activation == "sigmoid":
        return relay.sigmoid(out), dic, param_lst
    else:
        return out, dic, param_lst
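A usage sketch mirroring how test_dnnl.py typically consumes these get_* helpers (run_and_verify_func and run_module are assumed from the surrounding test file, not shown here):

conv2d_tr, dic, param_lst = get_conv2d_transpose(activation="sigmoid")
config = tvm.IRModule.from_expr(conv2d_tr), dic, param_lst
run_and_verify_func(config, run_module=run_module)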
Example #7
    def get_blocks(prefix,
                   data,
                   in_channel,
                   out_channel,
                   include_bn=True,
                   include_sigmoid=False):
        weight = relay.var(prefix + "weight")
        bn_gamma = relay.var(prefix + "bn_gamma")
        bn_beta = relay.var(prefix + "bn_beta")
        bn_mmean = relay.var(prefix + "bn_mean")
        bn_mvar = relay.var(prefix + "bn_var")

        layer = relay.nn.conv2d(data=data,
                                weight=weight,
                                kernel_size=(3, 3),
                                channels=out_channel,
                                padding=(1, 1))
        if include_bn:
            bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean,
                                            bn_mvar)
            layer = bn_output[0]
        if include_sigmoid:
            # dummy layer to prevent pattern detection
            layer = relay.sigmoid(layer)
        layer = relay.nn.relu(layer)
        return layer
Example #8
File: test_dnnl.py Project: wenxcs/tvm
def get_conv3d(
        x_shape=(1, 32, 8, 8, 8),
        k_shape=(16, 32, 3, 3, 3),
        groups=1,
        padding=(0, 0, 0),
        strides=(1, 1, 1),
        dilation=(1, 1, 1),
        activation=None,
        dtype="float32",
):
    x = relay.var("x", shape=x_shape, dtype=dtype)
    # randint's upper bound is exclusive; (0, 1) would yield an all-zero kernel
    kernel = relay.const(np.random.randint(0, 2, k_shape).astype(dtype))
    out = relay.nn.conv3d(
        x,
        kernel,
        channels=k_shape[0],
        kernel_size=k_shape[2:],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
    )
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]

    if activation == "relu":
        return relay.nn.relu(out), dic, param_lst
    elif activation == "tanh":
        return relay.tanh(out), dic, param_lst
    elif activation == "sigmoid":
        return relay.sigmoid(out), dic, param_lst
    else:
        return out, dic, param_lst
Example #9
def do_layer(x, node: Node, params):
    if isinstance(node, Conv):
        x = conv2d(x, node, params)
    elif isinstance(node, Pool):
        x = pool2d(x, node)
    elif isinstance(node, Identity):
        x = x
    elif isinstance(node, Activation):
        if node.act_type == 'relu':
            x = relay.nn.relu(x)
        elif node.act_type == 'sigmoid':
            x = relay.sigmoid(x)
        elif node.act_type == 'tanh':
            x = relay.tanh(x)
        else:
            raise ValueError("unsupported activation: %s" % node.act_type)
    elif isinstance(node, Element):
        x = x  # element-wise computation has already been applied when building the terms
    elif isinstance(node, Relu):
        x = relay.nn.relu(x)
    elif isinstance(node, Sequential):
        x = sequential(x, node, params)
    else:
        raise NotImplementedError("unsupported node type: %s" % type(node).__name__)
    return x
Example #11
File: test_dnnl.py Project: wenxcs/tvm
def get_conv1d(
        x_shape=(1, 3, 224),
        k_shape=(16, 3, 3),
        groups=1,
        padding=(1, 1),
        strides=(1,),
        dilation=(1,),
        channels=None,
        activation=None,
        dtype="float32",
):
    x = relay.var("x", shape=x_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=k_shape, dtype=dtype)
    out = relay.nn.conv1d(
        x,
        kernel,
        kernel_size=k_shape[2:3],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
        channels=k_shape[0],
    )
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]

    if activation == "relu":
        return relay.nn.relu(out), dic, param_lst
    elif activation == "tanh":
        return relay.tanh(out), dic, param_lst
    elif activation == "sigmoid":
        return relay.sigmoid(out), dic, param_lst
    else:
        return out, dic, param_lst
Example #12
def test_stop_quantize():
    data = relay.var("data", shape=(1, 16, 64, 64))
    np_weight0 = np.random.rand(16, 16, 3, 3)
    conv0_weight = relay.Constant(tvm.nd.array(np_weight0)).astype("float32")
    np_weight1 = np.random.rand(16, 16, 1, 1)
    conv1_weight = relay.Constant(tvm.nd.array(np_weight1)).astype("float32")
    multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))

    conv0 = relay.nn.conv2d(data,
                            conv0_weight,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
    act0 = relay.nn.relu(data=conv0)

    pool = relay.nn.global_avg_pool2d(data=act0)

    conv1 = relay.nn.conv2d(pool,
                            conv1_weight,
                            kernel_size=(1, 1),
                            padding=(0, 0),
                            channels=16)
    act1 = relay.nn.relu(data=conv1)

    quantize_and_build(act1 * multiplier)
Example #13
File: test_runtime.py Project: szha/tvm
def get_model(input_shape, var_names):
    """Return a model and any parameters it may have."""
    a = relay.var(next(var_names), shape=input_shape, dtype="float32")
    out = relay.reshape(a, (1, 1, 1000))
    out = relay.sigmoid(out)
    out = relay.reshape(out, (1, 1000))
    return out
Example #14
    def test_sigmoid(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.sigmoid(data)
        net = relay.Function(relay.analysis.free_vars(net), net)
        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Sigmoid'
        assert 'relay_id' in layers[1].attrs
Example #15
    def before():
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")
        eq = relay.equal(eq1, eq2)

        true_branch = relay.tanh(data)
        false_branch = relay.sigmoid(data)
        ife = relay.If(eq, true_branch, false_branch)
        out = relay.erf(ife)
        func = relay.Function([data, eq1, eq2], out)
        mod = tvm.IRModule.from_expr(func)

        return mod
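Examples #3 and #15 are the "after" and "before" halves of the same annotation test. A hedged sketch of how such a pair is typically compared, assuming `target` names a registered external codegen:

mod = before()
mod = relay.transform.AnnotateTarget(target)(mod)
assert tvm.ir.structural_equal(mod, after())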
Example #16
def test_mul_rewrite():
    """a test case where rhs of mul is not constant"""
    data = relay.var("data", shape=(1, 16, 64, 64))
    multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
    conv = relay.nn.conv2d(
        data, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=16
    )
    act = relay.nn.relu(data=conv)

    quantize_and_build(act * multiplier)

    pool = relay.nn.global_avg_pool2d(data=act)

    quantize_and_build(act * pool)
Example #17
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype):
    a = relay.var("a", shape=shape, dtype=dtype)
    dequantize = relay.qnn.op.dequantize(
        a,
        input_scale=relay.const(input_sc, "float32"),
        input_zero_point=relay.const(input_zp, "int32"),
    )
    sigmoid = relay.sigmoid(dequantize)
    model = relay.qnn.op.quantize(
        sigmoid,
        output_scale=relay.const(output_sc, "float32"),
        output_zero_point=relay.const(output_zp, "int32"),
        out_dtype=dtype,
    )
    return model
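A minimal usage sketch (the quantization parameters are illustrative, not from the original test): build the dequantize → sigmoid → quantize pattern for a uint8 tensor and type-check it.

model = _get_model(
    shape=(1, 128),
    input_zp=64,
    input_sc=0.02,
    output_zp=0,
    output_sc=1.0 / 256.0,
    dtype="uint8",
)
mod = relay.transform.InferType()(tvm.IRModule.from_expr(model))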
Example #18
def get_conv1d_bias(x_shape=(1, 3, 224), k_shape=(10, 3, 3), activation=None, dtype="float32"):
    conv, dic, param_lst = get_conv1d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    out = relay.nn.bias_add(conv, bias)
    dic["bias"] = (k_shape[0],)
    param_lst += ["bias"]

    if activation == "relu":
        return relay.nn.relu(out), dic, param_lst
    elif activation == "tanh":
        return relay.tanh(out), dic, param_lst
    elif activation == "sigmoid":
        return relay.sigmoid(out), dic, param_lst
    else:
        return out, dic, param_lst
Example #19
    def expected(dim):
        p0 = relay.var("p0", shape=(1, dim))
        p1 = relay.var("p1", shape=(3 * dim, dim))
        matmul = relay.nn.dense(p0, p1)
        f0 = relay.Function([p0, p1], matmul)

        p01 = relay.var("p01", shape=(1, 3 * dim))
        splitted = relay.split(p01, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        f1 = relay.Function([p01], out)

        X = relay.var("X", shape=(1, dim))
        W = relay.var("W", shape=(3 * dim, dim))
        y = relay.Call(f0, [X, W])
        z = relay.Call(f1, [y])
        return relay.Function([X, W], z)
Example #21
    def expected(dim):
        p0 = relay.var("p0", shape=(1, dim))
        p1 = relay.var("p1", shape=(3 * dim, dim))
        matmul = relay.nn.dense(p0, p1)
        f0 = relay.Function([p0, p1], matmul)
        f0 = f0.set_attribute("Primitive", tvm.tir.IntImm("int32", 1))

        p01 = relay.var("p01", shape=(1, 3 * dim))
        splitted = relay.split(p01, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        f1 = relay.Function([p01], out)
        f1 = f1.set_attribute("Primitive", tvm.tir.IntImm("int32", 1))

        X = relay.var("X", shape=(1, dim))
        W = relay.var("W", shape=(3 * dim, dim))
        y = relay.Call(f0, [X, W])
        z = relay.Call(f1, [y])
        return relay.Function([X, W], z)
Example #22
def se_block(data,
             name,
             input_channels,
             ratio=0.25,
             layout='NCHW',
             dtype='float32'):
    pool = relay.nn.global_avg_pool2d(data, layout=layout)
    flatten = relay.nn.batch_flatten(data=pool)
    dense1 = layers.dense_add_bias(flatten,
                                   units=int(input_channels * ratio),
                                   name=name + '_dense1')
    relu = relay.nn.relu(data=dense1)
    dense2 = layers.dense_add_bias(relu,
                                   units=input_channels,
                                   name=name + '_dense2')
    sigmoid = relay.sigmoid(dense2)
    sigmoid = relay.expand_dims(sigmoid,
                                axis=(-1 if layout == 'NCHW' else 1),
                                num_newaxis=2)
    mul = relay.multiply(data, sigmoid)
    return mul
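A minimal usage sketch (shapes are illustrative; layers refers to tvm.relay.testing.layers):

data = relay.var("data", shape=(1, 64, 56, 56))  # NCHW
out = se_block(data, "se1", input_channels=64)
func = relay.Function(relay.analysis.free_vars(out), out)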
Example #23
File: test_dnnl.py Project: wenxcs/tvm
def get_conv3d_transpose(
    x_shape=(1, 32, 8, 8, 8),
    k_shape=(32, 16, 3, 3, 3),
    groups=1,
    padding=(0, 0, 0),
    strides=(1, 1, 1),
    output_padding=(0, 0, 0),
    activation=None,
    dtype="float32",
    data_layout="NCDHW",
    kernel_layout="OIDHW",
):
    x = relay.var("x", shape=x_shape, dtype=dtype)
    # randint's upper bound is exclusive; (0, 1) would yield an all-zero kernel
    kernel = relay.const(np.random.randint(0, 2, k_shape).astype(dtype))
    out = relay.nn.conv3d_transpose(
        x,
        kernel,
        channels=k_shape[1],
        kernel_size=k_shape[2:5],
        groups=groups,
        padding=padding,
        strides=strides,
        output_padding=output_padding,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]

    if activation == "relu":
        return relay.nn.relu(out), dic, param_lst
    elif activation == "tanh":
        return relay.tanh(out), dic, param_lst
    elif activation == "sigmoid":
        return relay.sigmoid(out), dic, param_lst
    else:
        return out, dic, param_lst
Example #24
def test_skip_conv():
    data = relay.var("data", shape=(1, 16, 64, 64))
    np_weight = np.random.rand(16, 16, 3, 3)
    conv0_weight = relay.Constant(tvm.nd.array(np_weight)).astype("float32")
    conv1_weight = relay.Constant(tvm.nd.array(np_weight)).astype("float32")
    multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))

    conv0 = relay.nn.conv2d(data,
                            conv0_weight,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
    act0 = relay.nn.relu(data=conv0)
    conv1 = relay.nn.conv2d(act0,
                            conv1_weight,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
    act1 = relay.nn.relu(data=conv1)

    quantize_and_build(act1 * multiplier)
    quantize_and_build(act1 * multiplier, skip_conv_layers=[0])
    quantize_and_build(act1 * multiplier, skip_conv_layers=[1])
    quantize_and_build(act1 * multiplier, skip_conv_layers=[0, 1])
Example #25
def get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float16"):
    conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
    return conv_out * relay.sigmoid(conv_out)
Example #26
File: lstm.py Project: LANHUIYING/tvm
def lstm_cell(num_hidden, batch_size=1, dtype="float32", name=""):
    """Long-Short Term Memory (LSTM) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.

    batch_size : int
        Batch size (length of states).

    Returns
    -------
    result : tvm.relay.Function
        A Relay function that evaluates an LSTM cell.
        The function takes in a tensor of input data, a tuple of two
        states, and weights and biases for dense operations on the
        inputs and on the state. It returns a tuple with two members,
        an output tensor and a tuple of two new states.
    """
    builder = relay.ScopeBuilder()

    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4*num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4*num_hidden,), dtype)

    dense_type = relay.TensorType((batch_size, 4*num_hidden), dtype)
    slice_type = relay.TupleType([input_type, input_type,
                                  input_type, input_type])
    ret_type = relay.TupleType([input_type,
                                relay.TupleType([input_type, input_type])])

    inputs = relay.Var("inputs", input_type)
    states = relay.Var("states",
                       relay.TupleType([input_type, input_type]))

    i2h_weight = relay.Var("i2h_weight", weight_type)
    i2h_bias = relay.Var("i2h_bias", bias_type)

    h2h_weight = relay.Var("h2h_weight", weight_type)
    h2h_bias = relay.Var("h2h_bias", bias_type)

    i2h = builder.let(("i2h", dense_type),
                      layers.dense_add_bias(
                          data=inputs,
                          units=num_hidden * 4,
                          weight=i2h_weight, bias=i2h_bias,
                          name="%si2h" % name))
    h2h = builder.let(("h2h", dense_type),
                      layers.dense_add_bias(
                          data=relay.TupleGetItem(states, 0),
                          units=num_hidden * 4,
                          weight=h2h_weight, bias=h2h_bias,
                          name="%sh2h" % name))

    gates = builder.let(("gates", dense_type), relay.add(i2h, h2h))
    slice_gates = builder.let(("slice_gates", slice_type),
                              relay.split(gates,
                                          indices_or_sections=4,
                                          axis=1).astuple())

    in_gate = builder.let(("in_gate", input_type),
                          relay.sigmoid(relay.TupleGetItem(slice_gates, 0)))
    forget_gate = builder.let(("forget_gate", input_type),
                              relay.sigmoid(relay.TupleGetItem(slice_gates, 1)))
    in_transform = builder.let(("in_transform", input_type),
                               relay.tanh(relay.TupleGetItem(slice_gates, 2)))
    out_gate = builder.let(("out_gate", input_type),
                           relay.sigmoid(relay.TupleGetItem(slice_gates, 3)))

    next_c = builder.let(("next_c", input_type),
                         relay.add(relay.multiply(forget_gate,
                                                  relay.TupleGetItem(states, 1)),
                                   relay.multiply(in_gate, in_transform)))
    next_h = builder.let(("next_h", input_type),
                         relay.multiply(out_gate, relay.tanh(next_c)))
    ret = builder.let(("ret", ret_type),
                      relay.Tuple([next_h, relay.Tuple([next_h, next_c])]))
    builder.ret(ret)

    body = builder.get()

    return relay.Function([inputs, states, i2h_weight,
                           i2h_bias, h2h_weight, h2h_bias],
                          body, ret_type)
Example #27
def silu(x):
    return x * relay.sigmoid(x)
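A quick numeric check of this definition against NumPy (a sketch; the shape is illustrative):

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(4,))
func = relay.Function([x], silu(x))
x_np = np.random.uniform(-3, 3, (4,)).astype("float32")
res = relay.create_executor("graph", mod=tvm.IRModule.from_expr(func)).evaluate()(x_np)
# silu(x) = x * sigmoid(x) = x / (1 + exp(-x))
np.testing.assert_allclose(res.numpy(), x_np / (1.0 + np.exp(-x_np)), rtol=1e-5)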
Example #28
def get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16"):
    return relay.sigmoid(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
Example #29
def get_net(batch_size, image_shape, num_classes, dtype):
    height = image_shape[1]
    width = image_shape[2]
    data_shape = (batch_size,) + image_shape
    net = relay.var("data", shape=data_shape, dtype=dtype)

    net = conv_3x3(net, 3, 64, "conv1_1")
    net = conv_3x3(net, 64, 64, "conv1_2")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 64, 128, "conv2_1")
    net = conv_3x3(net, 128, 128, "conv2_2")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 128, 256, "conv3_1")
    net = conv_3x3(net, 256, 256, "conv3_2")
    net = conv_3x3(net, 256, 256, "conv3_3")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 256, 512, "conv4_1")
    net = conv_3x3(net, 512, 512, "conv4_2")
    net = conv_3x3(net, 512, 512, "conv4_3")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 512, 512, "conv5_1")
    net = conv_3x3(net, 512, 512, "conv5_2")
    net = conv_3x3(net, 512, 512, "conv5_3")

    net = relay.nn.dropout(net, rate=0.5)

    net = conv_3x3(net, 512, 72, "conv6", activation=False)
    net = relay.transpose(net, (0, 2, 3, 1))

    num_class_probs = 9 * 3
    num_confidence_scores = 9
    #num_box_delta = 9 * 4
    pred_class_probs, pred_conf, pred_box_delta = relay.split(net,
            (num_class_probs, num_class_probs + num_confidence_scores),
            axis=-1)

    # Probability
    pred_class_probs = relay.reshape(pred_class_probs, (-1, 3))
    pred_class_probs = relay.nn.softmax(pred_class_probs)
    pred_class_probs = relay.reshape(pred_class_probs, (batch_size, -1, 3))

    # Confidence
    pred_conf = relay.sigmoid(pred_conf)
    pred_conf = relay.reshape(pred_conf, (batch_size, -1, 1))

    # Bbox_delta
    pred_box_delta = relay.reshape(pred_box_delta, (batch_size, -1, 4))
    delta_x, delta_y, delta_w, delta_h = relay.split(pred_box_delta, (1, 2, 3), axis=2)
    delta_x = relay.reshape(delta_x, (batch_size, -1))
    delta_y = relay.reshape(delta_y, (batch_size, -1))
    delta_w = relay.reshape(delta_w, (batch_size, -1))
    delta_h = relay.reshape(delta_h, (batch_size, -1))

    anchor_box = set_anchors(height, width)
    anchor_x = relay.Constant(tvm.nd.array(anchor_box[:, 0]))
    anchor_y = relay.Constant(tvm.nd.array(anchor_box[:, 1]))
    anchor_w = relay.Constant(tvm.nd.array(anchor_box[:, 2]))
    anchor_h = relay.Constant(tvm.nd.array(anchor_box[:, 3]))

    box_center_x = anchor_x + delta_x * anchor_w
    box_center_y = anchor_y + delta_y * anchor_h
    # unclipped variant:
    # box_width  = anchor_w * relay.exp(delta_w)
    # box_height = anchor_h * relay.exp(delta_h)
    box_width = anchor_w * safe_exp(delta_w)
    box_height = anchor_h * safe_exp(delta_h)

    xmins, ymins, xmaxs, ymaxs = bbox_transform(box_center_x, box_center_y, box_width, box_height)
    xmins = relay.minimum(relay.maximum(relay.const(0.0), xmins), relay.const(width - 1.0))
    ymins = relay.minimum(relay.maximum(relay.const(0.0), ymins), relay.const(height - 1.0))
    xmaxs = relay.maximum(relay.minimum(relay.const(width - 1.0), xmaxs), relay.const(0.0))
    ymaxs = relay.maximum(relay.minimum(relay.const(height - 1.0), ymaxs), relay.const(0.0))

    det_boxes = relay.stack(bbox_transform_inv(xmins, ymins, xmaxs, ymaxs), axis=-1)

    probs = relay.multiply(pred_class_probs, pred_conf)
    det_probs = relay.max(probs, axis=2)
    det_class = relay.argmax(probs, axis=2)

    out = relay.Tuple([det_boxes, det_probs, det_class])
    args = relay.analysis.free_vars(out)
    
    return relay.Function(args, out)
Example #30
def lstm_cell(num_hidden, batch_size=1, dtype="float32", name=""):
    """Long-Short Term Memory (LSTM) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.

    batch_size : int
        Batch size (length of states).

    Returns
    -------
    result : tvm.relay.Function
        A Relay function that evaluates an LSTM cell.
        The function takes in a tensor of input data, a tuple of two
        states, and weights and biases for dense operations on the
        inputs and on the state. It returns a tuple with two members,
        an output tensor and a tuple of two new states.
    """
    builder = relay.ScopeBuilder()

    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4 * num_hidden,), dtype)

    dense_type = relay.TensorType((batch_size, 4 * num_hidden), dtype)
    slice_type = relay.TupleType([input_type, input_type, input_type, input_type])
    ret_type = relay.TupleType([input_type, relay.TupleType([input_type, input_type])])

    inputs = relay.Var("inputs", input_type)
    states = relay.Var("states", relay.TupleType([input_type, input_type]))

    i2h_weight = relay.Var("i2h_weight", weight_type)
    i2h_bias = relay.Var("i2h_bias", bias_type)

    h2h_weight = relay.Var("h2h_weight", weight_type)
    h2h_bias = relay.Var("h2h_bias", bias_type)

    i2h = builder.let(
        ("i2h", dense_type),
        layers.dense_add_bias(
            data=inputs, units=num_hidden * 4, weight=i2h_weight, bias=i2h_bias, name="%si2h" % name
        ),
    )
    h2h = builder.let(
        ("h2h", dense_type),
        layers.dense_add_bias(
            data=relay.TupleGetItem(states, 0),
            units=num_hidden * 4,
            weight=h2h_weight,
            bias=h2h_bias,
            name="%sh2h" % name,
        ),
    )

    gates = builder.let(("gates", dense_type), relay.add(i2h, h2h))
    slice_gates = builder.let(
        ("slice_gates", slice_type), relay.split(gates, indices_or_sections=4, axis=1).astuple()
    )

    in_gate = builder.let(
        ("in_gate", input_type), relay.sigmoid(relay.TupleGetItem(slice_gates, 0))
    )
    forget_gate = builder.let(
        ("forget_gate", input_type), relay.sigmoid(relay.TupleGetItem(slice_gates, 1))
    )
    in_transform = builder.let(
        ("in_transform", input_type), relay.tanh(relay.TupleGetItem(slice_gates, 2))
    )
    out_gate = builder.let(
        ("out_gate", input_type), relay.sigmoid(relay.TupleGetItem(slice_gates, 3))
    )

    next_c = builder.let(
        ("next_c", input_type),
        relay.add(
            relay.multiply(forget_gate, relay.TupleGetItem(states, 1)),
            relay.multiply(in_gate, in_transform),
        ),
    )
    next_h = builder.let(("next_h", input_type), relay.multiply(out_gate, relay.tanh(next_c)))
    ret = builder.let(("ret", ret_type), relay.Tuple([next_h, relay.Tuple([next_h, next_c])]))
    builder.ret(ret)

    body = builder.get()

    return relay.Function(
        [inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias], body, ret_type
    )
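A minimal usage sketch (assumed, not part of the original file): instantiate the cell and let type inference confirm the documented return type.

cell = lstm_cell(num_hidden=16, batch_size=1)
mod = relay.transform.InferType()(tvm.IRModule.from_expr(cell))
print(mod["main"].ret_type)  # tuple of (output, (next_h, next_c)) tensor types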
Example #31
def verify_sigmoid(dshape, dtype="float32"):
    x = relay.var("x", relay.ty.TensorType(dshape, dtype))
    y = relay.sigmoid(x)
    func = relay.Function([x], y)
    x_data = np.random.uniform(size=dshape).astype(dtype)
    verify_results(func, [x_data], "test_sigmoid", rtol=1e-4, atol=1e-4)
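Typical invocations (shapes are illustrative; verify_results is assumed from the surrounding ONNX-export test utilities):

verify_sigmoid((1, 3, 4, 5))
verify_sigmoid((16, 16), dtype="float32")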