Example #1
def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
    """With cuda, we modify use alter_op_layout to swap the default
    sparse_dense implementation for one that operates on a padded matrix. We
    also pad the matrix.
    """
    # TODO(ANSHUMAN87): Handle for sparse_lhs case too
    if (isinstance(inputs[1], relay.Constant)
            and isinstance(inputs[2], relay.Constant)
            and isinstance(inputs[3], relay.Constant)
            and is_valid_for_sparse_dense_padded(inputs[0],
                                                 inputs[1].data.asnumpy())):
        if len(inputs[1].data.asnumpy().shape) == 1:
            sparse_matrix = sp.csr_matrix(
                (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(),
                 inputs[3].data.asnumpy())).tobsr()
        else:
            sparse_matrix = sp.bsr_matrix(
                (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(),
                 inputs[3].data.asnumpy()))
        warp_size = int(
            tvm.target.Target.current(allow_none=False).thread_warp_size)
        sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
        return relay.nn._make.sparse_dense_padded(
            inputs[0],
            relay.Constant(tvm.nd.array(sparse_matrix.data)),
            relay.Constant(tvm.nd.array(sparse_matrix.indices)),
            relay.Constant(tvm.nd.array(sparse_matrix.indptr)),
        )
    return None
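In TVM this hook is wired in through the generic sparse_dense_alter_layout function; a hedged sketch of how the override above would be registered, assuming the generic-function decorator API in tvm.topi (mirroring tvm/topi/cuda/sparse.py):

# A hedged registration sketch, assuming TVM's generic-function decorator API.
from tvm import topi

@topi.nn.sparse_dense_alter_layout.register(["cuda", "gpu"])
def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
    ...  # body as in the example above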
Example #2
def get_network():
    # Build a single module that combines the three subnetworks.
    dshape = (3, 3)
    data = relay.var("data_0", relay.TensorType(dshape, "float32"))
    data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
    mvalue1 = np.full((1), 1).astype("float32")
    mvalue2 = np.full((1), 2).astype("float32")
    mvalue3 = np.full((1), 3).astype("float32")
    mv1 = relay.Constant(tvm.nd.array(mvalue1))
    mv2 = relay.Constant(tvm.nd.array(mvalue2))
    mv3 = relay.Constant(tvm.nd.array(mvalue3))
    # There are three outputs in the first model.
    net1_output1 = relay.add(data, mv1)
    net1_output2 = relay.subtract(data, mv2)
    net1_output3 = relay.concatenate((net1_output1, net1_output2), axis=0)
    (net1_output3, _) = relay.split(net1_output3, indices_or_sections=2, axis=0)
    net1_output3 = relay.add(net1_output3, mv2)
    # The second model uses the output net1_output3 of the first model as its first input;
    # its second input is data21.
    net2 = relay.add(net1_output3, mv2)
    net2 = relay.add(net2, data21)
    net2_output = relay.add(net2, mv3)
    # The third model uses the output named net2_output of the second model as the first input
    # and uses the output named net1_output2 of the first model as the second input.
    net3 = relay.multiply(net2_output, mv3)
    net3 = relay.add(net3, net1_output2)
    return tvm.IRModule.from_expr(relay.Function([data, data21], relay.Tuple([net3]))), dshape
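A hedged usage sketch: compile the returned module and run it on random inputs, assuming a local LLVM target and a TVM recent enough to ship tvm.contrib.graph_executor.

# Hedged usage sketch: build and run the module returned by get_network().
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor

mod, dshape = get_network()
lib = relay.build(mod, target="llvm")
m = graph_executor.GraphModule(lib["default"](tvm.cpu()))
m.set_input("data_0", np.random.rand(*dshape).astype("float32"))
m.set_input("data_1", np.random.rand(*dshape).astype("float32"))
m.run()
print(m.get_output(0).numpy())  # the single tuple output holding net3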
Example #3
def test_stop_quantize():
    data = relay.var("data", shape=(1, 16, 64, 64))
    np_weight0 = np.random.rand(16, 16, 3, 3)
    conv0_weight = relay.Constant(tvm.nd.array(np_weight0)).astype("float32")
    np_weight1 = np.random.rand(16, 16, 1, 1)
    conv1_weight = relay.Constant(tvm.nd.array(np_weight1)).astype("float32")
    multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))

    conv0 = relay.nn.conv2d(data,
                            conv0_weight,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
    act0 = relay.nn.relu(data=conv0)

    pool = relay.nn.global_avg_pool2d(data=act0)

    conv1 = relay.nn.conv2d(pool,
                            conv1_weight,
                            kernel_size=(1, 1),
                            padding=(0, 0),
                            channels=16)
    act1 = relay.nn.relu(data=conv1)

    quantize_and_build(act1 * multiplier)
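The helper quantize_and_build is assumed by this test and by test_skip_conv further below; a minimal sketch of what it might look like, built on TVM's relay.quantize API (the helper's name and kwargs-forwarding are assumptions):

# Hedged sketch of the assumed quantize_and_build helper: wrap the output
# expression in a function, quantize under the given config, and build.
import tvm
from tvm import relay

def quantize_and_build(out, **config):
    f = relay.Function(relay.analysis.free_vars(out), out)
    mod = tvm.IRModule.from_expr(f)
    with relay.quantize.qconfig(**config):  # e.g. skip_conv_layers=[0]
        qmod = relay.quantize.quantize(mod)
    relay.build(qmod, "llvm")
    return qmod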
Example #4
def test_sparse_dense_padded_alter_op(target, dev):
    with tvm.target.Target(target):
        M = 128
        N = 16
        K = 128
        X_np = np.random.randn(M, K).astype("float32")
        W_sp_np = random_bsr_matrix(N, K, 2, 2, density=0.01, dtype="float32")
        x = relay.var("x", relay.TensorType(X_np.shape, "float32"))
        mult = relay.op.nn.sparse_dense(
            x,
            (
                relay.Constant(tvm.nd.array(W_sp_np.data)),
                relay.Constant(tvm.nd.array(W_sp_np.indices)),
                relay.Constant(tvm.nd.array(W_sp_np.indptr)),
            ),
        )
        f = relay.Function([x], mult)
        f_ = relay.transform.InferType()(tvm.IRModule.from_expr(f))
        f_ = relay.transform.AlterOpLayout()(f_)
        assert f_["main"].body.op.name == "nn.internal.sparse_dense_padded"

        # build with cuda and AlterOpLayout to ensure that sparse_dense_padded is in action
        with tvm.transform.PassContext(opt_level=3,
                                       required_pass="******"):
            x = relay.build(tvm.IRModule.from_expr(f), target=target)
Example #5
    def _impl(cls, inputs, layer, params):
        # Append the learned scale and bias blobs as constant inputs.
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][0])))
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][1])))
        # Multiply the data by the learned scale blob (bias handling was
        # left incomplete in the original snippet).
        return inputs[0] * inputs[1]
Example #6
    def _impl(cls, inputs, layer, params):
        # Extract the hyperparameters.
        dilations = 1
        if layer.convolution_param.dilation != []:
            dilations = layer.convolution_param.dilation[0]
        ## Padding (pads)
        pads = [0, 0]  # defaults to 0
        if layer.convolution_param.pad != []:  # if pad is given, use it
            pads = np.array([layer.convolution_param.pad] *
                            2).flatten().tolist()
        elif layer.convolution_param.pad_h != 0 or layer.convolution_param.pad_w != 0:  # otherwise fall back to pad_h / pad_w
            pads = [
                layer.convolution_param.pad_h, layer.convolution_param.pad_w
            ]
        ## Strides
        strides = [1, 1]  # defaults to 1
        if layer.convolution_param.stride != []:
            strides = np.array([layer.convolution_param.stride] *
                               2).flatten().tolist()
        ## Kernel size (kernel_shape)
        kernel_size = np.array([layer.convolution_param.kernel_size] *
                               2).flatten().tolist()
        if layer.convolution_param.kernel_size == []:
            kernel_size = [
                layer.convolution_param.kernel_h,
                layer.convolution_param.kernel_w
            ]
        ## Groups
        group = layer.convolution_param.group

        # Append the weight as a constant input.
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][0])))

        args = {
            'kernel_shape': kernel_size,
            'strides': strides,
            'dilation': dilations,
            'padding': pads,
            'groups': group,
            'data_layout': 'NCHW',
            #   'kernel_layout': 'OIHW'
        }

        out = AttrCvt(
            op_name=dimension_picker('conv'),
            transforms={
                'kernel_shape': 'kernel_size',
                # 'dilations': ('dilation', 1),
                # 'pads': ('padding', 0),
                # 'strides': 'strides',
                # 'group': ('groups', 1),
            },
            custom_check=dimension_constraint())(inputs[:2], args, params)

        if layer.convolution_param.bias_term:
            out = _op.nn.bias_add(
                out, relay.Constant(tvm.nd.array(params[layer.name][1])))
        return out
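The conversion relies on dimension_picker, dimension_constraint, and AttrCvt from the surrounding frontend code; a hedged sketch of the two small helpers, modeled on their counterparts in TVM's ONNX frontend (every name here is an assumption about that shared code):

# Hedged sketches of the assumed helpers, modeled on TVM's ONNX frontend.
def dimension_picker(prefix, suffix=""):
    # Pick e.g. "conv2d" from the rank of kernel_shape.
    def _impl(attr):
        kernel = attr["kernel_shape"]
        if len(kernel) in (1, 2, 3):
            return prefix + "{}d".format(len(kernel)) + suffix
        raise NotImplementedError("Only 1D/2D/3D kernels are supported.")
    return _impl

def dimension_constraint():
    # Validate the kernel rank before conversion.
    def _dim_check(attrs):
        return len(attrs["kernel_shape"]) in (1, 2, 3)
    return _dim_check, "Only 1d, 2d and 3d kernel supported."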
Example #7
    def _impl(cls, inputs, layer, params):
        # Append the embedding weight as a constant input.
        weight = relay.Constant(tvm.nd.array(params[layer.name][0]))
        inputs.append(weight)
        # Gather rows of the weight matrix by the integer indices.
        out = _op.take(weight, inputs[0].astype('int32'), axis=0)
        if layer.embed_param.bias_term:
            out = _op.nn.bias_add(
                out, relay.Constant(tvm.nd.array(params[layer.name][1])))
        return out
Example #8
    def _impl(cls, inputs, layer, params):
        # Append the weight as a constant input.
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][0])))

        inputs[0] = _op.nn.batch_flatten(inputs[0])
        units = infer_channels(inputs[1])
        out = _op.nn.dense(inputs[0], inputs[1], units=units)
        if layer.inner_product_param.bias_term:
            out = _op.nn.bias_add(
                out, relay.Constant(tvm.nd.array(params[layer.name][1])))
        return out
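infer_channels is assumed from the frontend's common utilities; a hedged sketch of what it likely does, reading the output-channel count off the weight's inferred type:

# Hedged sketch of the assumed infer_channels helper (modeled on
# tvm.relay.frontend.common): infer the weight's type, return its rows.
import tvm
from tvm import relay

def infer_channels(weight, transpose=False):
    mod = relay.transform.InferType()(tvm.IRModule.from_expr(weight))
    shape = [int(s) for s in mod["main"].body.checked_type.shape]
    return shape[1] if transpose else shape[0]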
Example #9
def get_mannual_mod():
    # Get a list of modules representing subgraphs.
    mods = []
    dshape = (3, 3)
    data = relay.var("data_0", relay.TensorType(dshape, "float32"))
    data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
    data_net1_output_1 = relay.var("data_0",
                                   relay.TensorType(dshape, "float32"))
    data_net1_output_2 = relay.var("data_1",
                                   relay.TensorType(dshape, "float32"))
    data_net2_output_1 = relay.var("data_0",
                                   relay.TensorType(dshape, "float32"))
    mvalue1 = np.full((1), 1).astype("float32")
    mvalue2 = np.full((1), 2).astype("float32")
    mvalue3 = np.full((1), 3).astype("float32")
    mv1 = relay.Constant(tvm.nd.array(mvalue1))
    mv2 = relay.Constant(tvm.nd.array(mvalue2))
    mv3 = relay.Constant(tvm.nd.array(mvalue3))

    # There are three outputs in the first model.

    net1_output1 = relay.add(data, mv1)
    net1_output2 = relay.subtract(data, mv2)
    net1_output3 = relay.multiply(data, mv3)

    # The second model uses the output net1_output1 of the first model as its first input;
    # its second input is data21.
    net2 = relay.add(data_net1_output_1, mv2)
    net2 = relay.add(net2, data21)
    net2_output = relay.add(net2, mv3)

    # The third model uses the output net2_output of the second model as its first input
    # and the output net1_output2 of the first model as its second input.
    net3 = relay.multiply(data_net2_output_1, mv3)
    net3 = relay.add(net3, data_net1_output_2)

    mods.append(
        tvm.IRModule.from_expr(
            relay.Function([data],
                           relay.Tuple(
                               [net1_output1, net1_output2, net1_output3]))))
    mods.append(
        tvm.IRModule.from_expr(
            relay.Function([data_net1_output_1, data21], net2_output)))
    mods.append(
        tvm.IRModule.from_expr(
            relay.Function([data_net1_output_2, data_net2_output_1], net3)))

    return mods, dshape
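A hedged sketch of chaining the three subgraphs by hand: compile each module, then feed the first module's outputs into the second and third (LLVM target and tvm.contrib.graph_executor assumed; the input names follow the variables defined above).

# Hedged sketch: compile each subgraph module and wire them together.
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor

mods, dshape = get_mannual_mod()
libs = [relay.build(m, target="llvm") for m in mods]

def run(lib, inputs):
    m = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    for name, value in inputs.items():
        m.set_input(name, value)
    m.run()
    return [m.get_output(i).numpy() for i in range(m.get_num_outputs())]

data = np.random.rand(*dshape).astype("float32")
data21 = np.random.rand(*dshape).astype("float32")
out1 = run(libs[0], {"data_0": data})                        # three outputs
out2 = run(libs[1], {"data_0": out1[0], "data_1": data21})   # uses net1_output1
out3 = run(libs[2], {"data_0": out2[0], "data_1": out1[1]})  # uses net2_output, net1_output2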
Example #10
    def _impl(cls, inputs, layer, params):
        # Append the layer's learned blobs as constant inputs.
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][0])))
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][1])))
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][2])))
        args = {
            "epsilon": layer.batch_norm_param.eps,
            # moving-average coefficient
            "momentum": layer.batch_norm_param.moving_average_fraction
        }
        return AttrCvt(op_name='batch_norm',
                       disables=['momentum'],
                       ignores=[
                           'order', 'spatial', 'is_test', 'consumed_inputs',
                           'num_batches'
                       ])(inputs, args, params)
Example #11
def test_function_attrs():
    param_names = ["a", "b", "c", "d"]
    params = tvm.runtime.convert(
        [relay.var(n, shape=(5, 2)) for n in param_names])
    ret_type = relay.TupleType(tvm.runtime.convert([]))
    body = relay.Tuple(tvm.runtime.convert([]))
    type_params = tvm.runtime.convert([])
    fn = relay.Function(params, body, ret_type, type_params)
    model_params = {}
    for param in params[:1]:
        cty = param.type_annotation
        tensor = np.random.rand(*[int(sh)
                                  for sh in cty.shape]).astype(cty.dtype)
        model_params[param] = relay.Constant(tvm.nd.array(tensor))

    fn = fn.with_attr("__params__", model_params)

    assert fn.params == params
    assert fn.body == body
    assert fn.type_params == type_params
    assert fn.span is None
    str(fn)
    check_json_roundtrip(fn)
    json_str = tvm.ir.save_json(fn)
    fn_after = tvm.ir.load_json(json_str)
    model_params_after = fn_after.attrs["__params__"]
    after_keys = [item[0] for item in model_params_after.items()]
    for key1, key2 in zip(model_params, after_keys):
        assert key1.name_hint == key2.name_hint
        p1 = model_params[key1]
        p2 = model_params_after[key2]
        np.testing.assert_allclose(p1.data.numpy(), p2.data.numpy())
Example #12
def test_constant():
    arr = tvm.nd.array(10)
    const = relay.Constant(arr)
    assert const.data == arr
    assert const.span is None
    str(const)
    check_json_roundtrip(const)
Example #13
    def _impl(cls, inputs, layer, params):
        # Append the slope weight as a constant input.
        inputs.append(relay.Constant(tvm.nd.array(params[layer.name][0])))
        assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(
            len(inputs))
        return _op.nn.prelu(inputs[0], inputs[1])
Example #14
def test_let():
    ty = relay.ty.TensorType((10, 20), 'float32')
    lv = relay.Var('x', ty)
    arr = tvm.nd.array(10)
    value = relay.Constant(arr)
    let = relay.Let(lv, value, lv)
    show(let)
Example #15
def broadcast_to(children, attrs, odtype='float32'):
    # TODO(@jroesch) export broadcast to?
    data = children[0]
    shape = attrs.get_int_tuple('shape')
    array = numpy.zeros(shape).astype(odtype)
    rconst = relay.Constant(nd.array(array))
    return op.broadcast_to_like(data, rconst)
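A hedged usage sketch of the helper above; the _Attrs stub is a stand-in assumption for the real attribute object this frontend passes around:

# Hedged usage sketch; _Attrs stands in for the real attribute object.
import numpy
from tvm import nd, relay
from tvm.relay import op

class _Attrs:
    def get_int_tuple(self, key):
        assert key == 'shape'
        return (2, 3)

x = relay.var('x', shape=(1, 3))
print(broadcast_to([x], _Attrs()))  # broadcast_to_like(x, zeros((2, 3)))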
Example #16
def test_tuple():
    x = relay.Var('x')
    assert well_formed(x)
    v = relay.Constant(tvm.nd.array(10))
    let = relay.Let(x, v, x)
    assert well_formed(let)
    assert well_formed(relay.Tuple([v, v]))
    assert not well_formed(relay.Tuple([let, let]))
Example #17
def test_sparse_dense_padded_alter_op():
    with tvm.target.Target("cuda"):
        M = 128
        N = 16
        K = 128
        X_np = np.random.randn(M, K).astype("float32")
        W_sp_np = random_bsr_matrix(N, K, 2, 2, density=0.01, dtype="float32")
        mult = relay.op.nn.sparse_dense(
            relay.Constant(tvm.nd.array(X_np)),
            (
                relay.Constant(tvm.nd.array(W_sp_np.data)),
                relay.Constant(tvm.nd.array(W_sp_np.indices)),
                relay.Constant(tvm.nd.array(W_sp_np.indptr)),
            ),
        )
        f = relay.Function([], mult)
        f_ = relay.transform.AlterOpLayout()(tvm.IRModule.from_expr(f))
        assert f_["main"].body.op.name == "nn.internal.sparse_dense_padded"
Example #18
def test_let():
    x = relay.Var("x")
    assert well_formed(x)
    v = relay.Constant(tvm.nd.array(10))
    ty = None
    let = relay.Let(x, v, x)
    assert well_formed(let)
    assert not well_formed(relay.Let(x, v, let))
    f = relay.Function([x], x, ty)
    assert well_formed(f)
    assert well_formed(
        relay.Let(relay.Var("y"), f, relay.Let(relay.Var("z"), f, v)))
Example #19
def test_free_vars():
    x = relay.Var("x")
    fvx = free_vars(x)
    assert len(fvx) == 1
    assert fvx[0] == x
    v = relay.Constant(tvm.nd.array(10))
    ty = relay.TensorType([], "int32")
    let = relay.Let(x, v, x, ty)
    fvx = free_vars(let)
    assert len(free_vars(let)) == 0
    f = relay.Function([relay.Param(x, ty)], ty, x)
    assert len(free_vars(f)) == 0
Example #20
def test_let():
    lv = relay.Var('x')
    ty = None
    arr = tvm.nd.array(10)
    value = relay.Constant(arr)
    # I would prefer that the order of arguments
    # matches syntax let x: t = v in b
    let = relay.Let(lv, value, lv)
    assert let.var == lv
    assert let.value == value
    assert let.body == lv
    assert let.span is None
    str(let)
Example #21
def test_well_formed():
    x = relay.Var('x')
    assert well_formed(x)
    v = relay.Constant(tvm.nd.array(10))
    ty = None
    let = relay.Let(x, v, x, ty)
    assert well_formed(let)
    assert not well_formed(relay.Let(x, v, let, ty))
    f = relay.Function([relay.Param(x, ty)], ty, x)
    assert well_formed(f)
    # This test would pass under weak uniqueness (checking only for shadowing),
    # but we want all binders to be distinct from each other.
    assert not well_formed(relay.Let(relay.Var("y"), f,
                                     relay.Let(relay.Var("z"), f, v, ty), ty))
Example #22
def test_skip_conv():
    data = relay.var("data", shape=(1, 16, 64, 64))
    np_weight = np.random.rand(16, 16, 3, 3)
    conv0_weight = relay.Constant(tvm.nd.array(np_weight)).astype("float32")
    conv1_weight = relay.Constant(tvm.nd.array(np_weight)).astype("float32")
    multiplier = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))

    conv0 = relay.nn.conv2d(data,
                            conv0_weight,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
    act0 = relay.nn.relu(data=conv0)
    conv1 = relay.nn.conv2d(act0,
                            conv1_weight,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
    act1 = relay.nn.relu(data=conv1)

    quantize_and_build(act1 * multiplier)
    quantize_and_build(act1 * multiplier, skip_conv_layers=[0])
    quantize_and_build(act1 * multiplier, skip_conv_layers=[1])
    quantize_and_build(act1 * multiplier, skip_conv_layers=[0, 1])
Example #23
def merge_transform_to_mxnet_model(mod):
    """ Add Image Transform Logic Into Model """
    svalue = np.array([123., 117., 104.])
    sub_data = relay.Constant(tvm.nd.array(svalue)).astype("float32")
    dvalue = np.array([58.395, 57.12, 57.37])
    divide_data = relay.Constant(tvm.nd.array(dvalue)).astype("float32")

    data_shape = (224, 224, 3)
    data = relay.var("data", relay.TensorType(data_shape, "float32"))

    simple_net = relay.expand_dims(data, axis=0, num_newaxis=1)
    # TODO: Relay does not support dynamic shapes yet; resize logic needs to be added in the future.
    # simple_net = relay.image.resize(simple_net, (224, 224), "NHWC", "bilinear", "align_corners")
    simple_net = relay.subtract(simple_net, sub_data)
    simple_net = relay.divide(simple_net, divide_data)
    simple_net = relay.transpose(simple_net, (0, 3, 1, 2))

    # Merge the transform into the pretrained model's network.
    entry = mod["main"]
    anf = run_opt_pass(entry.body, transform.ToANormalForm())
    call = anf.value
    data, weights = call.args
    first_op = op.nn.conv2d(simple_net,
                            weights,
                            strides=call.attrs.strides,
                            padding=call.attrs.padding,
                            dilation=call.attrs.dilation,
                            groups=call.attrs.groups,
                            channels=call.attrs.channels,
                            kernel_size=call.attrs.kernel_size,
                            out_dtype=call.attrs.out_dtype)
    net = relay.expr.Let(anf.var, first_op, anf.body)
    net = run_opt_pass(net, transform.ToGraphNormalForm())

    mod['main'] = net
    return mod
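run_opt_pass is assumed from TVM's test utilities; a hedged sketch of its usual shape: wrap the expression in a module, run the pass, and unwrap the result.

# Hedged sketch of the assumed run_opt_pass helper (modeled on TVM's tests).
import tvm
from tvm import relay

def run_opt_pass(expr, opt_pass):
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body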
Example #24
def rand_from_type(t):
    return relay.Constant(rand(t.dtype, *[int(d) for d in t.shape]))
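rand is assumed here; a hedged sketch, mirroring the helper of the same name in tvm.relay.testing:

# Hedged sketch of the assumed rand helper: a random NDArray of the given
# dtype and shape.
import numpy as np
import tvm

def rand(dtype, *shape):
    return tvm.nd.array(np.random.rand(*shape).astype(dtype))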
Example #25
def test_constant():
    arr = tvm.nd.array(10)
    const = relay.Constant(arr)
    show(const)
Example #26
params = prepare_params(g, data)

# Check shape of features and the validity of adjacency matrix
assert len(params['infeats'].shape) == 2
assert (params['g_data'] is not None and params['indices'] is not None
        and params['indptr'] is not None)
assert params['infeats'].shape[0] == params['indptr'].shape[0] - 1

######################################################################
# Put layers together
# -------------------

# Define input features, norms, adjacency matrix in Relay
infeats = relay.var("infeats", shape=data.features.shape)
norm = relay.Constant(tvm.nd.array(params['norm']))
g_data = relay.Constant(tvm.nd.array(params['g_data']))
indices = relay.Constant(tvm.nd.array(params['indices']))
indptr = relay.Constant(tvm.nd.array(params['indptr']))

Adjacency = namedtuple('Adjacency', ['data', 'indices', 'indptr'])
adj = Adjacency(g_data, indices, indptr)

# Construct the 2-layer GCN
layers = []
layers.append(
    GraphConv(layer_name="layers.0",
              input_dim=infeat_dim,
              output_dim=num_hidden,
              adj=adj,
              input=infeats,
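GraphConv is assumed from the GCN tutorial this excerpt comes from (the near-duplicate excerpt in Example #29 uses it too); a hedged sketch of its shape, with parameter names taken from the call site above:

# Hedged sketch of the assumed GraphConv layer (modeled on TVM's GCN
# tutorial): y = norm * (A_sparse @ ((norm * x) W)) + b, built from Relay ops.
def GraphConv(layer_name, input_dim, output_dim, adj, input,
              norm=None, bias=True, activation=None):
    if norm is not None:
        input = relay.multiply(input, norm)
    weight = relay.var(layer_name + ".weight", shape=(input_dim, output_dim))
    # sparse_dense expects the dense operand transposed, hence the transposes.
    dense = relay.nn.dense(relay.transpose(weight), input)
    output = relay.transpose(relay.nn.sparse_dense(dense, adj))
    if norm is not None:
        output = relay.multiply(output, norm)
    if bias:
        output = relay.nn.bias_add(
            output, relay.var(layer_name + ".bias", shape=(output_dim, 1)),
            axis=-1)
    return activation(output) if activation is not None else output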
Example #27
def test_constant():
    arr = tvm.nd.array(10)
    const = relay.Constant(arr)
    assert const.data == arr
    assert const.span is None
    str(const)
Example #28
    def _set_params(self, params):
        inputs = {}
        for name, param in params.items():
            inputs[name] = relay.Constant(param)
        self._set_params_func(inputs)
Example #29
params = prepare_params(g, data)

# Check shape of features and the validity of adjacency matrix
assert len(params["infeats"].shape) == 2
assert (params["g_data"] is not None and params["indices"] is not None
        and params["indptr"] is not None)
assert params["infeats"].shape[0] == params["indptr"].shape[0] - 1

######################################################################
# Put layers together
# -------------------

# Define input features, norms, adjacency matrix in Relay
infeats = relay.var("infeats", shape=data.features.shape)
norm = relay.Constant(tvm.nd.array(params["norm"]))
g_data = relay.Constant(tvm.nd.array(params["g_data"]))
indices = relay.Constant(tvm.nd.array(params["indices"]))
indptr = relay.Constant(tvm.nd.array(params["indptr"]))

Adjacency = namedtuple("Adjacency", ["data", "indices", "indptr"])
adj = Adjacency(g_data, indices, indptr)

# Construct the 2-layer GCN
layers = []
layers.append(
    GraphConv(
        layer_name="layers.0",
        input_dim=infeat_dim,
        output_dim=num_hidden,
        adj=adj,
Example #30
def test_function_alpha_equal():
    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((4, 5, 6), "int8")
    tt3 = relay.TupleType([tt1, tt2])

    v1 = relay.Var("v1", tt1)
    v2 = relay.Var("v2", tt2)
    v3 = relay.Var("v3", tt3)
    v4 = relay.Var("v4", tt2)
    vret = relay.Constant(tvm.nd.array(np.ones(1)))

    tp1 = relay.TypeVar("tp1", relay.Kind.Type)
    tp2 = relay.TypeVar("tp2", relay.Kind.Type)
    tp3 = relay.TypeVar("tp3", relay.Kind.Shape)
    tp4 = relay.TypeVar("tp4", relay.Kind.Shape)

    basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
    basic_tps = [tp1, tp2]

    func = relay.Function([v1, v2], v1,
                          tt2, basic_tps)
    mapped = relay.Function(basic_args, basic_args[0], tt2, basic_tps)
    assert alpha_equal(func, mapped)

    fewer_params = relay.Function([relay.Var("v4", tt2)], v4, tt2, basic_tps)
    assert not alpha_equal(func, fewer_params)

    more_params = relay.Function([relay.Var("v3", tt1),
                                  relay.Var("v4", tt2),
                                  relay.Var("v2", tt2)], v4, tt2, basic_tps)
    assert not alpha_equal(func, more_params)

    params_unordered = relay.Function([v2, v1], v1,
                                      tt2, basic_tps)
    assert not alpha_equal(func, params_unordered)

    params_mismatch = relay.Function([v1, v3], v1,
                                     tt2, basic_tps)
    assert not alpha_equal(func, params_mismatch)

    # also would not typecheck
    ret_type_mismatch = relay.Function(basic_args, v4, tt1, basic_tps)
    assert not alpha_equal(func, ret_type_mismatch)

    # also mis-typed
    different_body = relay.Function(basic_args, v3, tt2, basic_tps)
    assert not alpha_equal(func, different_body)

    fewer_type_params = relay.Function(basic_args, v4, tt2, [tp1])
    assert not alpha_equal(func, fewer_type_params)

    more_type_params = relay.Function(basic_args, v4, tt2, [tp1, tp2, tp3])
    assert not alpha_equal(func, more_type_params)

    type_params_unordered = relay.Function(basic_args, v4, tt2, [tp2, tp1])
    assert not alpha_equal(func, type_params_unordered)

    different_type_params = relay.Function(basic_args, v4, tt2, [tp3, tp4])
    assert not alpha_equal(func, different_type_params)

    # a well-typed example that also differs in body, ret type, and type params
    tupled_example = relay.Function(basic_args, relay.Tuple([v3, v4]), tt3)
    assert not alpha_equal(func, tupled_example)

    # nullable
    no_ret_type = relay.Function(basic_args, v4, None, [tp1, tp2])
    # both null
    assert alpha_equal(no_ret_type, no_ret_type)
    # one null
    assert not alpha_equal(func, no_ret_type)
    assert not alpha_equal(no_ret_type, func)