def test_global_recursion():
    """A globally registered recursive list-copy function evaluates correctly.

    The function recurses through its own GlobalVar, so this exercises
    global (module-level) recursion in the Python-converter backend.
    """
    mod = relay.Module()
    p = Prelude(mod)
    # GlobalVar naming the recursive function; the body calls it by this var.
    copy = relay.GlobalVar('copy')
    elem_ty = relay.TypeVar('a')
    lst = relay.Var('v', p.l(elem_ty))
    head = relay.Var('h')
    tail = relay.Var('t')
    # copy(l) rebuilds l cell by cell: cons(h, copy(t)) / nil -> nil.
    cons_clause = relay.Clause(
        relay.PatternConstructor(
            p.cons, [relay.PatternVar(head), relay.PatternVar(tail)]),
        p.cons(head, copy(tail)))
    nil_clause = relay.Clause(relay.PatternConstructor(p.nil, []), p.nil())
    copy_def = relay.Function(
        [lst], relay.Match(lst, [cons_clause, nil_clause]),
        p.l(elem_ty), [elem_ty])
    mod[copy] = copy_def

    # Copy [1, 2] and check the resulting ADT structure and tensor contents.
    call1 = copy_def(p.cons(relay.const(1), p.cons(relay.const(2), p.nil())))
    val1 = run_as_python(call1, mod)
    assert_constructor_value(val1, p.cons, 2)
    assert_tensor_value(val1.fields[0], 1)
    assert_constructor_value(val1.fields[1], p.cons, 2)
    assert_tensor_value(val1.fields[1].fields[0], 2)
    assert_constructor_value(val1.fields[1].fields[1], p.nil, 0)

    # Copy a singleton list whose element is an empty tuple.
    call2 = copy_def(p.cons(relay.Tuple([]), p.nil()))
    val2 = run_as_python(call2, mod)
    assert_constructor_value(val2, p.cons, 2)
    assert_adt_len(val2.fields[0], 0)
    assert_constructor_value(val2.fields[1], p.nil, 0)
def test_adt_list():
    """Round-trip a compiled VM executable through save/load and run it.

    main() builds the prelude list [3, 2, 1]; after serialization and
    reloading, the VM must reproduce it element for element.
    """
    mod = relay.Module()
    p = Prelude(mod)
    # Build cons(3, cons(2, cons(1, nil))) by consing 1, 2, 3 in turn.
    lst = p.nil()
    for value in (1, 2, 3):
        lst = p.cons(relay.const(value), lst)
    mod["main"] = relay.Function([], lst)

    # Serialize the executable and reload it into a fresh VM on CPU.
    exe = create_exec(mod)
    code, lib = exe.save()
    des_exec = _vm.Executable.load_exec(code, lib)
    des_vm = _vm.VirtualMachine(des_exec)
    des_vm.init(tvm.cpu())

    result = veval(des_vm)
    # Each cons cell is a two-field ADT value: (head, tail).
    assert len(result) == 2
    assert len(result[1]) == 2
    assert len(result[1][1]) == 2
    heads = [
        result[0].asnumpy().tolist(),
        result[1][0].asnumpy().tolist(),
        result[1][1][0].asnumpy().tolist(),
    ]
    tvm.testing.assert_allclose(heads, np.array([3, 2, 1]))
def test_local_recursion():
    """A let-bound recursive function (copying a list) evaluates correctly.

    Exercises local recursion: the function refers to itself through the
    Var it is Let-bound to, rather than through a GlobalVar.
    """
    mod = relay.Module()
    p = Prelude(mod)
    arg = relay.Var('v')
    head = relay.Var('h')
    tail = relay.Var('t')
    loop = relay.Var('f')

    # loop(l) structurally rebuilds l, so it returns an equal list.
    cons_clause = relay.Clause(
        relay.PatternConstructor(
            p.cons, [relay.PatternVar(head), relay.PatternVar(tail)]),
        p.cons(head, loop(tail)))
    nil_clause = relay.Clause(relay.PatternConstructor(p.nil, []), p.nil())
    func = relay.Function([arg], relay.Match(arg, [cons_clause, nil_clause]))

    three = p.cons(relay.const(1),
                   p.cons(relay.const(2),
                          p.cons(relay.const(3), p.nil())))
    let = relay.Let(loop, func, loop(three))

    val = run_as_python(let, mod)
    # Expect cons(1, cons(2, cons(3, nil))).
    assert_constructor_value(val, p.cons, 2)
    assert_tensor_value(val.fields[0], 1)
    assert_constructor_value(val.fields[1], p.cons, 2)
    assert_tensor_value(val.fields[1].fields[0], 2)
    assert_constructor_value(val.fields[1].fields[1], p.cons, 2)
    assert_tensor_value(val.fields[1].fields[1].fields[0], 3)
    assert_constructor_value(val.fields[1].fields[1].fields[1], p.nil, 0)
def test_adt_list():
    """Serialize and deserialize a VM whose main() builds [3, 2, 1], then run it."""
    mod = relay.Module()
    p = Prelude(mod)
    # Build cons(3, cons(2, cons(1, nil))) by consing 1, 2, 3 in turn.
    lst = p.nil()
    for value in (1, 2, 3):
        lst = p.cons(relay.const(value), lst)
    mod["main"] = relay.Function([], lst)

    # Round-trip through the serializer/deserializer pair.
    vm = create_vm(mod)
    code, lib = serializer.Serializer(vm).serialize()
    des_vm = deserializer.Deserializer(code, lib).deserialize()

    result = veval(des_vm)
    # Each cons cell is a two-field ADT value: (head, tail).
    assert len(result) == 2
    assert len(result[1]) == 2
    assert len(result[1][1]) == 2
    heads = [
        result[0].asnumpy().tolist(),
        result[1][0].asnumpy().tolist(),
        result[1][1][0].asnumpy().tolist(),
    ]
    tvm.testing.assert_allclose(heads, np.array([3, 2, 1]))
def test_match_effect_exactly_once():
    """The scrutinee of a Match must be evaluated exactly once.

    Evaluating `data` appends one unit tuple to the ref-held list as a
    side effect.  If Match re-evaluated the scrutinee (e.g. once per
    clause), the list would grow past length one and a different clause
    would be selected.
    """
    mod = tvm.IRModule()
    p = Prelude(mod)
    # the list should be of length 1!
    # Unless we mistakenly execute the data clause more than once
    r = relay.Var("r")
    # Side-effecting scrutinee: push Tuple([]) onto the ref'd list, then read it.
    data = seq(relay.RefWrite(r, p.cons(relay.Tuple([]), relay.RefRead(r))),
               relay.RefRead(r))
    match = relay.Let(
        r, relay.RefCreate(p.nil()),
        relay.Match(
            data,
            [
                # Empty list -> 0 (would mean the write never ran).
                relay.Clause(relay.PatternConstructor(p.nil, []), relay.const(0)),
                # Exactly one element -> 1 (the expected case).
                relay.Clause(
                    relay.PatternConstructor(p.cons, [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(p.nil, [])
                    ]),
                    relay.const(1),
                ),
                # Longer list -> 2 (would mean the scrutinee ran more than once).
                relay.Clause(relay.PatternWildcard(), relay.const(2)),
            ],
        ),
    )
    match_val = run_as_python(match, mod)
    # Length-1 list selects the second clause, yielding 1.
    assert_tensor_value(match_val, 1)
def test_head_cons():
    """Partial evaluation reduces hd(cons(x, nil)) to x itself."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    t = TypeVar("t")
    x = Var("x", t)
    # hd applied to a singleton cons should fold away completely.
    f = Function([x], p.hd(p.cons(x, p.nil())), None, [t])
    res = dcpe(f, mod)
    assert tvm.ir.structural_equal(res, Function([x], x, t, [t]))
def test_head_cons():
    """dcpe simplifies head-of-singleton-cons down to the identity function."""
    mod = Module()
    p = Prelude(mod)
    elem_ty = TypeVar("t")
    arg = Var("x", elem_ty)
    # hd(cons(arg, nil)) must partially evaluate to arg itself.
    func = Function([arg], p.hd(p.cons(arg, p.nil())), None, [elem_ty])
    res = dcpe(func, mod)
    assert alpha_equal(res, Function([arg], arg, elem_ty, [elem_ty]))
def test_keep_only_referenced_prelude_functions():
    """RemoveUnusedFunctions keeps only prelude functions reachable from main.

    main() computes hd(tl(tl([0, 1, 2, 3, 4]))), so only `hd`, `tl`, and
    `main` itself should survive the pass.
    """
    mod = relay.Module()
    p = Prelude(mod)
    # Build cons(0, cons(1, ... cons(4, nil))) by consing front-to-back.
    lst = p.nil()
    for i in [4, 3, 2, 1, 0]:
        lst = p.cons(relay.const(i), lst)
    body = p.hd(p.tl(p.tl(lst)))
    mod["main"] = relay.Function([], body)
    mod = relay.transform.RemoveUnusedFunctions()(mod)
    # mod.functions.items() yields (GlobalVar, Function) pairs; collect the
    # surviving global names with a set comprehension instead of set([listcomp]).
    remaining = {gvar.name_hint for gvar, _ in mod.functions.items()}
    assert remaining == {'tl', 'hd', 'main'}
def test_adt_list():
    """The serialized-then-executed module reproduces the list [3, 2, 1]."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    # Build cons(3, cons(2, cons(1, nil))) by consing 1, 2, 3 in turn.
    lst = p.nil()
    for value in (1, 2, 3):
        lst = p.cons(relay.const(value), lst)
    mod["main"] = relay.Function([], lst)

    result = get_serialized_output(mod)
    # Each cons cell is a two-field ADT value: (head, tail).
    assert len(result) == 2
    assert len(result[1]) == 2
    assert len(result[1][1]) == 2
    heads = [
        result[0].asnumpy().tolist(),
        result[1][0].asnumpy().tolist(),
        result[1][1][0].asnumpy().tolist(),
    ]
    tvm.testing.assert_allclose(heads, np.array([3, 2, 1]))
def test_map():
    """Partially evaluating map over a literal 3-element list unrolls it.

    map(f, [1, 2, 3]) with f left abstract must reduce to the list
    [f(1), f(2), f(3)].
    """
    mod = Module()
    p = Prelude(mod)
    f = Var("f")

    def as_list(*elems):
        # Build cons(e0, cons(e1, ... nil)) by consing right-to-left.
        out = p.nil()
        for e in reversed(elems):
            out = p.cons(e, out)
        return out

    orig = p.map(f, as_list(const(1), const(2), const(3)))
    expected = as_list(f(const(1)), f(const(2)), f(const(3)))
    assert alpha_equal(dcpe(orig, mod=mod), expected)
def test_multiple_entry_functions():
    """RemoveUnusedFunctions with several entry points keeps everything reachable.

    main1 pulls in the prelude's `hd` and `tl`; main2 calls the separate
    global `id_func`.  With both mains listed as entry points, all five
    globals must survive.
    """
    mod = relay.Module()
    p = Prelude(mod)
    # main1: hd(tl(tl([0, 1, 2, 3, 4]))).
    lst = p.nil()
    for i in [4, 3, 2, 1, 0]:
        lst = p.cons(relay.const(i), lst)
    body = p.hd(p.tl(p.tl(lst)))
    mod["main1"] = relay.Function([], body)

    # main2: wraps an identity function registered under its own GlobalVar.
    x = relay.var("x", shape=(1, 16))
    id_func = relay.Function([x], x)
    id_name = relay.GlobalVar('id_func')
    mod[id_name] = id_func
    mod["main2"] = relay.Function([x], id_name(x))

    mod = relay.transform.RemoveUnusedFunctions(['main1', 'main2'])(mod)
    # mod.functions.items() yields (GlobalVar, Function) pairs; collect the
    # surviving global names with a set comprehension instead of set([listcomp]).
    remaining = {gvar.name_hint for gvar, _ in mod.functions.items()}
    assert remaining == {'tl', 'hd', 'main2', 'id_func', 'main1'}
def test_map():
    """Mapping a global identity function over a literal list dcpe's to the list.

    f is the polymorphic identity registered as a GlobalVar, so
    map(f, [1, 2, 3]) must reduce to [1, 2, 3] itself.
    """
    mod = Module()
    p = Prelude(mod)
    f = GlobalVar("f")
    t = TypeVar("t")
    a = Var("a", t)
    # f : forall t. t -> t, the identity.
    mod[f] = Function([a], a, t, [t])

    literal = p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil())))
    expected = Function([], literal)
    # Registering first, then reading back, yields the module's checked copy.
    mod["main"] = expected
    expected = mod["main"]

    mapped = p.map(f, p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil()))))
    res = dcpe(Function([], mapped), mod=mod)
    assert alpha_equal(res.body, expected.body)
def test_head_cons():
    """A let-bound local head function applied to cons(x, nil) reduces to x."""
    mod = relay.Module()
    p = Prelude(mod)

    def make_hd():
        # Local re-implementation of list head via a single-clause match.
        a = relay.TypeVar("a")
        lst = relay.Var("x", p.l(a))
        y = relay.Var("y")
        z = relay.Var("z")
        cons_case = relay.Clause(
            relay.PatternConstructor(
                p.cons, [relay.PatternVar(y), relay.PatternVar(z)]),
            y)
        return relay.Function([lst], relay.Match(lst, [cons_case]), a, [a])

    t = relay.TypeVar("t")
    x = relay.Var("x", t)
    hd = relay.Var("hd")
    body = relay.Let(hd, make_hd(), hd(p.cons(x, p.nil())))
    f = relay.Function([x], body, None, [t])
    f = infer_type(f, mod=mod)
    res = dcpe(f)
    assert alpha_equal(res, relay.Function([x], x, t, [t]))
def get_lstm(batch_size, num_hidden, dtype):
    '''Returns a module where the main() function is an LSTM RNN,
    returning a tuple of two items where the first is the list of
    outputs and the second is the final hidden state'''
    mod = relay.Module()
    p = Prelude(mod)
    # Types: inputs are (batch, hidden); weights pack the four LSTM gates,
    # hence the 4 * num_hidden leading dimension.
    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4 * num_hidden, ), dtype)
    # state = (output, cell memory); cell result = (output, new state).
    state_type = relay.TupleType([input_type, input_type])
    cell_type = relay.TupleType([input_type, state_type])
    # Fold accumulator: (list of outputs so far, current state).
    state_var_type = relay.TupleType([p.l(input_type), state_type])

    input_list = relay.Var('input_list', p.l(input_type))
    init_states = relay.Var('init_states', state_type)

    # Single LSTM cell shared by every fold iteration.
    cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_cell")

    # Weight/bias parameters of the 'rnn' function (input-to-hidden and
    # hidden-to-hidden), closed over by the iteration function below.
    i2h_weight = relay.Var('i2h_weight', weight_type)
    i2h_bias = relay.Var('i2h_bias', bias_type)
    h2h_weight = relay.Var('h2h_weight', weight_type)
    h2h_bias = relay.Var('h2h_bias', bias_type)

    state_var = relay.Var('state_var', state_var_type)
    input_var = relay.Var('input_var', input_type)
    cell_out = relay.Var('cell_out', cell_type)
    # One fold step: run the cell on (current state, next input), prepend
    # the cell's output to the accumulated output list, carry the new state.
    iteration = relay.Function([state_var, input_var],
                               relay.Let(
                                   cell_out,
                                   cell_fn(input_var,
                                           relay.TupleGetItem(state_var, 1),
                                           i2h_weight, i2h_bias,
                                           h2h_weight, h2h_bias),
                                   relay.Tuple([
                                       p.cons(relay.TupleGetItem(cell_out, 0),
                                              relay.TupleGetItem(state_var, 0)),
                                       relay.TupleGetItem(cell_out, 1)
                                   ])),
                               state_var_type)

    fold_res = relay.Var('fold_res', state_var_type)
    # 'rnn': fold the iteration over the input list starting from
    # (nil, init_states); outputs accumulate in reverse, so reverse them
    # before returning (outputs, final state).
    mod['rnn'] = relay.Function(
        [i2h_weight, i2h_bias, h2h_weight, h2h_bias, init_states, input_list],
        relay.Let(
            fold_res,
            p.foldl(iteration,
                    relay.Tuple([p.nil(), init_states]),
                    input_list),
            relay.Tuple([
                p.rev(relay.TupleGetItem(fold_res, 0)),
                relay.TupleGetItem(fold_res, 1)
            ])),
        state_var_type)

    # 'main': invoke 'rnn' with random weights/biases, a random initial
    # (output, memory) state, and a single-element input list.
    mod['main'] = relay.Function(
        [],
        relay.Call(mod.get_global_var('rnn'), [
            relay.const(generate_random_tensor(weight_type)),
            relay.const(generate_random_tensor(bias_type)),
            relay.const(generate_random_tensor(weight_type)),
            relay.const(generate_random_tensor(bias_type)),
            relay.Tuple([
                relay.const(generate_random_tensor(input_type)),
                relay.const(generate_random_tensor(input_type))
            ]),
            p.cons(relay.const(generate_random_tensor(input_type)), p.nil())
        ]))
    return mod