def compute(self, input_size, hidden_size, output_size):
    """Build the Relay forward graph for one RNN step and its driver loop.

    Constructs a single-step function (category + one-hot input + hidden
    -> new hidden, predicted letter index, end-of-sequence flag) and wraps
    it in a recursive loop that collects predicted letters into a prelude
    list.

    Returns a 3-tuple: (loop input vars, the Let-bound loop expression, None).
    """
    # Free variables of the per-step function; also stashed on self so
    # callers can feed them.
    self.category_var = category = relay.var('category', shape=(1, data.N_CATEGORIES))
    self.inp_topi_var = inp_topi = relay.var('input', shape=(), dtype='int32')
    self.hidden_var = hidden = relay.var('hidden', shape=(1, hidden_size))
    self.hidden = initialize(self.hidden_var)
    n_letter = relay.const(data.N_LETTERS)
    # Identity matrix used to one-hot encode the input letter index.
    # NOTE(review): 58 is hard-coded — presumably data.N_LETTERS == 58; confirm.
    one_diag = relay.const(np.diag(np.ones(58)).astype('float32'))
    # Multiplying by a (1,)-shaped constant lifts the scalar index so the
    # take() result keeps a leading batch dimension.
    boxed_one = relay.const(np.array([1]).astype('int32'))
    inp = op.take(one_diag, op.multiply(boxed_one, inp_topi), axis=0)
    combined = op.concatenate(
        [op.concatenate([category, inp], axis=1), hidden], axis=1)
    hidden = self.linear(data.N_CATEGORIES + input_size + hidden_size,
                         hidden_size, combined, name='i2h')
    output = self.linear(data.N_CATEGORIES + input_size + hidden_size,
                         output_size, combined, name='i2o')
    output_combined = op.concatenate([hidden, output], axis=1)
    output = self.linear(hidden_size + output_size, output_size,
                         output_combined, name='o2o')
    # output = op.nn.dropout(output, 0.1) #attributes has not been registered
    output = op.nn.log_softmax(output, axis=1)
    topi = op.argmax(output)
    # Step result: (new hidden, predicted index, True iff the prediction is
    # the last letter, i.e. the end-of-sequence marker).
    body = relay.Tuple([
        hidden, topi,
        op.equal(topi, op.subtract(n_letter, relay.const(1)))
    ])
    fwd_para = [self.category_var, self.inp_topi_var, self.hidden_var]
    fwd_func = relay.Function(fwd_para, body)
    self.fwd = relay.Var('fwd')
    # Remaining-iterations counter; shadows the builtin `max` (kept as-is).
    max = relay.var('max', shape=(), dtype='int32')
    inp_para = [max] + [copy_var(v) for v in fwd_para]
    fwd_res = self.fwd(*inp_para[1:])
    fwd_res_0 = relay.TupleGetItem(fwd_res, 0)  # new hidden state
    fwd_res_1 = relay.TupleGetItem(fwd_res, 1)  # predicted letter index
    fwd_res_2 = relay.TupleGetItem(fwd_res, 2)  # end-of-sequence flag
    # Keep recursing: cons the prediction onto the list produced by the
    # next step (decremented counter, same category, prediction fed back
    # as the next input, updated hidden).
    else_else_branch = self.prelude.cons(
        fwd_res_1,
        self.recurse(op.subtract(max, relay.const(1)),
                     inp_para[1],
                     fwd_res_1,
                     fwd_res_0))
    # Stop with an empty list on end-of-sequence ...
    else_branch = relay.If(fwd_res_2, self.prelude.nil(), else_else_branch)
    # ... or when the iteration budget is exhausted.
    body = relay.If(op.equal(max, relay.const(0)),
                    self.prelude.nil(),
                    else_branch)
    return inp_para, relay.Let(self.fwd, fwd_func, body), None
def test_triangle_number():
    """Partial evaluation folds the recursive triangle-number program
    f(10) = 10 + 9 + ... + 1 down to the constant 55."""
    int_scalar = relay.TensorType([], "int32")
    x = Var("x", int_scalar)
    f_var = Var("f")
    recursive_body = If(op.equal(x, const(0)),
                        const(0),
                        x + f_var(x - const(1)))
    f = Function([x], recursive_body)
    program = Let(f_var, f, f_var(const(10)))
    orig = run_infer_type(program)
    assert_alpha_equal(dcpe(orig), const(55))
def test_equal():
    """Type inference on equal(i32 scalar, i32 const) yields (i32) -> bool."""
    i = relay.var('i', shape=[], dtype='int32')
    comparison = op.equal(i, relay.const(0, dtype='int32'))
    ft = relay.ir_pass.infer_type(relay.Function([i], comparison))
    expected = relay.FuncType([relay.scalar_type('int32')],
                              relay.scalar_type('bool'))
    assert ft.checked_type == expected
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);

    Dead-code elimination must keep the live recursive binding unchanged
    and drop it entirely when the let body does not use f.
    """
    f = relay.Var("f")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    # n is declared i32, so the decrement and the call argument must be
    # integer constants (1.0 / 2.0 would be float32 and mismatch the type).
    funcbody = relay.If(
        equal(n, relay.const(0)), data,
        relay.Call(f, [subtract(n, relay.const(1)), log(data)]))
    value = relay.Function([n, data], funcbody, e.float32, [])
    # Bind f to the Function (value), not its bare body, so the recursive
    # call refers to a callable; previously `value` was built but unused.
    orig = relay.Let(
        f, value, relay.Call(f, [relay.const(2), relay.const(10000.0)]))
    assert alpha_equal(dead_code_elimination(orig), orig)
    assert alpha_equal(dead_code_elimination(relay.Let(f, value, e.three)),
                       e.three)
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);
    """
    let_var = relay.Var("f")
    # NOTE(review): the recursive call targets f1, which stays free inside
    # the function body — presumably so the let-bound var never occurs in
    # its own value; preserved exactly.
    free_callee = relay.Var("f1")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    recur = relay.Call(free_callee,
                       [subtract(n, relay.const(1)), log(data)])
    funcbody = relay.If(equal(n, relay.const(0)), data, recur)
    value = relay.Function([n, data], funcbody, e.float32, [])
    call = relay.Call(let_var, [relay.const(2), relay.const(10000.0)])
    orig = relay.Let(let_var, value, call)
    # The binding is live (used by the call), so DCE must keep it.
    dced = run_opt_pass(orig, transform.DeadCodeElimination())
    orig = run_opt_pass(orig, transform.InferType())
    assert graph_equal(dced, orig)
    # With a body that ignores f, DCE must remove the binding entirely.
    dced = run_opt_pass(relay.Let(let_var, value, e.three),
                        transform.DeadCodeElimination())
    assert alpha_equal(dced, e.three)
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);
    """
    f = relay.Var("f")
    n = relay.Var("n")
    # Renamed from `np` to avoid shadowing the numpy alias.
    n_param = relay.Param(n, e.int32)
    data = relay.Var("data")
    datap = relay.Param(data, e.float32)
    # n is declared i32, so decrement and call with integer constants
    # (convert(1.0) / convert(2.0) would produce float mismatches).
    funcbody = relay.If(equal(n, convert(0)),
                        data,
                        f(subtract(n, convert(1)), log(data)))
    value = relay.Function([n_param, datap], e.float32, funcbody, [])
    # Bind f to the Function (value), not its bare body, so the recursive
    # call refers to a callable; previously `value` was built but unused.
    orig = relay.Let(f, value, f(convert(2), convert(10000.0)), e.float32)
    assert alpha_equal(dead_code_elimination(orig), orig)
    assert alpha_equal(
        dead_code_elimination(relay.Let(f, value, e.three, e.float32)),
        e.three)
def test_equal():
    """equal(i32 scalar, i32 const) must infer the type (i32) -> bool."""
    i = relay.var("i", shape=[], dtype="int32")
    func = relay.Function([i], op.equal(i, relay.const(0, dtype="int32")))
    ft = run_infer_type(func)
    want = relay.FuncType([relay.scalar_type("int32")],
                          relay.scalar_type("bool"))
    assert ft.checked_type == want
def test_equal():
    """Evaluating equal(0, 0) returns True."""
    lhs = relay.var('i', shape=[], dtype='int32')
    rhs = relay.var('i', shape=[], dtype='int32')
    func = relay.Function([lhs, rhs], op.equal(lhs, rhs),
                          ret_type=relay.TensorType([], 'bool'))
    check_eval(func, [relay.const(0), relay.const(0)], True)
def use_f(func):
    """Let-bind the recursive countdown-log function f(n, data) and apply
    *func* (a callback receiving the bound Var) to build the let body."""
    f = relay.Var("f")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    recursive_call = relay.Call(f, [subtract(n, relay.const(1)), log(data)])
    body = relay.If(equal(n, relay.const(0)), data, recursive_call)
    value = relay.Function([n, data], body, e.float32, [])
    return relay.Let(f, value, func(f))
def test_simple_loop():
    """Recursive sum over a global function: sum_up(10) == 1 + ... + 10."""
    env = relay.env.Environment({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(op.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        decremented = op.subtract(i, relay.const(1, dtype='int32'))
        sb.ret(op.add(relay.Call(sum_up, [decremented]), i))
    env[sum_up] = relay.Function([i], sb.get(),
                                 ret_type=relay.TensorType([], 'int32'))
    check_eval(sum_up, [np.array(10, dtype='int32')],
               sum(range(1, 11)), env=env)
def test_loop():
    """Tail-recursive sum with an accumulator: sum_up(10, 0) == 55."""
    env = relay.env.Environment({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(op.equal(i, relay.const(0))):
        sb.ret(accum)
    with sb.else_scope():
        next_i = op.subtract(i, relay.const(1))
        next_accum = op.add(accum, i)
        sb.ret(relay.Call(sum_up, [next_i, next_accum]))
    env[sum_up] = relay.Function([i, accum], sb.get())
    args = [np.array(10, dtype='int32'), np.array(0, dtype='int32')]
    check_eval(sum_up, args, sum(range(1, 11)), env=env)
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);

    DCE must keep the live recursive binding and drop an unused one.
    """
    f = relay.Var("f")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    # n is declared i32: use integer constants for the decrement and the
    # call argument (1.0 / 2.0 would be float32 and mismatch the type).
    funcbody = relay.If(equal(n, relay.const(0)),
                        data,
                        relay.Call(f, [subtract(n, relay.const(1)),
                                       log(data)]))
    value = relay.Function([n, data], funcbody, e.float32, [])
    orig = relay.Let(f, value,
                     relay.Call(f, [relay.const(2), relay.const(10000.0)]))
    assert alpha_equal(dead_code_elimination(orig), orig)
    assert alpha_equal(dead_code_elimination(relay.Let(f, value, e.three)),
                       e.three)
def test_recursion():
    """
    Program:
       def f(n: i32, data: f32) -> f32 {
          if (n == 0) {
              return f(n - 1, log(data));
          } else {
              return data;
          }
       }
       f(2, 10000);
    """
    b = IRBuilder()
    f = b.global_var('f')
    n = b.param('n', ty='int32')
    data = b.param('data', ty='float32')
    with b.decl(f, n, data):
        with b.if_scope(equal(n, convert(0))):
            # NOTE(review): recursing in the n == 0 branch matches the
            # docstring above but inverts the usual countdown shape; only
            # type inference is exercised here, so it never executes.
            b.ret(f(subtract(n, convert(1)), log(data)))
        with b.else_scope():
            b.ret(data)
    # NOTE(review): 2.0 is a float literal passed where n is int32 —
    # presumably convert() coerces or inference tolerates it; confirm
    # against the IRBuilder API.
    b.ret(f(convert(2.0), convert(10000.0)))
    # Only the declared signature of f is checked.
    assert_decl_has_type(b.env, 'f', func_type(
        ['int32', 'float32'], 'float32'))
def test_equal():
    # equal() produces a bool tensor, but the Function below declares an
    # int32 return type, so type inference should reject it.
    i = relay.var('i', shape=[], dtype='int32')
    eq = op.equal(i, relay.const(0, dtype='int32'))
    # This should fail ....
    # NOTE(review): as written, nothing runs type inference or asserts
    # that a failure is raised — this test can never actually fail; it
    # should wrap an infer_type call in a raises-check.
    func = relay.Function([i], eq, ret_type=relay.TensorType([], 'int32'))