def compute(self, input_size, hidden_size, output_size):
    self.category_var = category = relay.var('category', shape=(1, data.N_CATEGORIES))
    self.input_var = inp = relay.var('input', shape=(1, input_size))
    self.hidden_var = hidden = relay.var('hidden', shape=(1, hidden_size))
    self.hidden = initialize(self.hidden_var)
    combined = op.concatenate([op.concatenate([category, inp], axis=1), hidden], axis=1)
    hidden = self.linear(data.N_CATEGORIES + input_size + hidden_size, hidden_size, combined, name='i2h')
    output = self.linear(data.N_CATEGORIES + input_size + hidden_size, output_size, combined, name='i2o')
    output_combined = op.concatenate([hidden, output], axis=1)
    output = self.linear(hidden_size + output_size, output_size, output_combined, name='o2o')
    # output = op.nn.dropout(output, 0.1)  # dropout isn't simplified, commented out for now
    output = op.nn.log_softmax(output, axis=1)
    return [self.category_var, self.input_var, self.hidden_var], relay.Tuple([output, hidden]), None
def compute(self, input_size, hidden_size, output_size):
    self.category_var = category = relay.var('category', shape=(1, data.N_CATEGORIES))
    self.inp_topi_var = inp_topi = relay.var('input', shape=(), dtype='int32')
    self.hidden_var = hidden = relay.var('hidden', shape=(1, hidden_size))
    self.hidden = initialize(self.hidden_var)
    n_letter = relay.const(data.N_LETTERS)
    one_diag = relay.const(np.diag(np.ones(58)).astype('float32'))
    boxed_one = relay.const(np.array([1]).astype('int32'))
    # one-hot encode the letter index by selecting a row of the identity matrix;
    # multiplying by the (1,)-shaped constant lifts the scalar index to shape (1,)
    inp = op.take(one_diag, op.multiply(boxed_one, inp_topi), axis=0)
    combined = op.concatenate([op.concatenate([category, inp], axis=1), hidden], axis=1)
    hidden = self.linear(data.N_CATEGORIES + input_size + hidden_size, hidden_size, combined, name='i2h')
    output = self.linear(data.N_CATEGORIES + input_size + hidden_size, output_size, combined, name='i2o')
    output_combined = op.concatenate([hidden, output], axis=1)
    output = self.linear(hidden_size + output_size, output_size, output_combined, name='o2o')
    # output = op.nn.dropout(output, 0.1)  # dropout attributes have not been registered
    output = op.nn.log_softmax(output, axis=1)
    topi = op.argmax(output)
    # one step returns (next hidden state, sampled letter index, end-of-sequence flag)
    body = relay.Tuple([hidden, topi, op.equal(topi, op.subtract(n_letter, relay.const(1)))])
    fwd_para = [self.category_var, self.inp_topi_var, self.hidden_var]
    fwd_func = relay.Function(fwd_para, body)
    self.fwd = relay.Var('fwd')
    max = relay.var('max', shape=(), dtype='int32')
    inp_para = [max] + [copy_var(v) for v in fwd_para]
    fwd_res = self.fwd(*inp_para[1:])
    fwd_res_0 = relay.TupleGetItem(fwd_res, 0)
    fwd_res_1 = relay.TupleGetItem(fwd_res, 1)
    fwd_res_2 = relay.TupleGetItem(fwd_res, 2)
    # recurse until the EOS letter is produced or `max` steps are exhausted,
    # consing each sampled letter onto a prelude list
    else_else_branch = self.prelude.cons(
        fwd_res_1,
        self.recurse(op.subtract(max, relay.const(1)), inp_para[1], fwd_res_1, fwd_res_0))
    else_branch = relay.If(fwd_res_2, self.prelude.nil(), else_else_branch)
    body = relay.If(op.equal(max, relay.const(0)), self.prelude.nil(), else_branch)
    return inp_para, relay.Let(self.fwd, fwd_func, body), None
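# Illustrative sketch (not from the original file; `idx`, `one_hot`, and the
# driver below are hypothetical names): the one-hot trick above is easy to
# miss, so here is a minimal standalone version, assuming only numpy and
# tvm.relay -- `take` on a constant identity matrix turns an int32 letter
# index into a one-hot float32 row vector.
import numpy as np
import tvm
from tvm import relay

one_diag = relay.const(np.diag(np.ones(58)).astype('float32'))  # 58-letter alphabet
boxed_one = relay.const(np.array([1]).astype('int32'))
idx = relay.var('idx', shape=(), dtype='int32')
# multiplying by the (1,)-shaped constant lifts the scalar index to shape (1,),
# so `take` returns a (1, 58) row rather than a bare (58,) vector
one_hot = relay.op.take(one_diag, relay.op.multiply(boxed_one, idx), axis=0)
mod = tvm.IRModule.from_expr(relay.Function([idx], one_hot))
out = relay.create_executor('debug', mod=mod).evaluate()(np.int32(3))
# out is a (1, 58) array with 1.0 at column 3 and 0.0 elsewhere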
def test_concat():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    rt = relay.TensorType((10, 20), dtype)
    x = relay.var("x", t)
    y = op.concatenate([x, x], axis=1)
    func = relay.Function([x], y)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    tvm.ir.assert_structural_equal(
        back_func.checked_type,
        relay.FuncType([t], relay.TupleType([rt, relay.TupleType([t])])))
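# Illustrative driver (not part of the original test): evaluating the backward
# function shows what the asserted type means in practice. gradient() yields a
# function returning (forward_output, (grad_per_input,)); with a ones adjoint
# seed and both output slices being copies of x, every gradient entry is 2.0.
import numpy as np
from tvm import relay
from tvm.relay import op
from tvm.relay.testing import run_infer_type
from tvm.relay.transform import gradient

x = relay.var('x', relay.TensorType((10, 10), 'float32'))
func = run_infer_type(relay.Function([x], op.concatenate([x, x], axis=1)))
back_func = run_infer_type(gradient(func))
ex = relay.create_executor()
forward, (grad,) = ex.evaluate(back_func)(np.ones((10, 10), dtype='float32'))
# forward has shape (10, 20); grad has shape (10, 10) and is all 2.0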
def build_impl(self, input_size, memory_size, dtype="float32"):
    l = self.input(Var("l", self.p.l(TensorType(shape=(1, input_size), dtype=dtype))))

    def LSTM(l):
        return LSTMTransformer(input_size=input_size,
                               memory_size=memory_size,
                               dtype=dtype)(l)

    fwd = LSTM(l)
    rev = LSTM(self.p.rev(l))
    lhs = op.concatenate([TupleGetItem(fwd, 0), TupleGetItem(rev, 0)], axis=1)
    t = TensorType(shape=(1, memory_size), dtype=dtype)
    x = Var("x", TupleType([t, t]))  # cannot infer here
    rhs = self.p.map(
        Function([x], op.concatenate([TupleGetItem(x, 0), TupleGetItem(x, 1)], axis=1)),
        self.p.zip(TupleGetItem(fwd, 1), TupleGetItem(rev, 1)))
    return Tuple([lhs, rhs])
def test_concat():
    """
    Program:
       def try_concat2(x: Float(3, 2), y: Float(2, 2)) -> Float(5, 2) {
           return concatenate((x, y), axis=0);
       }
    """
    ib = IRBuilder()
    try_concat2 = ib.global_var('try_concat2')
    x = ib.param('x', ty=tensor_type(3, 2))
    y = ib.param('y', ty=tensor_type(2, 2))
    with ib.decl(try_concat2, x, y):
        ib.ret(concatenate((x, y), axis=0))
    fn_ty = func_type([tensor_type(3, 2), tensor_type(2, 2)], tensor_type(5, 2))
    assert_decl_has_type(ib.env, try_concat2, fn_ty)
def aten_cat(inputs, attributes, scope):
    tensors, dim = inputs
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        assert dim > 0
        layer = net.add_concatenation(tensors)
        layer.axis = dim - 1  # TensorRT doesn't support concatenation on the batch axis
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        return [_op.concatenate(tuple(tensors), axis=dim)]
    res = torch.cat(tensors, dim=dim)
    return [res]
def test_concat():
    t = relay.TensorType([10], "float32")
    x = Var("x", t)
    y = Var("x", t)
    orig = run_infer_type(Function([x, y], op.concatenate([x, y], axis=0)))
    tvm.ir.assert_structural_equal(dcpe(orig), orig)
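# For reference, dcpe here is presumably the test helper that runs partial
# evaluation followed by dead-code elimination; a rough reconstruction under
# that assumption (the pass names are the real tvm.relay.transform ones, but
# this body is a sketch, not the original helper):
import tvm
from tvm.relay import transform

def dcpe(expr):
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential([
        transform.InferType(),
        transform.PartialEvaluate(),
        transform.DeadCodeElimination(inline_once=True),
        transform.InferType(),
    ])
    return seq(mod)['main']
# The assertion above then says that concatenating two free variables leaves
# the partial evaluator nothing to simplify: the function comes back unchanged.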
def _concatenate(children, attrs, odtype='float32'):
    axis = attrs.get_int('axis', 1)
    return op.concatenate(children, axis)
def test_concat():
    t = relay.TensorType([10], "float32")
    x = Var("x", t)
    y = Var("x", t)
    orig = run_infer_type(Function([x, y], op.concatenate([x, y], axis=0)))
    assert_alpha_equal(orig, dcpe(orig))