def eval_binary_op(nast, env):
    lv = eval_ast(nast.left, env)
    rv = eval_ast(nast.right, env)
    res = new_tensor(['TODO'])
    isfloor = False
    if isinstance(nast.op, gast.Add):
        optype = "Add"

        def opfun(a, b): return a + b

    elif isinstance(nast.op, gast.Sub):
        optype = "Sub"

        def opfun(a, b): return a - b

    elif isinstance(nast.op, gast.Mult):
        optype = "Mul"

        def opfun(a, b): return a * b

    elif isinstance(nast.op, gast.FloorDiv):
        optype = "Div"
        isfloor = True

        def opfun(a, b): return a // b

    elif isinstance(nast.op, gast.Div):
        optype = "Div"

        def opfun(a, b): return a / b

    else:
        raise Exception('unknown operator', nast.op)

    # code.InteractiveConsole({'lv': lv, 'rv': rv}).interact()

    # TODO(hamaji): Reconsider if constant folding is necessary in CH2O.
    # if not istensor(lv) and not istensor(rv):
    #     # Perform constant folding.
    #     return opfun(lv, rv)

    lv.to_value_info(env)
    rv.to_value_info(env)
    if lv.is_sequence() and rv.is_sequence():
        # seq + seq: concatenate by looping over rv and appending each of
        # its elements to lv, which is the initial loop-carried state.
        assert optype == 'Add'
        lv = lv.to_sequence(env)
        rv = rv.to_sequence(env)

        state = new_sequence(name='seq_plus_state')
        cond = new_tensor(name='seq_plus_cond')
        index = new_tensor(name='seq_plus_index')
        elem = new_tensor(name='seq_plus_elem')
        out_state = new_tensor(name='seq_plus_out_state')
        nodes = []
        nodes.append(helper.make_node(
            'ChainerSequenceLookup',
            inputs=[rv.name, index.name],
            outputs=[elem.name]))
        nodes.append(helper.make_node(
            'ChainerSequenceAppend',
            inputs=[state.name, elem.name],
            outputs=[out_state.name]))
        loop = make_graph(
            nodes,
            "SeqPlus",
            [index, cond, state],
            [cond, out_state],
        )

        length = env.calc('ChainerGenericLen', inputs=[rv.name])
        res = new_sequence(name='seq_plus')
        env.addnode('Loop',
                    inputs=[length.name, "", lv.name],
                    outputs=[res.name],
                    body=loop)
    else:
        if optype == 'Div' and not isfloor:
            # True division: cast both operands to float so Div yields a
            # floating-point result.
            lv = castto(lv.to_tensor(env), TensorProto.FLOAT, env)
            rv = castto(rv.to_tensor(env), TensorProto.FLOAT, env)
        else:
            lv = lv.to_tensor(env)
            rv = rv.to_tensor(env)
        res = env.calc(
            optype,
            inputs=[lv.name, rv.name],
        )

    if isfloor:
        # FloorDiv: apply Floor to the Div result.
        res = env.calc(
            "Floor",
            inputs=[res.name],
        )

    return res
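# Illustrative sketch (not used by the compiler): the Loop emitted above
# follows the standard ONNX Loop contract, where the body graph takes
# (iteration_num, condition, loop-carried state...) and returns
# (condition, updated state...).  The hypothetical graph below wires up the
# same pattern with plain onnx.helper calls for a loop that adds 1.0 to a
# scalar state on every iteration; all names here are invented for the
# example.
def _example_loop_node_sketch():
    iter_num = helper.make_tensor_value_info('iter_num', TensorProto.INT64, [])
    cond_in = helper.make_tensor_value_info('cond_in', TensorProto.BOOL, [])
    state_in = helper.make_tensor_value_info('state_in', TensorProto.FLOAT, [])
    cond_out = helper.make_tensor_value_info('cond_out', TensorProto.BOOL, [])
    state_out = helper.make_tensor_value_info('state_out', TensorProto.FLOAT, [])

    body = helper.make_graph(
        [
            # The condition is carried through unchanged; the loop is
            # driven by the trip count alone.
            helper.make_node('Identity', ['cond_in'], ['cond_out']),
            helper.make_node('Add', ['state_in', 'one'], ['state_out']),
        ],
        'ExampleLoopBody',
        [iter_num, cond_in, state_in],
        [cond_out, state_out],
        initializer=[helper.make_tensor('one', TensorProto.FLOAT, [], [1.0])],
    )
    # As in the SeqPlus case above, "" leaves the optional condition input
    # of the Loop node unset, so only the trip count terminates the loop.
    return helper.make_node(
        'Loop',
        inputs=['trip_count', '', 'init_state'],
        outputs=['final_state'],
        body=body)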
def compile_model(model, inputs):
    # return helper.make_graph([], 'dummy', [], [])

    init_id2name(model)
    # code.InteractiveConsole({'mo': model}).interact()
    env = Env(sys.modules[model.__module__])
    molk = User_Defined_Link(model, env)

    input_tensors = []
    for i in inputs:
        # TODO(hamaji): Set valid type info.
        if isinstance(i, (list, tuple)):
            x = new_sequence()
        elif i is None:
            x = new_tensor()
        else:
            if isinstance(i, int):
                i = np.array(i)
            else:
                # TODO(durswd): This code requires chainer6.x
                i = chainer.cuda.to_cpu(i)

            x = new_tensor(dims=i.shape, dtype=i.dtype)
        input_tensors.append(x)

    input_values = [Value(i) for i in input_tensors]
    v = molk.call(input_values, [], env)

    dprint('output_tensors', v)
    if isinstance(v.value, tuple):
        output_tensors = list(v.value)  # Unpack the tuple of outputs.
    else:
        output_tensors = [v]  # Assume a single output tensor for now.

    # print('env.init_tensors ', env.init_tensors)
    input_tensors += list(env.init_tensors.values())

    for f in env.restore_funcs:
        f()

    # for no in env.nodes:
    #     print(no.op_type)
    # print(env.nodes)
    # print(input_tensors)
    # print(output_tensors)
    # for ch in model.namedparams():
    #     print(ch)

    outputs_vi = [o.to_value_info(env) for o in output_tensors]
    graph = make_graph(env.nodes, 'name_is_unknown_now',
                       input_tensors, outputs_vi)

    # Attach an initializer to each input that is a weight; leave variable
    # dimensions such as batch_size and input_size untouched as much as
    # possible.

    # The graph cannot be checked once Chainer compiler's custom nodes are
    # used...
    # checker.check_graph(graph)
    mo = helper.make_model(graph)

    # print(mo)
    return mo
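# A minimal usage sketch, assuming Chainer and NumPy are available in the
# caller's environment.  _TinyModel is a hypothetical example Chain, not
# something defined elsewhere in CH2O; the point is only that compile_model
# takes an uncalled Link plus example inputs and returns an onnx ModelProto.
def _example_compile_model_usage():
    import chainer
    import chainer.links as L

    class _TinyModel(chainer.Chain):
        def __init__(self):
            super().__init__()
            with self.init_scope():
                self.l1 = L.Linear(3, 2)

        def forward(self, x):
            return self.l1(x)

    x = np.zeros((1, 3), dtype=np.float32)
    return compile_model(_TinyModel(), [x])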
def eval_for(nast, env):
    assert nast.orelse == []
    ite = eval_ast(nast.iter, env)

    # A hack for ResNet50.
    # TODO(hamaji): Come up with a sophisticated way.
    # TODO(hamaji): This code doesn't handle scope properly, I think.
    if (isinstance(ite.value, types.GeneratorType) and
            'ChainList.children' in str(ite.value)):
        # For now, just unroll the for loop eagerly.
        tg = nast.target.id
        env.set_var(tg, Value(None))
        for v in ite.value:
            env.set_var(tg, _value(v))
            eval_ast(nast.body, env)
            # print('looping', env.vars.keys())

        env.pop_var(tg)
        return None

    if ite.is_py:
        ite = Value([Value(v) for v in ite.value])

    assert isinstance(nast.target, gast.Name)
    x = nast.target.id

    # Create a new env; the child graph is built from the nodes produced
    # while evaluating the loop body.
    localenv = env.new_block()

    cnt = new_tensor()
    gtx = new_sequence()
    localenv.set_var(
        x,
        _value(localenv.calc(
            "ChainerSequenceLookup",
            inputs=[gtx.name, cnt.name],
        )))
    ty = eval_ast(nast.body, localenv)
    assert ty.is_none()

    in_out = _find_in_out(localenv, env)

    input_values = []
    output_values = []
    final_outputs = []
    final_setattrs = []
    for key, (iv, ov, setattr_info) in in_out.items():
        if ov is None:
            continue
        if iv is None:
            iv = Value(False)
        out = ov.copy(env, name=key)
        final_outputs.append((key, out.value))
        if setattr_info is not None:
            final_setattrs.append(tuple(list(setattr_info) + [out]))
        input_values.append(iv.to_value_info(env))
        output_values.append(ov.to_value_info(env))

    cond = new_tensor(name='loop_cond')
    localgraph = make_graph(
        localenv.nodes,
        "Loop_subgraph",
        [cnt, cond, gtx] + input_values,
        [cond, gtx] + output_values,
    )

    mtc = env.calc(
        "ChainerGenericLen",
        inputs=[ite.to_sequence(env).name],
    )

    env.addnode(
        'Loop',
        inputs=([mtc.name, "", ite.to_sequence(env).name] +
                [i.name for i in input_values]),
        outputs=([new_tensor('out_generator').name] +
                 [o.name for _, o in final_outputs]),
        body=localgraph)

    for k, o in final_outputs:
        if '.' not in k and '/' not in k:
            env.set_var(k, _value(o))

    for var, key, value in final_setattrs:
        setattr(var.value, key, value)

    return None
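# To summarize the translation above with a hypothetical source fragment:
#
#     h = init
#     for x in xs:
#         h = f(h, x)
#
# becomes an ONNX Loop whose trip count is ChainerGenericLen(xs), whose body
# reads x via ChainerSequenceLookup(xs, iteration_index), and in which h is a
# loop-carried input/output pair discovered by _find_in_out; after the Loop,
# the final carried value is bound back to the name h in the outer env.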
def calc_seq(self, *args, npdtype=None, **kwargs):
    # Emit a node whose single output is a fresh sequence value and return
    # that sequence.
    res = new_sequence(dtype=npdtype)
    assert 'outputs' not in kwargs.keys()
    kwargs['outputs'] = [res.name]
    self.addnode(*args, **kwargs)
    return res