Example 1
    def call_impl(self, env, a, axis, dtype, out):
        assert axis.is_none()  # TODO(hamaji): Not supported yet.
        assert dtype.is_none()  # TODO(hamaji): Not supported yet.
        assert out.is_none()  # TODO(hamaji): Not supported yet.
        # On top of that, we assume the input is a 1-D Tensor.
        # The return value appears to be a tensor regardless of the input.
        # TODO(satos) This assumption is admittedly rather strong.
        v = a.to_tensor(env)

        # If the return value were a Sequence rather than a tensor, this could
        # be written as neatly as SplitAxis.
        """
        a = new_tensor()
        env.addnode(
            'Flatten',
            inputs=[v.name], outputs=[a.name],
            axis=0
        )
        v = a
        a = new_tensor()
        env.addnode(
            'Squeeze',
            inputs=[v.name], outputs=[a.name],
            axes=[0]
        )
        """
        ls = env.calc(
            'ChainerGenericLen',
            inputs=[v.name],
        )

        # The Loop node also emits the final values of the loop-carried states,
        # which we do not need; dummy() generates throwaway output names for them.
        def dummy():
            return "dummy_" + new_tensor().name

        # Build the Loop body in its own Env. Its graph inputs are the
        # iteration counter (cnt), the loop condition (cond), the carried
        # input tensor (gtx) and the running sum (s).
        localenv = Env(env.module)
        cnt = new_tensor()
        cond = new_tensor()
        s = new_tensor()
        gtx = new_tensor()
        tx = localenv.calc(
            "ChainerGenericGetItem",
            inputs=[gtx.name, cnt.name],
        )
        ts = localenv.calc(
            "Add",
            inputs=[tx.name, s.name],
        )
        ts2 = localenv.calc("Identity", inputs=[ts.name])

        zero = totensor(0, env)

        # The trip count is the input length and the condition input is left
        # empty; v and zero seed the carried states (gtx, s). The first two
        # Loop outputs (final carried values) are discarded, while res collects
        # the per-step sums (ts2), i.e. the cumulative sum.
        res = new_tensor()
        env.addnode('Loop',
                    inputs=[ls.name, "", v.name, zero.name],
                    outputs=[dummy(), dummy(), res.name],
                    body=utils.make_graph(localenv.nodes, "Cumsum_subgraph",
                                          [cnt, cond, gtx, s],
                                          [cond, gtx, ts, ts2]))

        return res
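
Semantically, the Loop emitted above computes a cumulative sum over a 1-D tensor: each iteration reads one element with ChainerGenericGetItem, adds it to the running sum, and records the result as a scan output. A minimal reference sketch of that behaviour in plain NumPy (the helper name below is illustrative, not part of chainer-compiler):

import numpy as np

def cumsum_reference(a):
    s = np.array(0)              # running sum, seeded like `zero` above
    steps = []                   # per-iteration results, like the scan output ts2
    for cnt in range(len(a)):    # trip count, as computed by ChainerGenericLen
        s = a[cnt] + s           # ChainerGenericGetItem followed by Add
        steps.append(s)
    return np.stack(steps)       # the concatenated scan output becomes `res`

assert (cumsum_reference(np.array([1, 2, 3])) == np.array([1, 3, 6])).all()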
Example 2
def compile_model(model, inputs):
    # return helper.make_graph([],'dummy',[],[])

    init_id2name(model)
    # code.InteractiveConsole({'mo': model}).interact()
    env = Env(sys.modules[model.__module__])
    molk = User_Defined_Link(model, env)

    # Build an ONNX graph input for each example input: lists/tuples become
    # sequences, None becomes an untyped tensor, and arrays carry their shape
    # and dtype.
    input_tensors = []
    for i in inputs:
        # TODO(hamaji): Set valid type info.
        if isinstance(i, (list, tuple)):
            x = new_sequence()
        elif i is None:
            x = new_tensor()
        else:
            if isinstance(i, int):
                i = np.array(i)
            else:
                # TODO(durswd): This code requires chainer6.x
                i = chainer.cuda.to_cpu(i)

            x = new_tensor(dims=i.shape, dtype=i.dtype)
        input_tensors.append(x)

    # Wrap the inputs as Values and trace the model's forward pass; the emitted
    # ONNX nodes accumulate in env.
    input_values = [Value(i) for i in input_tensors]
    v = molk.call(input_values, [], env)

    dprint('output_tensors', v)
    if isinstance(v.value, tuple):
        output_tensors = list(v.value)  # unpack the tuple
    else:
        output_tensors = [v]  # treat it as a single tensor for now

    # print('env.init_tensors ',env.init_tensors)
    input_tensors += list(env.init_tensors.values())

    for f in env.restore_funcs:
        f()

    # for no in env.nodes:
    #   print(no.op_type)
    # print(env.nodes)
    # print(input_tensors)
    # print(output_tensors)
    # for ch in model.namedparams():
    #    print(ch)

    outputs_vi = [o.to_value_info(env) for o in output_tensors]
    graph = make_graph(env.nodes, 'name_is_unknown_now', input_tensors,
                       outputs_vi)

    # Attach an initializer to each input that is a weight.
    # Leave variable quantities such as batch_size and input_size as-is as
    # much as possible.

    # Using Chainer compiler's custom nodes makes the graph fail the checker...
    # checker.check_graph(graph)
    mo = helper.make_model(graph)

    # print(mo)
    return mo
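
For reference, a minimal usage sketch of compile_model (the MLP model below and the onnx.save call are illustrative assumptions; compile_model is assumed to be in scope and, as shown above, returns the ModelProto built by helper.make_model):

import numpy as np
import chainer
import chainer.links as L
import onnx

class MLP(chainer.Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 4)    # a single linear layer keeps the sketch small

    def forward(self, x):
        return self.l1(x)

model = MLP()
x = np.zeros((2, 3), dtype=np.float32)
mo = compile_model(model, [x])          # trace the model and build an ONNX ModelProto
onnx.save(mo, 'model.onnx')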