Example #1
    def call_impl(self, env, c, h, x):
        # TODO(satos): Make this correct (it is merely tedious).
        # For now, decompose nstep into individual steps.
        # print(self.name,args)
        # assert(len(args) == 1)

        return new_tensor(), new_tensor()
Example #2
    def call_impl(self, env, a, axis, dtype, out):
        assert axis.is_none()  # TODO(hamaji): Not supported yet.
        assert dtype.is_none()  # TODO(hamaji): Not supported yet.
        assert out.is_none()  # TODO(hamaji): Not supported yet.
        # On top of that, we assume the input is a one-dimensional tensor.
        # The return value is apparently a tensor regardless of the input.
        # TODO(satos): These assumptions are rather restrictive.
        v = a.to_tensor(env)

        # If the return value were a Sequence rather than a tensor, this
        # could be written as neatly as SplitAxis.
        """
        a = new_tensor()
        env.addnode(
            'Flatten',
            inputs=[v.name], outputs=[a.name],
            axis=0
        )
        v = a
        a = new_tensor()
        env.addnode(
            'Squeeze',
            inputs=[v.name], outputs=[a.name],
            axes=[0]
        )
        """
        ls = env.calc(
            'ChainerGenericLen',
            inputs=[v.name],
        )

        def dummy():
            return "dummy_" + new_tensor().name

        localenv = Env(env.module)
        cnt = new_tensor()
        cond = new_tensor()
        s = new_tensor()
        gtx = new_tensor()
        tx = localenv.calc(
            "ChainerGenericGetItem",
            inputs=[gtx.name, cnt.name],
        )
        ts = localenv.calc(
            "Add",
            inputs=[tx.name, s.name],
        )
        ts2 = localenv.calc("Identity", inputs=[ts.name])

        zero = totensor(0, env)

        res = new_tensor()
        env.addnode('Loop',
                    inputs=[ls.name, "", v.name, zero.name],
                    outputs=[dummy(), dummy(), res.name],
                    body=utils.make_graph(localenv.nodes, "Cumsum_subgraph",
                                          [cnt, cond, gtx, s],
                                          [cond, gtx, ts, ts2]))

        return res
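A minimal NumPy sketch (under the same one-dimensional input assumption as above) of what the emitted Loop computes: the running sum is the loop-carried state ts, and each iteration's Identity copy ts2 is collected as the scan output that becomes res.

import numpy as np

def cumsum_reference(a):
    # The state s starts at 0 (the `zero` initializer above); each
    # iteration computes ts = a[cnt] + s and emits it as a scan output.
    s = a.dtype.type(0)
    outs = []
    for cnt in range(len(a)):
        s = a[cnt] + s
        outs.append(s)
    return np.stack(outs)

assert np.array_equal(cumsum_reference(np.arange(5)), np.cumsum(np.arange(5)))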
Example #3
 def call_impl(self, env, x):
     res = new_tensor(['unknown', 'unknown', 'unknown'])
     env.nodes.append(
         helper.make_node("Conv",
                          inputs=[x.to_tensor(env).name, self.W.name] +
                          ([] if self.b is None else [self.b.name]),
                          outputs=[res.name],
                          kernel_shape=self.ksize,
                          pads=self.pads,
                          strides=self.stride))
     return res
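For reference, a standalone sketch of the kind of node this emits (the tensor names and hyperparameters here are illustrative, not from the source): a 3x3 convolution with unit strides and symmetric padding.

from onnx import helper

conv = helper.make_node(
    "Conv",
    inputs=["x", "W", "b"],  # the bias input is optional, as in the code above
    outputs=["y"],
    kernel_shape=[3, 3],
    pads=[1, 1, 1, 1],       # begin/end padding per spatial axis
    strides=[1, 1],
)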
Example #4
 def call_impl(self, env, x, **kwargs):
     assert not kwargs  # TODO(hamaji): finetune not supported yet.
     res = new_tensor(['unknown', 'unknown', 'unknown'])
     env.nodes.append(
         helper.make_node(
             "BatchNormalization",
             inputs=[x.to_tensor(env).name, self.scale.name, self.B.name,
                     self.mean.name, self.var.name], outputs=[res.name],
             epsilon=self.eps,
             momentum=self.momentum,
             # For now, leave spatial at 1 (the values did not change with
             # 0 either, so its effect is unclear).
         )
     )
     return res
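A NumPy sketch of the inference-mode semantics of the node above, following the ONNX BatchNormalization formula (the function name and shapes are illustrative):

import numpy as np

def batch_norm_reference(x, scale, B, mean, var, eps=1e-5):
    # y = scale * (x - mean) / sqrt(var + eps) + B, broadcast over channel axis 1
    shape = (1, -1) + (1,) * (x.ndim - 2)
    return (scale.reshape(shape) * (x - mean.reshape(shape))
            / np.sqrt(var.reshape(shape) + eps) + B.reshape(shape))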
Example #5
def eval_unary_op(nast, env):
    v = eval_ast(nast.operand, env)
    res = new_tensor()
    if isinstance(nast.op, gast.USub):
        # optype = "*= -1"
        def opfun(x): return -x
    elif isinstance(nast.op, gast.Not):
        # optype = "Not"
        def opfun(x): return not x
    else:
        raise Exception('unknown operator', nast.op)

    if not istensor(v):
        return opfun(v.value)
    else:
        raise Exception("Unimplemented yet")
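A small check (assuming the gast package) that `-3` parses to the gast.USub case handled above, which the non-tensor branch folds to the Python constant -3:

import ast
import gast

node = gast.ast_to_gast(ast.parse("-3")).body[0].value
assert isinstance(node, gast.UnaryOp)
assert isinstance(node.op, gast.USub)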
Example #6
    def call_impl(self, env, x, n_batch_axes):
        x = x.to_tensor(env)
        res = new_tensor([self.n_out])

        use_chainer_linear = True
        if use_chainer_linear:
            inputs = [x.name, self.W.name]
            if not self.nobias:
                inputs.append(self.b.name)
            return env.calc(
                "ChainerLinear",
                inputs=inputs,
                n_batch_axes=n_batch_axes.to_int()
            )

        x_shape = env.calc("Shape", inputs=[x.name])
        batch_size = env.calc("Gather",
                              inputs=[x_shape.name,
                                      Value(0).to_tensor(env).name])
        batch_size = env.calc("Unsqueeze",
                              inputs=[batch_size.name],
                              axes=[0])
        mat_shape = env.calc("Concat",
                             inputs=[batch_size.name,
                                     Value([Value(-1)]).to_tensor(env).name],
                             axis=0)
        x = env.calc("Reshape",
                     inputs=[x.name, mat_shape.name])

        if self.nobias:
            t = env.calc(
                "Transpose",
                inputs=[self.W.name],
                perm=[1, 0]
            )
            res = env.calc(
                "MatMul",
                inputs=[x.name, t.name],
            )
        else:
            res = env.calc(
                "Gemm",
                inputs=[x.name, self.W.name, self.b.name],
                transA=0, transB=1
            )
        return res
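The non-ChainerLinear fallback above flattens x to (batch_size, -1) and applies Gemm with transB=1; a NumPy reference of that math, assuming n_batch_axes is 1 (shapes are illustrative):

import numpy as np

x = np.random.randn(4, 3, 5).astype(np.float32)  # flattened to (4, 15)
W = np.random.randn(7, 15).astype(np.float32)
b = np.random.randn(7).astype(np.float32)
y = x.reshape(x.shape[0], -1) @ W.T + b          # Gemm(x, W, b) with transB=1
assert y.shape == (4, 7)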
Example #7
def eval_list_comp(nast, env):
    vn = "dummy@" + new_tensor().name  # 重ならない名前にする(ループ内ループもあるため)
    assert len(nast.generators) >= 1
    tast = gast.ast_to_gast(ast.parse("v.append(w)")).body[0]
    tast.value.func.value.id = vn
    tast.value.args[0] = nast.elt

    for gen in nast.generators:
        # For now, these cases are not implemented.
        assert len(gen.ifs) == 0 and gen.is_async == 0
        tast = gast.For(target=gen.target, iter=gen.iter,
                        body=[tast], orelse=[])

    init = gast.ast_to_gast(ast.parse("v = []")).body[0]
    init.targets[0].id = vn
    tast = [init, tast]

    rv = eval_ast(tast, env)
    assert rv.is_none()
    res = env.pop_var(vn)
    return res
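The desugaring above, written out by hand for `[w * 2 for w in ws]` (a hypothetical input): the comprehension becomes an init assignment plus one gast.For per generator wrapped around an append.

ws = [1, 2, 3]
v = []               # init: "v = []" bound to the fresh name vn
for w in ws:         # gast.For built from the generator
    v.append(w * 2)  # tast: "v.append(w)" with nast.elt substituted
assert v == [2, 4, 6]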
Example #8
def eval_ast_impl(nast, env):
    if isinstance(nast, list):
        # Execute statements sequentially.
        for s in nast:
            if is_print_logging(s, env):
                continue
            eval_ast(s, env)
        return None
    elif isinstance(nast, gast.For):
        return eval_for(nast, env)

    elif isinstance(nast, gast.Assign):
        return eval_assign(nast, env)

    elif isinstance(nast, gast.AugAssign):
        # This is inaccurate for assignments to references.
        ca = gast.Assign(targets=[nast.target],
                         value=gast.BinOp(left=nast.target,
                                          op=nast.op,
                                          right=nast.value))
        return eval_ast(ca, env)

    elif isinstance(nast, gast.Call):
        return eval_call(nast, env)

    elif isinstance(nast, gast.UnaryOp):
        return eval_unary_op(nast, env)

    elif isinstance(nast, gast.BinOp):
        return eval_binary_op(nast, env)

    elif isinstance(nast, gast.BoolOp):
        # Currently only constant booleans are supported.
        vs = list(map(lambda x: eval_ast(x, env), nast.values))
        res = new_tensor()
        if isinstance(nast.op, gast.And):

            def opfun(v):
                return all(v)
        else:
            raise Exception('unknown operator', nast.op)

        if not any(map(istensor, vs)):
            return opfun(vs)

        raise Exception('Unimplemented BoolOp for tensor', nast)

    elif isinstance(nast, gast.Attribute):
        return eval_attribute(nast, env)

    elif isinstance(nast, gast.Compare):
        return eval_compare(nast, env)

    elif isinstance(nast, gast.If):
        return eval_if(nast, env)

    elif isinstance(nast, gast.ListComp):
        return eval_list_comp(nast, env)

    elif isinstance(nast, gast.Subscript):
        return eval_subscript(nast, env)

    elif isinstance(nast, gast.Delete):
        # Simply forget each target.
        vs = nast.targets
        for v in vs:
            assert isinstance(v, gast.Name)
            env.pop_var(v.id)
        return None

    elif isinstance(nast, gast.Name):
        try:
            return env.get_var(nast.id)
        except NameError as ne:
            if nast.id in dir(env.module):
                return getattr(env.module, nast.id)
            elif nast.id in dir(builtins):
                return getattr(builtins, nast.id)
            raise
    elif isinstance(nast, gast.Constant):
        return nast.value
    elif isinstance(nast, gast.Expr):
        return eval_ast(nast.value, env)
    elif isinstance(nast, gast.Tuple):
        return tuple(map(lambda x: eval_ast(x, env), nast.elts))
    elif isinstance(nast, gast.List):
        return eval_list(nast, env)

    elif isinstance(nast, gast.Return):
        raise ValueReturn(eval_ast(nast.value, env))

    elif isinstance(nast, gast.Assert):
        # TODO(hamaji): Emit an assertion?
        return None

    # TODO(hamaji): Implement `with`.
    # elif isinstance(nast, gast.With):
    #     sys.stderr.write(
    #         'WARNING: Currently, the context of `with` is just ignored\n')
    #     for s in nast.body:
    #         eval_ast(s, env)
    #     return None

    else:
        print('unknown ast')
        code.InteractiveConsole({'nast': nast, 'env': env}).interact()
        raise Exception('unknown ast', nast)

    raise Exception("shouldn't reach here", nast)
Example #9
def eval_binary_op(nast, env):
    lv = eval_ast(nast.left, env)
    rv = eval_ast(nast.right, env)

    res = new_tensor(['TODO'])
    isfloor = False
    if isinstance(nast.op, gast.Add):
        optype = "Add"

        def opfun(a, b):
            return a + b

    elif isinstance(nast.op, gast.Sub):
        optype = "Sub"

        def opfun(a, b):
            return a - b

    elif isinstance(nast.op, gast.Mult):
        optype = "Mul"

        def opfun(a, b):
            return a * b

    elif isinstance(nast.op, gast.FloorDiv):
        optype = "Div"
        isfloor = True

        def opfun(a, b):
            return a // b

    elif isinstance(nast.op, gast.Div):
        optype = "Div"

        def opfun(a, b):
            return a / b

    else:
        raise Exception('unknown operator', nast.op)

    # code.InteractiveConsole({'lv': lv, 'rv': rv}).interact()

    # TODO(hamaji): Reconsider if constant folding is necessary in CH2O.
    #if not istensor(lv) and not istensor(rv):
    #    # perform constant folding
    #    return opfun(lv, rv)

    lv.to_value_info(env)
    rv.to_value_info(env)
    if lv.is_sequence() and rv.is_sequence():
        assert optype == 'Add'
        lv = lv.to_sequence(env)
        rv = rv.to_sequence(env)

        state = new_sequence(name='seq_plus_state')
        cond = new_tensor(name='seq_plus_cond')
        index = new_tensor(name='seq_plus_index')
        elem = new_tensor(name='seq_plus_elem')
        out_state = new_tensor(name='seq_plus_out_state')
        nodes = []
        nodes.append(
            helper.make_node('ChainerSequenceLookup',
                             inputs=[rv.name, index.name],
                             outputs=[elem.name]))
        nodes.append(
            helper.make_node('ChainerSequenceAppend',
                             inputs=[state.name, elem.name],
                             outputs=[out_state.name]))
        loop = make_graph(
            nodes,
            "SeqPlus",
            [index, cond, state],
            [cond, out_state],
        )

        length = env.calc('ChainerGenericLen', inputs=[rv.name])
        res = new_sequence(name='seq_plus')
        env.addnode('Loop',
                    inputs=[length.name, "", lv.name],
                    outputs=[res.name],
                    body=loop)
    else:
        if optype == 'Div' and not isfloor:
            lv = castto(lv.to_tensor(env), TensorProto.FLOAT, env)
            rv = castto(rv.to_tensor(env), TensorProto.FLOAT, env)
        else:
            lv = lv.to_tensor(env)
            rv = rv.to_tensor(env)
        res = env.calc(
            optype,
            inputs=[lv.name, rv.name],
        )

    if isfloor:
        res = env.calc(
            "Floor",
            inputs=[res.name],
        )

    return res
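A NumPy check of the FloorDiv lowering above (cast to float, Div, then Floor), which agrees with Python's // semantics even for negative operands:

import numpy as np

a = np.array([7.0, -7.0], dtype=np.float32)
b = np.array([2.0, 2.0], dtype=np.float32)
assert np.array_equal(np.floor(a / b),
                      np.array([3.0, -4.0], dtype=np.float32))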
Example #10
def eval_for(nast, env):
    assert nast.orelse == []
    ite = eval_ast(nast.iter, env)

    # A hack for ResNet50.
    # TODO(hamaji): Come up with a sophisticated way.
    # TODO(hamaji): This code doesn't handle scope properly, I think.
    if (isinstance(ite.value, types.GeneratorType)
            and 'ChainList.children' in str(ite.value)):
        # For now, actually execute the for loop.
        tg = nast.target.id
        env.set_var(tg, Value(None))
        for v in ite.value:
            env.set_var(tg, _value(v))
            eval_ast(nast.body, env)
            # print('looping',env.vars.keys())

        env.pop_var(tg)
        return None

    if ite.is_py:
        ite = Value([Value(v) for v in ite.value])

    assert isinstance(nast.target, gast.Name)
    x = nast.target.id

    # Create a new env; the child graph built during evaluation becomes the loop body.
    localenv = env.new_block()

    cnt = new_tensor()
    gtx = new_sequence()
    localenv.set_var(
        x,
        _value(
            localenv.calc(
                "ChainerSequenceLookup",
                inputs=[gtx.name, cnt.name],
            )))
    ty = eval_ast(nast.body, localenv)
    assert ty.is_none()

    in_out = _find_in_out(localenv, env)

    input_values = []
    output_values = []
    final_outputs = []
    final_setattrs = []
    for key, (iv, ov, setattr_info) in in_out.items():
        if ov is None:
            continue
        if iv is None:
            iv = Value(False)
        out = ov.copy(env, name=key)
        final_outputs.append((key, out.value))
        if setattr_info is not None:
            final_setattrs.append(tuple(list(setattr_info) + [out]))
        input_values.append(iv.to_value_info(env))
        output_values.append(ov.to_value_info(env))

    cond = new_tensor(name='loop_cond')
    localgraph = make_graph(localenv.nodes, "Loop_subgraph",
                            [cnt, cond, gtx] + input_values,
                            [cond, gtx] + output_values)

    mtc = env.calc(
        "ChainerGenericLen",
        inputs=[ite.to_sequence(env).name],
    )

    env.addnode('Loop',
                inputs=([mtc.name, "", ite.to_sequence(env).name] +
                        [i.name for i in input_values]),
                outputs=([new_tensor('out_generator').name] +
                         [o.name for _, o in final_outputs]),
                body=localgraph)

    for k, o in final_outputs:
        if '.' not in k and '/' not in k:
            env.set_var(k, _value(o))

    for var, key, value in final_setattrs:
        setattr(var.value, key, value)

    return None
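A pure-Python mirror of the Loop emitted above (a schematic sketch, not the runtime semantics in full): cnt plays the trip counter, gtx the iterated sequence, and each in_out entry a loop-carried state that the body may update.

def loop_reference(seq, states, body):
    # body corresponds to Loop_subgraph: it sees the counter, the current
    # element, and the carried states, and returns the updated states.
    for cnt in range(len(seq)):
        states = body(cnt, seq[cnt], states)
    return states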
Example #11
def compile_model(model, inputs):
    # return helper.make_graph([],'dummy',[],[])

    init_id2name(model)
    # code.InteractiveConsole({'mo': model}).interact()
    env = Env(sys.modules[model.__module__])
    molk = User_Defined_Link(model, env)

    input_tensors = []
    for i in inputs:
        # TODO(hamaji): Set valid type info.
        if isinstance(i, (list, tuple)):
            x = new_sequence()
        elif i is None:
            x = new_tensor()
        else:
            if isinstance(i, int):
                i = np.array(i)
            else:
                # TODO(durswd): This code requires chainer6.x
                i = chainer.cuda.to_cpu(i)

            x = new_tensor(dims=i.shape, dtype=i.dtype)
        input_tensors.append(x)

    input_values = [Value(i) for i in input_tensors]
    v = molk.call(input_values, [], env)

    dprint('output_tensors', v)
    if isinstance(v.value, tuple):
        output_tensors = list(v.value)  # unpack the tuple
    else:
        output_tensors = [v]  # for now, a single tensor

    # print('env.init_tensors ',env.init_tensors)
    input_tensors += list(env.init_tensors.values())

    for f in env.restore_funcs:
        f()

    # for no in env.nodes:
    #   print(no.op_type)
    # print(env.nodes)
    # print(input_tensors)
    # print(output_tensors)
    # for ch in model.namedparams():
    #    print(ch)

    outputs_vi = [o.to_value_info(env) for o in output_tensors]
    graph = make_graph(env.nodes, 'name_is_unknown_now', input_tensors,
                       outputs_vi)

    # Attach initializers to the inputs that are weights.
    # Keep variable dimensions such as batch_size and input_size as they are
    # where possible.

    # The ONNX checker fails once Chainer compiler's custom nodes are used...
    # checker.check_graph(graph)
    mo = helper.make_model(graph)

    # print(mo)
    return mo
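Hypothetical usage (MyModel and the input shape are assumptions, not from the source):

import numpy as np

# model = MyModel()  # some chainer.Chain
# onnx_model = compile_model(model, [np.zeros((1, 3, 224, 224), np.float32)])
# with open('model.onnx', 'wb') as f:
#     f.write(onnx_model.SerializeToString())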
Example #12
 def dummy():
     return "dummy_" + new_tensor().name
Example #13
    def call_impl(self, env, hx, cx, xs):
        assert hx.value is None  # TODO(hamaji): Not implemented yet.
        assert cx.value is None  # TODO(hamaji): Not implemented yet.
        xs = xs.to_sequence(env)

        # For now, decompose nstep into individual steps.
        ilens = env.calc(
            "ChainerSequenceLengths",
            inputs=[xs.name],
        )

        tilens = env.calc("ConcatFromSequence",
                          inputs=[ilens.name],
                          axis=0,
                          new_axis=True)

        v = xs

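        # The index shuffles below appear to reorder Chainer's per-gate
        # weights (i, f, c, o) into ONNX LSTM's (i, o, f, c) layout, with
        # Unsqueeze adding the num_directions axis.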
        def lstm_param(ps):
            p = env.calc("Concat", inputs=[v.name for v in ps], axis=0)
            return env.calc("Unsqueeze", inputs=[p.name], axes=[0])

        wst = []
        rst = []
        bst = []
        for w in self.ws:
            wst.append(lstm_param([w[0], w[3], w[1], w[2]]))
            rst.append(lstm_param([w[4], w[7], w[5], w[6]]))
        for b in self.bs:
            bst.append(
                lstm_param([b[0], b[3], b[1], b[2], b[4], b[7], b[5], b[6]]))

        ws = []
        rs = []
        bs = []
        for i in range(self.n_layers):
            for s, t in [(ws, wst), (rs, rst), (bs, bst)]:
                s.append(
                    env.calc("Concat",
                             inputs=[t[i * 2].name, t[i * 2 + 1].name],
                             axis=0))

        hs = []
        cs = []
        v = Value(v).to_sequence(env)
        v = env.calc(
            "ChainerSequencePad",
            inputs=[v.name],
        )
        v = env.calc("Transpose", perm=(1, 0, 2), inputs=[v.name])

        sequence_length = env.calc("ChainerGenericLen", inputs=[v.name])
        out_shape = Value(
            [Value(sequence_length),
             Value(-1),
             Value(self.out_size * 2)]).to_tensor(env)

        for i in range(self.n_layers):
            h = new_tensor()
            c = new_tensor()
            ys = new_tensor()

            env.addnode(
                "LSTM",
                inputs=[
                    v.name, ws[i].name, rs[i].name, bs[i].name, tilens.name
                ],
                outputs=[ys.name, h.name, c.name],
                direction='bidirectional',
                hidden_size=self.out_size,
            )

            hs.append(h.name)
            cs.append(c.name)

            # ys :: [seqlen x 2 x batchsize x hiddensize]
            v = env.calc("Transpose", perm=(0, 2, 1, 3), inputs=[ys.name])
            v = env.calc("Reshape", inputs=[v.name, out_shape.name])

        v = env.calc("Transpose", perm=(1, 0, 2), inputs=[v.name])
        v = env.calc_seq("ChainerSequenceUnpad", inputs=[v.name, ilens.name])

        ths = env.calc(
            "Concat",
            inputs=hs,
            axis=0,
        )
        tcs = env.calc(
            "Concat",
            inputs=cs,
            axis=0,
        )

        tys = v
        return ths, tcs, tys
Example #14
    def call_impl(self, env, hx, cx, xs):
        assert hx.value is None  # TODO(hamaji): Not implemented yet.
        assert cx.value is None  # TODO(hamaji): Not implemented yet.
        xs = xs.to_sequence(env)

        # For now, decompose nstep into individual steps.
        ilens = env.calc(
            "ChainerSequenceLengths",
            inputs=[xs.name],
        )

        tilens = env.calc("ConcatFromSequence",
                          inputs=[ilens.name],
                          axis=0,
                          new_axis=True)

        v = env.calc(
            "ChainerSequencePad",
            inputs=[xs.name],
        )
        v = env.calc(
            "Transpose",
            perm=(1, 0, 2),
            inputs=[v.name],
        )

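        # As in the bidirectional variant above: reorder Chainer's
        # (i, f, c, o) gate weights into ONNX LSTM's (i, o, f, c) layout.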
        def lstm_param(ps):
            p = env.calc("Concat", inputs=[v.name for v in ps], axis=0)
            return env.calc("Unsqueeze", inputs=[p.name], axes=[0])

        ws = []
        rs = []
        bs = []
        for w in self.ws:
            ws.append(lstm_param([w[0], w[3], w[1], w[2]]))
            rs.append(lstm_param([w[4], w[7], w[5], w[6]]))
        for b in self.bs:
            bs.append(
                lstm_param([b[0], b[3], b[1], b[2], b[4], b[7], b[5], b[6]]))

        hs = []
        cs = []
        for i in range(self.n_layers):
            h = new_tensor()
            c = new_tensor()
            ys = new_tensor()

            env.addnode(
                "LSTM",
                inputs=[
                    v.name, ws[i].name, rs[i].name, bs[i].name, tilens.name
                ],
                outputs=[ys.name, h.name, c.name],
                direction='forward',
                hidden_size=self.out_size,
                # sequence_lens=[ilens.name]
            )

            hs.append(h.name)
            cs.append(c.name)
            yys = env.calc("Squeeze", inputs=[ys.name], axes=[1])
            v = yys
        # print(hs)
        # print(cs)
        ths = env.calc(
            "Concat",
            inputs=hs,
            axis=0,
        )
        tcs = env.calc(
            "Concat",
            inputs=cs,
            axis=0,
        )

        tv = env.calc(
            "Transpose",
            perm=(1, 0, 2),
            inputs=[v.name],
        )
        v = tv

        tys = env.calc(
            "ChainerSequenceUnpad",
            inputs=[v.name, ilens.name],
        )
        return ths, tcs, tys
Example #15
 def calc(self, *args, npdtype=None, **kwargs):
     res = new_tensor(dtype=npdtype)
     assert 'outputs' not in kwargs.keys()
     kwargs['outputs'] = [res.name]
     self.addnode(*args, **kwargs)
     return res
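Hypothetical usage (env and x are assumed to exist): emit a node and receive a fresh output tensor whose name is wired as the node's output.

# y = env.calc("Relu", inputs=[x.name])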