Example 1
def eval(name):
    """Classify a single name string with the saved GPU model.

    Loads the vocabulary and HDF5 weights from the hard-coded paths,
    runs one forward pass, and returns the predicted label index.

    NOTE(review): shadows the builtin ``eval``; name kept so existing
    callers keep working.
    """
    xp = cuda.cupy
    cuda.get_device(0).use()
    vocab = Vocabulary.load_from_file("model.vocab")
    # NOTE(review): hyperparameters are hard-coded here and must match
    # the checkpoint being loaded — confirm against training config.
    m = LetterClassifyer(3000, 200, 1000, 2)
    chainer.serializers.load_hdf5("model.hdf5", m)
    m.to_gpu()
    x_batch = [letter_list_text(name)]
    output = forward(x_batch, None, m, False, vocab, xp)
    # Fix: after m.to_gpu() the output lives on the device as a cupy
    # array; NumPy cannot consume it implicitly, so copy to host first.
    # Also return a plain int rather than a NumPy scalar.
    return int(np.argmax(cuda.to_cpu(output.data)))
Example 2
def eval(args):
    """Classify the text in args.file with a saved model and print the result.

    Uses the GPU when args.use_gpu is set, otherwise runs on CPU.
    Model/vocabulary paths are derived from args.model; network sizes
    come from args.vocab / args.embed / args.hidden / args.classes.

    NOTE(review): shadows the builtin ``eval``; name kept for callers.
    """
    if args.use_gpu:
        xp = cuda.cupy
        cuda.get_device(0).use()
    else:
        xp = np
    vocab = Vocabulary.load_from_file("%s.vocab" % args.model)
    m = LetterClassifyer(args.vocab, args.embed, args.hidden, args.classes)
    chainer.serializers.load_hdf5("%s.hdf5" % args.model, m)
    if args.use_gpu:
        m.to_gpu()
    x_batch = [letter_list_text(args.file)]
    output = forward(x_batch, None, m, False, vocab, xp)
    # Fix: on GPU, output.data is a cupy array and np.argmax on it fails;
    # copy to host memory before handing it to NumPy.
    scores = cuda.to_cpu(output.data) if args.use_gpu else output.data
    print(scores)
    print("hyp: %d" % np.argmax(scores)) # label
Example 3
def train(args):
    """Train the letter classifier and save model weights + vocabulary.

    Runs args.epoch epochs over the batches yielded by
    letter_list(args.file), reporting a running accuracy every 10 steps,
    and writes ``args.model + ".hdf5"`` / ``args.model + ".vocab"`` after
    each epoch. Uses the GPU when args.use_gpu is set.
    """
    if args.use_gpu:
        xp = cuda.cupy
        cuda.get_device(0).use()
    else:
        xp = np
    vocab = Vocabulary(args.file)
    # NOTE(review): unlike the evaluation code, this constructor passes no
    # class count — confirm LetterClassifyer defaults it appropriately.
    m = LetterClassifyer(args.vocab, args.embed, args.hidden)
    m.zerograds()
    if args.use_gpu:
        m.to_gpu()
    report_interval = 10  # print running accuracy every N batches
    for e in range(args.epoch):
        # NOTE(review): recreating Adam each epoch resets its moment
        # estimates — assumed intentional (warm restart); confirm.
        opt = chainer.optimizers.Adam(alpha=0.001)
        opt.setup(m)
        opt.add_hook(chainer.optimizer.GradientClipping(5.0))
        print("epoch: %d" % e)
        i = 0
        total_acc = 0
        e_acc = 0.0
        for x_batch, y in letter_list(args.file):
            x_batch = [x_batch]
            output, acc, loss = forward(x_batch, y, m, True, vocab, xp)
            total_acc += acc
            e_acc += acc
            if i % report_interval == 0:
                if i != 0:
                    total_acc /= report_interval
                print("time: %d, accuracy %f loss %f" % (i, total_acc.data, loss.data))
                total_acc = 0
            # Fix: gradients were zeroed only once before training, so every
            # backward pass accumulated onto all previous batches' gradients.
            # Clear them before each backward so opt.update() sees only this
            # batch's gradient.
            m.zerograds()
            loss.backward()
            if args.unchain:
                loss.unchain_backward()
            opt.update()
            i += 1
            sys.stdout.flush()
        chainer.serializers.save_hdf5(args.model + ".hdf5", m)
        vocab.save_vocab(args.model + ".vocab")
        e_acc /= i
        print("total acc: %f" %  e_acc.data)
Example 4
def __eval(args):
    """Run the saved CPU model on args.file and report its prediction.

    Loads the vocabulary and HDF5 weights named by args.model, performs
    a single forward pass, prints diagnostics, and returns a dict with
    the predicted label index ("result") and the raw output array
    ("data").
    """
    xp = np
    vocab = Vocabulary.load_from_file("%s.vocab" % args.model)
    classifier = LetterClassifyer(args.vocab, args.embed, args.hidden,
                                  args.classes)
    chainer.serializers.load_hdf5("%s.hdf5" % args.model, classifier)
    letters = letter_list_text(args.file)
    output = forward([letters], None, classifier, False, vocab, xp)
    print(args)
    print(output.data)
    prediction = np.argmax(output.data)
    print("hyp: %d" % prediction)  # index of the highest score in output.data
    return {"result": prediction, "data": output.data}
Example 5
def evaluate(args):
    """Run the saved CPU model on args['file'] and return a JSON-friendly result.

    args is a dict carrying 'model', 'vocab', 'embed', 'hidden',
    'classes', and 'file'. Returns {"result": predicted label as a plain
    int, "data": the raw score array converted to nested lists}.
    """
    xp = np
    model_name = args['model']
    vocab = Vocabulary.load_from_file("%s.vocab" % model_name)
    classifier = LetterClassifyer(args['vocab'], args['embed'],
                                  args['hidden'], args['classes'])
    chainer.serializers.load_hdf5("%s.hdf5" % model_name, classifier)
    x_batch = [letter_list_text(args['file'])]
    print('m: ', classifier)
    print('x_batch: ', x_batch)
    output = forward(x_batch, None, classifier, False, vocab, xp)
    print(args)
    print('output: ', output)
    scores = output.data
    print(scores)
    best = np.argmax(scores)
    print("hyp: %d" % best)  # index of the maximum entry of output.data
    return {
        "result": int(best),
        "data": scores.tolist()
    }
Example 6
def train(args):
    """Train the letter classifier and save model weights + vocabulary.

    Runs args.epoch epochs over the batches yielded by
    letter_list(args.file), reporting a running accuracy every 10 steps,
    and writes ``args.model + ".hdf5"`` / ``args.model + ".vocab"`` after
    each epoch. Uses the GPU when args.use_gpu is set.
    """
    if args.use_gpu:
        xp = cuda.cupy
        cuda.get_device(0).use()
    else:
        xp = np
    vocab = Vocabulary(args.file)
    m = LetterClassifyer(args.vocab, args.embed, args.hidden, args.classes)
    m.cleargrads()
    if args.use_gpu:
        m.to_gpu()
    time_t = 10  # print running accuracy every N batches
    for e in range(args.epoch):
        # NOTE(review): recreating Adam each epoch resets its moment
        # estimates — assumed intentional (warm restart); confirm.
        opt = chainer.optimizers.Adam(alpha=0.001)
        opt.setup(m)
        opt.add_hook(chainer.optimizer.GradientClipping(5.0))
        print("epoch: %d" % e)
        i = 0
        total_acc = 0
        e_acc = 0.0
        for x_batch, y in letter_list(args.file):
            x_batch = [x_batch]
            output, acc, loss = forward(x_batch, y, m, True, vocab, xp)
            total_acc += acc
            e_acc += acc
            if i % time_t == 0:
                if i != 0:
                    total_acc /= time_t
                print("time: %d, accuracy %f loss %f" %
                      (i, total_acc.data, loss.data))
                total_acc = 0
            # Fix: gradients were cleared only once before training, so every
            # backward pass accumulated onto all previous batches' gradients.
            # Clear them before each backward so opt.update() sees only this
            # batch's gradient.
            m.cleargrads()
            loss.backward()
            if args.unchain:
                loss.unchain_backward()
            opt.update()
            i += 1
            sys.stdout.flush()
        chainer.serializers.save_hdf5(args.model + ".hdf5", m)
        vocab.save_vocab(args.model + ".vocab")
        e_acc /= i
        print("total acc: %f" % e_acc.data)