def eval(name):
    """Classify a single name string on the GPU with the fixed "model" files.

    Loads vocabulary from ``model.vocab`` and weights from ``model.hdf5``
    (hyperparameters hard-coded: vocab 3000, embed 200, hidden 1000,
    2 classes), then returns the argmax class index of the network output.

    NOTE(review): shadows the builtin ``eval``; also redefined later in the
    file by an args-based version.
    """
    xp = cuda.cupy
    cuda.get_device(0).use()
    vocab = Vocabulary.load_from_file("model.vocab")
    classifier = LetterClassifyer(3000, 200, 1000, 2)
    chainer.serializers.load_hdf5("model.hdf5", classifier)
    classifier.to_gpu()
    batch = [letter_list_text(name)]
    output = forward(batch, None, classifier, False, vocab, xp)
    return np.argmax(output.data)
def eval(args):
    """Evaluate ``args.file`` with a saved model and print the prediction.

    Model weights come from ``<args.model>.hdf5`` and the vocabulary from
    ``<args.model>.vocab``. Runs on GPU 0 when ``args.use_gpu`` is set,
    otherwise on CPU. Prints the raw output array and the argmax class.

    NOTE(review): redefines ``eval`` (shadowing the builtin as well).
    """
    if args.use_gpu:
        xp = cuda.cupy
        cuda.get_device(0).use()
    else:
        xp = np

    vocab = Vocabulary.load_from_file("%s.vocab" % args.model)
    classifier = LetterClassifyer(args.vocab, args.embed, args.hidden,
                                  args.classes)
    chainer.serializers.load_hdf5("%s.hdf5" % args.model, classifier)
    if args.use_gpu:
        classifier.to_gpu()

    batch = [letter_list_text(args.file)]
    output = forward(batch, None, classifier, False, vocab, xp)
    print(output.data)
    print("hyp: %d" % np.argmax(output.data))  # label
def train(args):
    """Train a LetterClassifyer on ``args.file`` for ``args.epoch`` epochs.

    Saves weights to ``<args.model>.hdf5`` and the vocabulary to
    ``<args.model>.vocab`` after every epoch. Prints a running accuracy
    every ``time_t`` iterations and the per-epoch mean accuracy.

    Fixes vs. the original:
    - ``args.classes`` is now passed to the ``LetterClassifyer`` constructor;
      every other call site in this file passes it and the original omitted
      it here.
    - Gradients are cleared before each backward pass. In Chainer,
      ``opt.update()`` called without a loss function does NOT reset
      gradients, so the original accumulated them across iterations.
    - ``cleargrads()`` replaces the deprecated ``zerograds()``.
    """
    if args.use_gpu:
        xp = cuda.cupy
        cuda.get_device(0).use()
    else:
        xp = np

    vocab = Vocabulary(args.file)
    m = LetterClassifyer(args.vocab, args.embed, args.hidden, args.classes)
    m.cleargrads()
    if args.use_gpu:
        m.to_gpu()

    time_t = 10  # report interval, in iterations
    for e in range(args.epoch):
        # NOTE(review): the optimizer (and its Adam moment state) is
        # re-created every epoch — confirm this reset is intentional.
        opt = chainer.optimizers.Adam(alpha=0.001)
        opt.setup(m)
        opt.add_hook(chainer.optimizer.GradientClipping(5.0))
        print("epoch: %d" % e)

        i = 0
        total_acc = 0    # windowed accuracy accumulator
        e_acc = 0.0      # whole-epoch accuracy accumulator
        for x_batch, y in letter_list(args.file):
            x_batch = [x_batch]
            output, acc, loss = forward(x_batch, y, m, True, vocab, xp)
            total_acc += acc
            e_acc += acc
            if i % time_t == 0:
                if i != 0:
                    total_acc /= time_t
                    print("time: %d, accuracy %f loss %f"
                          % (i, total_acc.data, loss.data))
                    total_acc = 0
            # Reset gradients every step; opt.update() alone does not.
            m.cleargrads()
            loss.backward()
            if args.unchain:
                loss.unchain_backward()
            opt.update()
            i += 1
            sys.stdout.flush()

        chainer.serializers.save_hdf5(args.model + ".hdf5", m)
        vocab.save_vocab(args.model + ".vocab")
        e_acc /= i
        print("total acc: %f" % e_acc.data)
def __eval(args):
    """CPU-only evaluation of ``args.file`` against a saved model.

    Loads ``<args.model>.vocab`` / ``<args.model>.hdf5``, runs a forward
    pass, prints diagnostics, and returns a dict with the predicted class
    index (``"result"``) and the raw output array (``"data"``).
    """
    xp = np
    vocab = Vocabulary.load_from_file("%s.vocab" % args.model)
    model = LetterClassifyer(args.vocab, args.embed, args.hidden,
                             args.classes)
    chainer.serializers.load_hdf5("%s.hdf5" % args.model, model)

    batch = [letter_list_text(args.file)]
    output = forward(batch, None, model, False, vocab, xp)

    print(args)
    print(output.data)
    print("hyp: %d" % np.argmax(output.data))
    # Return the index of the maximum of output.data plus the raw scores.
    return {"result": np.argmax(output.data), "data": output.data}
def evaluate(args):
    """Evaluate ``args['file']`` on CPU; ``args`` is a plain dict.

    Like ``__eval`` but takes mapping-style configuration and returns
    JSON-friendly values: the predicted class as a Python ``int`` and the
    raw output scores as a nested list.
    """
    xp = np
    vocab = Vocabulary.load_from_file("%s.vocab" % args['model'])
    model = LetterClassifyer(args['vocab'], args['embed'], args['hidden'],
                             args['classes'])
    chainer.serializers.load_hdf5("%s.hdf5" % args['model'], model)

    batch = [letter_list_text(args['file'])]
    print('m: ', model)
    print('x_batch: ', batch)
    output = forward(batch, None, model, False, vocab, xp)

    print(args)
    print('output: ', output)
    print(output.data)
    print("hyp: %d" % np.argmax(output.data))
    # Return the index of the maximum of output.data plus the raw scores.
    return {
        "result": int(np.argmax(output.data)),
        "data": output.data.tolist(),
    }
def train(args):
    """Train a LetterClassifyer on ``args.file`` for ``args.epoch`` epochs.

    Saves weights to ``<args.model>.hdf5`` and the vocabulary to
    ``<args.model>.vocab`` after every epoch. Prints a running accuracy
    every ``time_t`` iterations and the per-epoch mean accuracy.

    NOTE(review): this redefines ``train``; an earlier version exists in
    the file and is shadowed by this one.

    Fix vs. the original: gradients are now cleared before each backward
    pass. In Chainer, ``opt.update()`` called without a loss function does
    NOT reset gradients, so the original accumulated them across
    iterations (``cleargrads()`` was only called once, before training).
    """
    if args.use_gpu:
        xp = cuda.cupy
        cuda.get_device(0).use()
    else:
        xp = np

    vocab = Vocabulary(args.file)
    m = LetterClassifyer(args.vocab, args.embed, args.hidden, args.classes)
    m.cleargrads()
    if args.use_gpu:
        m.to_gpu()

    time_t = 10  # report interval, in iterations
    for e in range(args.epoch):
        # NOTE(review): the optimizer (and its Adam moment state) is
        # re-created every epoch — confirm this reset is intentional.
        opt = chainer.optimizers.Adam(alpha=0.001)
        opt.setup(m)
        opt.add_hook(chainer.optimizer.GradientClipping(5.0))
        print("epoch: %d" % e)

        i = 0
        total_acc = 0    # windowed accuracy accumulator
        e_acc = 0.0      # whole-epoch accuracy accumulator
        for x_batch, y in letter_list(args.file):
            x_batch = [x_batch]
            output, acc, loss = forward(x_batch, y, m, True, vocab, xp)
            total_acc += acc
            e_acc += acc
            if i % time_t == 0:
                if i != 0:
                    total_acc /= time_t
                    print("time: %d, accuracy %f loss %f"
                          % (i, total_acc.data, loss.data))
                    total_acc = 0
            # Reset gradients every step; opt.update() alone does not.
            m.cleargrads()
            loss.backward()
            if args.unchain:
                loss.unchain_backward()
            opt.update()
            i += 1
            sys.stdout.flush()

        chainer.serializers.save_hdf5(args.model + ".hdf5", m)
        vocab.save_vocab(args.model + ".vocab")
        e_acc /= i
        print("total acc: %f" % e_acc.data)