    # per-batch hooks wrap each forward pass; run once per mini-batch
    xs, ts = batch[:-1], batch[-1]
    evaluator.on_batch_begin({'train': False, 'xs': xs, 'ts': ts})
    model(*xs)
    evaluator.on_batch_end({'train': False, 'xs': xs, 'ts': ts})
# the epoch-level hook fires once after all batches are processed
evaluator.on_epoch_validate_end({'epoch': 0})


if __name__ == "__main__":
    Log.AppLogger.configure(mkdir=True)
    App.add_command('decode', decode, {
        'gpu': arg('--gpu', '-g', type=int, default=-1,
                   help='GPU ID (negative value indicates CPU)'),
        'model_file': arg('--modelfile', type=str, required=True,
                          help='Trained model archive file'),
        'save_to': arg('--out', type=str, default=None,
                       help='Save results to the specified directory'),
        'target_file': arg('--targetfile', type=str,
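# The hook calls above follow an event-listener pattern. Below is a minimal
# sketch of a listener responding to the same events; the class name and the
# timing logic are illustrative assumptions, not the evaluator used above.
import time

class TimingListener:
    def on_batch_begin(self, data):
        self._start = time.time()

    def on_batch_end(self, data):
        # `data` carries the same payload passed above: train flag, xs, ts
        elapsed = time.time() - self._start
        print('batch ({} mode) took {:.3f}s'.format(
            'train' if data['train'] else 'eval', elapsed))

    def on_epoch_validate_end(self, data):
        print('validation finished for epoch {}'.format(data['epoch']))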
    }, converter=lambda x: chainer.dataset.convert.to_device(device, x))
    trainer.add_listener(
        training.listeners.ProgressBar(lambda n: tqdm(total=n)), priority=200)
    trainer.fit((train_x, train_y), (test_x, test_y), n_epoch, batch_size)


if __name__ == "__main__":
    App.configure(name='chainer-mnist', logoption='d')
    App.add_command('train', train, {
        'batch_size': arg('--batchsize', '-b', type=int, default=100,
                          help='Number of images in each mini-batch'),
        'device': arg('--device', type=int, default=-1, metavar='ID',
                      help='Device ID (negative value indicates CPU)'),
        'n_epoch': arg('--epoch', '-e', type=int, default=20,
                       help='Number of sweeps over the dataset to train'),
        'n_units':
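# A minimal sketch of what the `converter` lambda above does per batch
# element: `chainer.dataset.convert.to_device` copies an array to the given
# device (a negative ID keeps it on the CPU). The arrays here are illustrative.
import numpy as np
import chainer

device = -1  # CPU
converter = lambda x: chainer.dataset.convert.to_device(device, x)
images = np.zeros((100, 784), dtype=np.float32)
labels = np.zeros(100, dtype=np.int32)
xs, ts = converter(images), converter(labels)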
# App.configure(logdir=App.basedir + '/../logs')
savedir = None
if '--savedir' in sys.argv:
    savedir_index = sys.argv.index('--savedir') + 1
    savedir = sys.argv[savedir_index]
if '--modelfile' in sys.argv:
    modelfile_index = sys.argv.index('--modelfile') + 1
    savedir = os.path.dirname(sys.argv[modelfile_index])
if savedir is not None:  # guard: otherwise `savedir` would be undefined here
    App.configure(logdir=App.basedir + '/' + savedir)
logging.AppLogger.configure(mkdir=True)
App.add_command('train', train, {
    'batch_size': arg('--batchsize', type=int, default=20, metavar='NUM',
                      help='Number of examples in each mini-batch'),
    'cache_dir': arg('--cachedir', type=str,
                     default=(App.basedir + '/../cache'),
                     metavar='DIR', help='Cache directory'),
    'test_file': arg('--devfile', type=str, default=None, metavar='FILE',
                     help='Development data file'),
    'device':
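# The argv pre-scan above can be factored into a helper. This is a
# hypothetical refactoring sketch (`peek_arg` is not defined in the script),
# shown only to make the index arithmetic explicit.
import sys

def peek_arg(flag, default=None):
    """Return the token following `flag` in sys.argv, or `default`."""
    if flag in sys.argv:
        index = sys.argv.index(flag) + 1
        if index < len(sys.argv):
            return sys.argv[index]
    return default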
        ]))
        print()
        UAS, LAS, count = UAS + _uas, LAS + _las, count + _count
    Log.i("[evaluation] UAS: {:.8f}, LAS: {:.8f}".format(
        UAS / count * 100, LAS / count * 100))


if __name__ == "__main__":
    Log.AppLogger.configure(mkdir=True)
    App.add_command('train', train, {
        'backend': arg('--backend', type=str, choices=('chainer', 'pytorch'),
                       default='chainer',
                       help='Backend framework for computation'),
        'batch_size': arg('--batchsize', '-b', type=int, default=32,
                          help='Number of examples in each mini-batch'),
        'embed_file': arg('--embedfile', type=str, default=None,
                          help='Pretrained word embedding file'),
        'embed_size': arg('--embedsize',
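# A minimal sketch of how per-batch counts like `_uas`, `_las`, `_count` are
# conventionally computed (this is the standard definition of attachment
# scores, not necessarily this script's own evaluation routine): UAS counts
# tokens whose predicted head is correct; LAS additionally requires the
# correct dependency label.
def attachment_counts(pred_heads, pred_labels, gold_heads, gold_labels):
    uas = sum(ph == gh for ph, gh in zip(pred_heads, gold_heads))
    las = sum(ph == gh and pl == gl for ph, pl, gh, gl
              in zip(pred_heads, pred_labels, gold_heads, gold_labels))
    return uas, las, len(gold_heads)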
    trainer = training.Trainer(optimizer, model,
                               loss_func=compute_loss,
                               accuracy_func=compute_accuracy)
    trainer.configure(framework_utils.config)
    trainer.fit(train_dataset, None,
                batch_size=batch_size,
                epochs=n_epoch,
                validation_data=test_dataset,
                verbose=App.verbose)


if __name__ == "__main__":
    logging.AppLogger.configure(mkdir=True)
    App.add_command('train', train, {
        'train_file': arg('--trainfile', type=str, required=True),
        'test_file': arg('--testfile', type=str),
        'word_embed_file': arg('--embedfile', type=str),
        'n_epoch': arg('--epoch', type=int, default=20),
        'batch_size': arg('--batchsize', type=int, default=10),
        'lr': arg('--lr', type=float, default=0.01),
        'gpu': arg('--gpu', type=int, default=-1),
        'seed': arg('--seed', type=int, default=1),
    })
    chainer.config.debug = False
    chainer.config.type_check = False
    App.run()
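# A minimal sketch of callables matching the `loss_func` / `accuracy_func`
# keywords above, assuming (as the names suggest) that each takes model
# outputs and gold targets and returns a scalar. The cross-entropy/accuracy
# choice is illustrative for a chainer backend, not the script's own code.
import chainer.functions as F

def compute_loss(ys, ts):
    return F.softmax_cross_entropy(ys, ts)

def compute_accuracy(ys, ts):
    return F.accuracy(ys, ts)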
        arc_mlp_units=kwargs.get('arc_mlp_units', 500),
        rel_mlp_units=kwargs.get('rel_mlp_units', 100),
        arc_mlp_dropout=kwargs.get('arc_mlp_dropout', dropout_ratio),
        rel_mlp_dropout=kwargs.get('rel_mlp_dropout', dropout_ratio))
    return parser


if __name__ == "__main__":
    App.configure(logdir=App.basedir + '/../logs', loglevel='debug')
    logging.AppLogger.configure(mkdir=True)
    App.add_command('train', train, {
        'batch_size': arg('--batchsize', type=int, default=5000, metavar='NUM',
                          help='Number of tokens in each mini-batch'),
        'cache_dir': arg('--cachedir', type=str,
                         default=(App.basedir + '/../cache'),
                         metavar='DIR', help='Cache directory'),
        'test_file': arg('--devfile', type=str, default=None, metavar='FILE',
                         help='Development data file'),
        'device':
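# Hyperparameters like `arc_mlp_units`/`rel_mlp_units` are typical of a
# biaffine dependency parser (Dozat & Manning style): separate MLPs produce
# head and dependent representations, and a biaffine form scores arcs. A
# minimal numpy sketch of that scoring, as an illustration only (this is not
# the repository's model code):
import numpy as np

def biaffine_arc_scores(D, H, U, w, b=0.0):
    """D: (n, k) dependent reps, H: (n, k) head reps, U: (k, k), w: (k,).
    Returns (n, n) scores where scores[i, j] scores token j heading token i."""
    return D @ U @ H.T + (H @ w)[None, :] + b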
        loader=loader)))

    # Start training
    trainer.fit(train_dataset, None,
                batch_size=batch_size,
                epochs=n_epoch,
                validation_data=test_dataset,
                verbose=App.verbose)


if __name__ == "__main__":
    Log.AppLogger.configure(mkdir=True)
    App.add_command('train', train, {
        'batch_size': arg('--batchsize', '-b', type=int, default=32,
                          help='Number of examples in each mini-batch'),
        'embed_file': arg('--embedfile', type=str, default=None,
                          help='Pretrained word embedding file'),
        'embed_size': arg('--embedsize', type=int, default=100,
                          help='Size of embeddings'),
        'gpu': arg('--gpu', '-g', type=int, default=-1,
                   help='GPU ID (negative value indicates CPU)'),
        'grad_clip': arg('--gradclip', type=float, default=5.0,
                         help='L2 norm threshold for gradient clipping'),
        'l2_lambda': arg('--l2', type=float, default=0.0,
                         help='Strength of L2 regularization'),
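# A plausible wiring for the --gradclip and --l2 values registered above,
# using standard Chainer optimizer hooks. This is an assumption about the
# script's internals, not a confirmed excerpt; the Linear link is a stand-in.
import chainer
import chainer.links as L

model = L.Linear(100, 10)  # stand-in for the actual model being trained
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(5.0))  # --gradclip
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0))       # --l2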