Example #1
0
if __name__ == "__main__":
    # Make sure the log directory exists before anything writes to it.
    Log.AppLogger.configure(mkdir=True)

    # Command-line options for the 'decode' sub-command, keyed by the
    # keyword-argument name each parsed value is handed to `decode` as.
    decode_options = {
        'gpu': arg('--gpu', '-g', type=int, default=-1,
                   help='GPU ID (negative value indicates CPU)'),
        'model_file': arg('--modelfile', type=str, required=True,
                          help='Trained model archive file'),
        'save_to': arg('--out', type=str, default=None,
                       help='Save results to the specified directory'),
        'target_file': arg('--targetfile', type=str, required=True,
                           help='Decoding target data file'),
    }
    App.add_command('decode', decode, decode_options)

    App.run()
Example #2
0
File: mnist.py  Project: greyblue9/teras
        # NOTE(review): this fragment continues a Trainer(...) call whose
        # opening line is outside this excerpt.
        model,
        loss_func=chainer.functions.softmax_cross_entropy,
        accuracy_func=chainer.functions.accuracy)
    # Switch chainer into train mode at the start of each training epoch and
    # out of it for validation; move every batch to the selected device.
    trainer.configure(
        hooks={
            training.EPOCH_TRAIN_BEGIN: lambda _: set_chainer_train(True),
            training.EPOCH_VALIDATE_BEGIN: lambda _: set_chainer_train(False)
        },
        converter=lambda x: chainer.dataset.convert.to_device(device, x))
    # Render progress with tqdm; priority=200 orders this listener relative
    # to others -- TODO confirm ordering semantics against teras docs.
    trainer.add_listener(
        training.listeners.ProgressBar(lambda n: tqdm(total=n)), priority=200)
    # Run the training loop over (train_x, train_y), validating on
    # (test_x, test_y).
    trainer.fit((train_x, train_y), (test_x, test_y), n_epoch, batch_size)


if __name__ == "__main__":
    # 'd' log option -- presumably enables date/debug decoration; confirm
    # against teras App documentation.
    App.configure(name='chainer-mnist', logoption='d')
    # Register the 'train' sub-command; the dict maps each keyword argument
    # of `train` to its command-line option definition.
    App.add_command(
        'train',
        train, {
            'batch_size':
            arg('--batchsize',
                '-b',
                type=int,
                default=100,
                help='Number of images in each mini-batch'),
            'device':
            arg('--device',
                type=int,
                default=-1,
                metavar='ID',
                help='Device ID (negative value indicates CPU)'),
            # NOTE(review): remaining options truncated in this excerpt.
Example #3
0
        # NOTE(review): continues a loop whose `for` header is outside this
        # excerpt; advances the progress bar by the batch size.
        pbar.update(len(ts))
    pbar.close()
    # Emit the evaluator's accumulated metrics, then log how many trees were
    # fully correct out of the test set.
    evaluator.report()
    logging.info("Number of correct tree: {}/{}".format(
        n_corrects, len(test_dataset)))


if __name__ == "__main__":
    # Derive the log directory from the raw command line before App parses
    # it: --savedir gives the directory directly, while --modelfile implies
    # the directory containing the model file (--modelfile wins when both
    # are present, preserving the original precedence).
    #
    # Bug fix: `savedir` was left unbound (NameError on the App.configure
    # line) when neither option was supplied; fall back to the '../logs'
    # default that the commented-out configuration used.
    savedir = '../logs'
    if '--savedir' in sys.argv:
        savedir = sys.argv[sys.argv.index('--savedir') + 1]
    if '--modelfile' in sys.argv:
        savedir = os.path.dirname(sys.argv[sys.argv.index('--modelfile') + 1])
    App.configure(logdir=App.basedir + '/' + savedir)
    # Create the log directory if it does not exist yet.
    logging.AppLogger.configure(mkdir=True)

    # Register the 'train' sub-command; each dict key is the keyword
    # argument of `train` that the parsed option value is passed as.
    App.add_command(
        'train', train, {
            'batch_size':
            arg('--batchsize',
                type=int,
                default=20,
                metavar='NUM',
                help='Number of examples in each mini-batch'),
            'cache_dir':
            arg('--cachedir',
                type=str,
                # Cache lives one level above the application base directory.
                default=(App.basedir + '/../cache'),
                metavar='DIR',
                # NOTE(review): remaining options truncated in this excerpt.
Example #4
0
 # Command-line options for the 'train' sub-command, keyed by the
 # keyword-argument name each parsed value is handed to `train` as.
 train_options = {
     'backend': arg('--backend', type=str,
                    choices=('chainer', 'pytorch'), default='chainer',
                    help='Backend framework for computation'),
     'batch_size': arg('--batchsize', '-b', type=int, default=32,
                       help='Number of examples in each mini-batch'),
     'embed_file': arg('--embedfile', type=str, default=None,
                       help='Pretrained word embedding file'),
     'embed_size': arg('--embedsize', type=int, default=100,
                       help='Size of embeddings'),
     'gpu': arg('--gpu', '-g', type=int, default=-1,
                help='GPU ID (negative value indicates CPU)'),
     'lr': arg('--lr', type=float, default=0.002, help='Learning Rate'),
     'model_params': arg('--model', action='store_dict', default={},
                         help='Model hyperparameter'),
     'n_epoch': arg('--epoch', '-e', type=int, default=20,
                    help='Number of sweeps over the dataset to train'),
     'seed': arg('--seed', type=int, default=None, help='Random seed'),
     'save_to': arg('--out', type=str, default=None,
                    help='Save model to the specified directory'),
     'test_file': arg('--validfile', type=str, default=None,
                      help='validation data file'),
     'train_file': arg('--trainfile', type=str, required=True,
                       help='training data file'),
 }
 App.add_command('train', train, train_options)
Example #5
0
    # Build a Trainer around the optimizer/model pair with the
    # project-provided loss and accuracy callables.
    trainer = training.Trainer(optimizer,
                               model,
                               loss_func=compute_loss,
                               accuracy_func=compute_accuracy)
    # Apply framework-specific settings defined elsewhere in the project.
    trainer.configure(framework_utils.config)

    # Second positional argument is None -- presumably the validation data
    # slot, superseded here by the `validation_data` keyword; confirm
    # against the Trainer.fit signature.
    trainer.fit(train_dataset,
                None,
                batch_size=batch_size,
                epochs=n_epoch,
                validation_data=test_dataset,
                verbose=App.verbose)


if __name__ == "__main__":
    # Make sure the log directory exists before anything writes to it.
    logging.AppLogger.configure(mkdir=True)

    # Command-line options for the 'train' sub-command, keyed by the
    # keyword-argument name each parsed value is handed to `train` as.
    train_options = {
        'train_file': arg('--trainfile', type=str, required=True),
        'test_file': arg('--testfile', type=str),
        'word_embed_file': arg('--embedfile', type=str),
        'n_epoch': arg('--epoch', type=int, default=20),
        'batch_size': arg('--batchsize', type=int, default=10),
        'lr': arg('--lr', type=float, default=0.01),
        'gpu': arg('--gpu', type=int, default=-1),
        'seed': arg('--seed', type=int, default=1),
    }
    App.add_command('train', train, train_options)

    # Turn off chainer's debug mode and type checking for speed.
    chainer.config.debug = False
    chainer.config.type_check = False
    App.run()
Example #6
0
                                  # NOTE(review): continues an expression that starts above this
                                  # excerpt -- appears to leave W untouched when it has zero
                                  # variance; confirm against the full source.
                                  if np.std(W) > 0. else W),
            loader.get_embeddings('pos'),
            # Encoder hyperparameters; each falls back to the shared
            # dropout_ratio when not overridden via kwargs.
            n_lstm_layers=kwargs.get('n_lstm_layers', 3),
            lstm_hidden_size=kwargs.get('lstm_hidden_size', 400),
            embeddings_dropout=kwargs.get('input_dropout', dropout_ratio),
            lstm_dropout=kwargs.get('lstm_dropout', dropout_ratio),
            recurrent_dropout=kwargs.get('recurrent_dropout', dropout_ratio)),
        # Arc/relation MLP sizes and dropouts for the parser head.
        arc_mlp_units=kwargs.get('arc_mlp_units', 500),
        rel_mlp_units=kwargs.get('rel_mlp_units', 100),
        arc_mlp_dropout=kwargs.get('arc_mlp_dropout', dropout_ratio),
        rel_mlp_dropout=kwargs.get('rel_mlp_dropout', dropout_ratio))
    # Hand the fully-configured parser back to the caller.
    return parser


if __name__ == "__main__":
    # Logs go one level above the application base directory; enable
    # debug-level logging, then create the directory if needed.
    App.configure(logdir=App.basedir + '/../logs', loglevel='debug')
    logging.AppLogger.configure(mkdir=True)
    # Register the 'train' sub-command; each dict key is the keyword
    # argument of `train` that the parsed option value is passed as.
    App.add_command(
        'train', train, {
            'batch_size':
            arg('--batchsize',
                type=int,
                default=5000,
                metavar='NUM',
                help='Number of tokens in each mini-batch'),
            'cache_dir':
            arg('--cachedir',
                type=str,
                default=(App.basedir + '/../cache'),
                metavar='DIR',
                help='Cache directory'),
            # NOTE(review): remaining options truncated in this excerpt.
Example #7
0
 # Command-line options for the 'train' sub-command, keyed by the
 # keyword-argument name each parsed value is handed to `train` as.
 train_options = {
     'batch_size': arg('--batchsize', '-b', type=int, default=32,
                       help='Number of examples in each mini-batch'),
     'embed_file': arg('--embedfile', type=str, default=None,
                       help='Pretrained word embedding file'),
     'embed_size': arg('--embedsize', type=int, default=100,
                       help='Size of embeddings'),
     'gpu': arg('--gpu', '-g', type=int, default=-1,
                help='GPU ID (negative value indicates CPU)'),
     'grad_clip': arg('--gradclip', type=float, default=5.0,
                      help='L2 norm threshold of gradient norm'),
     'l2_lambda': arg('--l2', type=float, default=0.0,
                      help='Strength of L2 regularization'),
     'lr': arg('--lr', type=float, default=0.001,
               help='Learning Rate'),
     'n_epoch': arg('--epoch', '-e', type=int, default=20,
                    help='Number of sweeps over the dataset to train'),
     'seed': arg('--seed', type=int, default=1,
                 help='Random seed'),
     'save_to': arg('--out', type=str, default=None,
                    help='Save model to the specified directory'),
     'tasks': arg('--task', type=str, default='tp',
                  help='Tasks to train: {t: tagging, p: parsing}'),
     'test_file': arg('--validfile', type=str, default=None,
                      help='validation data file'),
     'train_file': arg('--trainfile', type=str, required=True,
                       help='training data file'),
 }
 App.add_command('train', train, train_options)