Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--epoch',
                        '-e',
                        default=400,
                        type=int,
                        help='number of epochs to learn')
    parser.add_argument('--unit',
                        '-u',
                        default=30,
                        type=int,
                        help='number of units')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=25,
                        help='learning minibatch size')
    parser.add_argument('--label',
                        '-l',
                        type=int,
                        default=5,
                        help='number of labels')
    parser.add_argument('--epocheval',
                        '-p',
                        type=int,
                        default=5,
                        help='number of epochs per evaluation')
    parser.add_argument('--test', dest='test', action='store_true')
    parser.set_defaults(test=False)
    args = parser.parse_args()

    vocab = {}
    if args.test:
        max_size = 10  # use a tiny subset when --test is given
    else:
        max_size = None
    train_trees = data.read_corpus('trees/train.txt', max_size)
    test_trees = data.read_corpus('trees/test.txt', max_size)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        xp = chainer.backends.cuda.cupy
    else:
        xp = numpy

    train_data = [linearize_tree(vocab, t, xp) for t in train_trees]
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_data = [linearize_tree(vocab, t, xp) for t in test_trees]
    test_iter = chainer.iterators.SerialIterator(test_data,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    model = ThinStackRecursiveNet(len(vocab), args.unit, args.label)

    if args.gpu >= 0:
        model.to_gpu()

    optimizer = chainer.optimizers.AdaGrad(0.1)
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter,
                                       optimizer,
                                       device=None,
                                       converter=convert)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'))
    trainer.extend(extensions.Evaluator(test_iter,
                                        model,
                                        converter=convert,
                                        device=None),
                   trigger=(args.epocheval, 'epoch'))
    trainer.extend(extensions.LogReport())

    trainer.extend(
        extensions.MicroAverage('main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage('validation/main/correct',
                                'validation/main/total',
                                'validation/main/accuracy'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.run()
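
All of these snippets follow the same reporting contract: the model reports raw counts, and extensions.MicroAverage derives the ratio. A minimal sketch of that pattern (SimpleClassifier is hypothetical, not part of the example above):

import chainer
import chainer.functions as F
import chainer.links as L


class SimpleClassifier(chainer.Chain):

    def __init__(self, n_in, n_out):
        super(SimpleClassifier, self).__init__()
        with self.init_scope():
            self.fc = L.Linear(n_in, n_out)

    def __call__(self, x, t):
        y = self.fc(x)
        loss = F.softmax_cross_entropy(y, t)
        # Report raw counts; MicroAverage('main/correct', 'main/total',
        # 'main/accuracy') sums them over the reporting interval and stores
        # sum(correct) / sum(total) as the micro-averaged accuracy.
        chainer.report({
            'loss': loss,
            'correct': int((y.array.argmax(axis=1) == t).sum()),
            'total': len(t),
        }, self)
        return loss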
Example #2
def train(args):
    """ Training model with chainer backend.
    This function is called from eend/bin/train.py with
    parsed command-line arguments.
    """
    np.random.seed(args.seed)
    os.environ['CHAINER_SEED'] = str(args.seed)
    chainer.global_config.cudnn_deterministic = True

    train_set = KaldiDiarizationDataset(
        args.train_data_dir,
        chunk_size=args.num_frames,
        context_size=args.context_size,
        input_transform=args.input_transform,
        frame_size=args.frame_size,
        frame_shift=args.frame_shift,
        subsampling=args.subsampling,
        rate=args.sampling_rate,
        use_last_samples=True,
        label_delay=args.label_delay,
        n_speakers=args.num_speakers,
        )
    dev_set = KaldiDiarizationDataset(
        args.valid_data_dir,
        chunk_size=args.num_frames,
        context_size=args.context_size,
        input_transform=args.input_transform,
        frame_size=args.frame_size,
        frame_shift=args.frame_shift,
        subsampling=args.subsampling,
        rate=args.sampling_rate,
        use_last_samples=True,
        label_delay=args.label_delay,
        n_speakers=args.num_speakers,
        )

    # Prepare model
    Y, T = train_set.get_example(0)

    if args.model_type == 'BLSTM':
        model = BLSTMDiarization(
                in_size=Y.shape[1],
                n_speakers=args.num_speakers,
                hidden_size=args.hidden_size,
                n_layers=args.num_lstm_layers,
                embedding_layers=args.embedding_layers,
                embedding_size=args.embedding_size,
                dc_loss_ratio=args.dc_loss_ratio,
                )
    elif args.model_type == 'Transformer':
        model = TransformerDiarization(
                args.num_speakers,
                Y.shape[1],
                n_units=args.hidden_size,
                n_heads=args.transformer_encoder_n_heads,
                n_layers=args.transformer_encoder_n_layers,
                dropout=args.transformer_encoder_dropout)
    else:
        raise ValueError(
            'Possible model_type values are "Transformer" and "BLSTM"')

    if args.gpu >= 0:
        gpuid = use_single_gpu()
        print('GPU device {} is used'.format(gpuid))
        model.to_gpu()
    else:
        gpuid = -1
    print('Prepared model')

    # Setup optimizer
    if args.optimizer == 'adam':
        optimizer = optimizers.Adam(alpha=args.lr)
    elif args.optimizer == 'sgd':
        optimizer = optimizers.SGD(lr=args.lr)
    elif args.optimizer == 'noam':
        optimizer = optimizers.Adam(alpha=0, beta1=0.9, beta2=0.98, eps=1e-9)
    else:
        raise ValueError(args.optimizer)

    optimizer.setup(model)
    if args.gradclip > 0:
        optimizer.add_hook(
            chainer.optimizer_hooks.GradientClipping(args.gradclip))

    # Init/Resume
    if args.initmodel:
        print('Load model from', args.initmodel)
        serializers.load_npz(args.initmodel, model)

    train_iter = iterators.MultiprocessIterator(
            train_set,
            batch_size=args.batchsize,
            repeat=True, shuffle=True,
            # shared_mem=64000000,
            shared_mem=None,
            n_processes=4, n_prefetch=2)

    dev_iter = iterators.MultiprocessIterator(
            dev_set,
            batch_size=args.batchsize,
            repeat=False, shuffle=False,
            # shared_mem=64000000,
            shared_mem=None,
            n_processes=4, n_prefetch=2)

    if args.gradient_accumulation_steps > 1:
        updater = GradientAccumulationUpdater(
            train_iter, optimizer, converter=_convert, device=gpuid)
    else:
        updater = training.StandardUpdater(
            train_iter, optimizer, converter=_convert, device=gpuid)

    trainer = training.Trainer(
            updater,
            (args.max_epochs, 'epoch'),
            out=args.model_save_dir)

    evaluator = extensions.Evaluator(
            dev_iter, model, converter=_convert, device=gpuid)
    trainer.extend(evaluator)

    if args.optimizer == 'noam':
        trainer.extend(
            NoamScheduler(args.hidden_size,
                          warmup_steps=args.noam_warmup_steps,
                          scale=args.noam_scale),
            trigger=(1, 'iteration'))
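        # NoamScheduler comes from the surrounding EEND codebase; presumably
        # it applies the Transformer "Noam" schedule,
        #     lr = scale * hidden_size**-0.5
        #          * min(step**-0.5, step * warmup_steps**-1.5),
        # which is why Adam was created with alpha=0 above: the scheduler
        # overwrites the learning rate on every iteration.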

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # MICRO AVERAGE
    metrics = [
            ('diarization_error', 'speaker_scored', 'DER'),
            ('speech_miss', 'speech_scored', 'SAD_MR'),
            ('speech_falarm', 'speech_scored', 'SAD_FR'),
            ('speaker_miss', 'speaker_scored', 'MI'),
            ('speaker_falarm', 'speaker_scored', 'FA'),
            ('speaker_error', 'speaker_scored', 'CF'),
            ('correct', 'frames', 'accuracy')]
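    # Each (numerator, denominator, name) triple becomes a micro-averaged
    # ratio, e.g. main/DER = sum(diarization_error) / sum(speaker_scored);
    # by the usual decomposition, DER = MI + FA + CF.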
    for num, den, name in metrics:
        trainer.extend(extensions.MicroAverage(
            'main/{}'.format(num),
            'main/{}'.format(den),
            'main/{}'.format(name)))
        trainer.extend(extensions.MicroAverage(
            'validation/main/{}'.format(num),
            'validation/main/{}'.format(den),
            'validation/main/{}'.format(name)))

    trainer.extend(extensions.LogReport(log_name='log_iter',
                                        trigger=(1000, 'iteration')))

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/diarization_error_rate',
         'validation/main/diarization_error_rate',
         'elapsed_time']))
    trainer.extend(extensions.PlotReport(
        ['main/loss', 'validation/main/loss'],
        x_key='epoch',
        file_name='loss.png'))
    trainer.extend(extensions.PlotReport(
        ['main/diarization_error_rate',
         'validation/main/diarization_error_rate'],
        x_key='epoch',
        file_name='DER.png'))
    trainer.extend(extensions.ProgressBar(update_interval=100))
    trainer.extend(extensions.snapshot(
        filename='snapshot_epoch-{.updater.epoch}'))

    trainer.extend(extensions.dump_graph('main/loss', out_name="cg.dot"))

    trainer.run()
    print('Finished!')
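
A hedged sketch of how train() might be invoked, per its docstring (the argument names below are inferred from the attributes the function reads; the real parser lives in eend/bin/train.py):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('train_data_dir')
parser.add_argument('valid_data_dir')
parser.add_argument('model_save_dir')
parser.add_argument('--model-type', default='Transformer')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--seed', type=int, default=777)
# ... plus the remaining options train() reads (num_frames, hidden_size,
# optimizer, lr, gradclip, batchsize, max_epochs, ...)
args = parser.parse_args()
train(args)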
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--epoch',
                        '-e',
                        default=400,
                        type=int,
                        help='number of epochs to learn')
    parser.add_argument('--unit',
                        '-u',
                        default=30,
                        type=int,
                        help='number of units')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=25,
                        help='learning minibatch size')
    parser.add_argument('--label',
                        '-l',
                        type=int,
                        default=5,
                        help='number of labels')
    parser.add_argument('--epocheval',
                        '-p',
                        type=int,
                        default=5,
                        help='number of epochs per evaluation')
    parser.add_argument('--test', dest='test', action='store_true')
    parser.add_argument('--root',
                        '-r',
                        type=str,
                        default="",
                        help='the root directory of the input dataset')
    parser.set_defaults(test=False)
    args = parser.parse_args()

    n_epoch = args.epoch  # number of epochs
    n_units = args.unit  # number of units per layer
    batchsize = args.batchsize  # minibatch size
    n_label = args.label  # number of labels
    epoch_per_eval = args.epocheval  # number of epochs per evaluation

    if args.test:
        max_size = 10
    else:
        max_size = None

    vocab = {}
    train_data = [
        convert_tree(vocab, tree) for tree in data.read_corpus(
            os.path.join(args.root, 'trees/train.txt'), max_size)
    ]
    train_iter = chainer.iterators.SerialIterator(train_data, batchsize)
    validation_data = [
        convert_tree(vocab, tree)
        for tree in data.read_corpus(
            os.path.join(args.root, 'trees/dev.txt'), max_size)
    ]
    validation_iter = chainer.iterators.SerialIterator(validation_data,
                                                       batchsize,
                                                       repeat=False,
                                                       shuffle=False)
    test_data = [
        convert_tree(vocab, tree) for tree in data.read_corpus(
            os.path.join(args.root, 'trees/test.txt'), max_size)
    ]

    model = RecursiveNet(len(vocab), n_units, n_label)

    if args.gpu >= 0:
        model.to_gpu()

    # Setup optimizer
    optimizer = optimizers.AdaGrad(lr=0.1)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001))

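    # Identity converter: each example is already a converted tree, so
    # minibatches are handed to the model unchanged.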
    def _convert(batch, _):
        return batch

    # Setup updater
    updater = chainer.training.StandardUpdater(train_iter,
                                               optimizer,
                                               device=args.gpu,
                                               converter=_convert)

    # Setup trainer and run
    trainer = chainer.training.Trainer(updater, (n_epoch, 'epoch'))
    trainer.extend(extensions.Evaluator(validation_iter,
                                        model,
                                        device=args.gpu,
                                        converter=_convert),
                   trigger=(epoch_per_eval, 'epoch'))
    trainer.extend(extensions.LogReport())

    trainer.extend(
        extensions.MicroAverage('main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage('validation/main/correct',
                                'validation/main/total',
                                'validation/main/accuracy'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))
    trainer.run()

    print('Test evaluation')
    evaluate(model, test_data)
Example #4
def main():
    parser = argparse.ArgumentParser(
        description='Chainer example: POS-tagging')
    parser.add_argument('--batchsize', '-b', type=int, default=30,
                        help='Number of sentences in each mini batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    vocab = collections.defaultdict(lambda: len(vocab))
    pos_vocab = collections.defaultdict(lambda: len(pos_vocab))

    # Convert word sequences and pos sequences to integer sequences.
    nltk.download('brown')
    data = []
    for sentence in nltk.corpus.brown.tagged_sents():
        xs = numpy.array([vocab[lex] for lex, _ in sentence], numpy.int32)
        ys = numpy.array([pos_vocab[pos] for _, pos in sentence], numpy.int32)
        data.append((xs, ys))

    print('# of sentences: {}'.format(len(data)))
    print('# of words: {}'.format(len(vocab)))
    print('# of pos: {}'.format(len(pos_vocab)))

    model = CRF(len(vocab), len(pos_vocab))
    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu(args.gpu)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001))

    test_data, train_data = datasets.split_dataset_random(
        data, len(data) // 10, seed=0)

    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    evaluator = extensions.Evaluator(
        test_iter, model, device=args.gpu, converter=convert)
    # Only validate every 1000 iterations
    trainer.extend(evaluator, trigger=(1000, 'iteration'))
    trainer.extend(extensions.LogReport(trigger=(100, 'iteration')),
                   trigger=(100, 'iteration'))

    trainer.extend(
        extensions.MicroAverage(
            'main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage(
            'validation/main/correct', 'validation/main/total',
            'validation/main/accuracy'))

    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
        trigger=(100, 'iteration'))

    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
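
The vocab construction in this example uses a defaultdict whose factory closes over the dict itself, so each unseen token gets the next integer id on first lookup. A standalone sketch:

import collections

vocab = collections.defaultdict(lambda: len(vocab))
ids = [vocab[w] for w in ['the', 'cat', 'sat', 'the']]
print(ids)          # [0, 1, 2, 0]
print(dict(vocab))  # {'the': 0, 'cat': 1, 'sat': 2}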
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device',
                        '-d',
                        type=str,
                        default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--epoch',
                        '-e',
                        default=400,
                        type=int,
                        help='number of epochs to learn')
    parser.add_argument('--unit',
                        '-u',
                        default=30,
                        type=int,
                        help='number of units')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=25,
                        help='learning minibatch size')
    parser.add_argument('--label',
                        '-l',
                        type=int,
                        default=5,
                        help='number of labels')
    parser.add_argument('--epocheval',
                        '-p',
                        type=int,
                        default=5,
                        help='number of epochs per evaluation')
    parser.add_argument('--test', dest='test', action='store_true')
    parser.set_defaults(test=False)
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu',
                       '-g',
                       dest='device',
                       type=int,
                       nargs='?',
                       const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    if chainer.get_dtype() == numpy.float16:
        warnings.warn('This example may cause NaN in FP16 mode.',
                      RuntimeWarning)

    vocab = {}
    if args.test:
        max_size = 10  # use a tiny subset when --test is given
    else:
        max_size = None
    train_trees = data.read_corpus('trees/train.txt', max_size)
    test_trees = data.read_corpus('trees/test.txt', max_size)

    device = chainer.get_device(args.device)
    device.use()
    xp = device.xp

    train_data = [linearize_tree(vocab, t, xp) for t in train_trees]
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_data = [linearize_tree(vocab, t, xp) for t in test_trees]
    test_iter = chainer.iterators.SerialIterator(test_data,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    model = ThinStackRecursiveNet(len(vocab), args.unit, args.label)
    model.to_device(device)

    optimizer = chainer.optimizers.AdaGrad(0.1)
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter,
                                       optimizer,
                                       converter=convert,
                                       device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'))
    trainer.extend(extensions.Evaluator(test_iter,
                                        model,
                                        converter=convert,
                                        device=device),
                   trigger=(args.epocheval, 'epoch'))
    trainer.extend(extensions.LogReport())

    trainer.extend(
        extensions.MicroAverage('main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage('validation/main/correct',
                                'validation/main/total',
                                'validation/main/accuracy'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
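
This example replaces raw GPU ids with Chainer's unified device API. A minimal sketch of the pattern (values are illustrative):

import chainer

device = chainer.get_device('-1')       # negative integer string -> NumPy
xp = device.xp                          # numpy here; cupy for '0', '1', ...
x = xp.zeros((2, 3), dtype=xp.float32)
device.use()                            # make this the current device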
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device',
                        '-d',
                        type=str,
                        default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        type=str,
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        type=str,
                        help='Resume the training from snapshot')
    parser.add_argument('--epoch',
                        '-e',
                        default=400,
                        type=int,
                        help='number of epochs to learn')
    parser.add_argument('--unit',
                        '-u',
                        default=30,
                        type=int,
                        help='number of units')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=25,
                        help='learning minibatch size')
    parser.add_argument('--label',
                        '-l',
                        type=int,
                        default=5,
                        help='number of labels')
    parser.add_argument('--epocheval',
                        '-p',
                        type=int,
                        default=5,
                        help='number of epochs per evaluation')
    parser.add_argument('--test', dest='test', action='store_true')
    parser.set_defaults(test=False)
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu',
                       '-g',
                       dest='device',
                       type=int,
                       nargs='?',
                       const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    if chainer.get_dtype() == np.float16:
        warnings.warn('This example may cause NaN in FP16 mode.',
                      RuntimeWarning)

    n_epoch = args.epoch  # number of epochs
    n_units = args.unit  # number of units per layer
    batchsize = args.batchsize  # minibatch size
    n_label = args.label  # number of labels
    epoch_per_eval = args.epocheval  # number of epochs per evaluation

    if args.test:
        max_size = 10
    else:
        max_size = None

    device = chainer.get_device(args.device)
    device.use()

    vocab = {}
    train_data = [
        convert_tree(vocab, tree)
        for tree in data.read_corpus('trees/train.txt', max_size)
    ]
    train_iter = chainer.iterators.SerialIterator(train_data, batchsize)
    validation_data = [
        convert_tree(vocab, tree)
        for tree in data.read_corpus('trees/dev.txt', max_size)
    ]
    validation_iter = chainer.iterators.SerialIterator(validation_data,
                                                       batchsize,
                                                       repeat=False,
                                                       shuffle=False)
    test_data = [
        convert_tree(vocab, tree)
        for tree in data.read_corpus('trees/test.txt', max_size)
    ]

    model = RecursiveNet(len(vocab), n_units, n_label)
    model.to_device(device)

    # Setup optimizer
    optimizer = optimizers.AdaGrad(lr=0.1)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001))

    # Setup updater
    updater = chainer.training.StandardUpdater(train_iter,
                                               optimizer,
                                               converter=convert,
                                               device=device)

    # Setup trainer and run
    trainer = chainer.training.Trainer(updater, (n_epoch, 'epoch'), args.out)
    trainer.extend(extensions.Evaluator(validation_iter,
                                        model,
                                        converter=convert,
                                        device=device),
                   trigger=(epoch_per_eval, 'epoch'))
    trainer.extend(extensions.LogReport())

    trainer.extend(
        extensions.MicroAverage('main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage('validation/main/correct',
                                'validation/main/total',
                                'validation/main/accuracy'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
        trigger=(epoch_per_eval, 'epoch'))

    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume is not None:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()

    print('Test evaluation')
    evaluate(model, test_data)
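
This variant adds the snapshot/resume round trip: extensions.snapshot serializes the whole Trainer (model, optimizer state, iteration counters), and --resume restores it. A hypothetical session (the script name is assumed):

python train_sentiment.py --device 0 --out result
# ... writes result/snapshot_epoch_5, result/snapshot_epoch_10, ...
python train_sentiment.py --device 0 --out result --resume result/snapshot_epoch_5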
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Chainer example: POS-tagging')
    parser.add_argument('--batchsize', '-b', type=int, default=30,
                        help='Number of sentences in each mini batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    if chainer.get_dtype() == numpy.float16:
        warnings.warn(
            'This example may cause NaN in FP16 mode.', RuntimeWarning)

    vocab = collections.defaultdict(lambda: len(vocab))
    pos_vocab = collections.defaultdict(lambda: len(pos_vocab))

    # Convert word sequences and pos sequences to integer sequences.
    nltk.download('brown')
    data = []
    for sentence in nltk.corpus.brown.tagged_sents():
        xs = numpy.array([vocab[lex] for lex, _ in sentence], numpy.int32)
        ys = numpy.array([pos_vocab[pos] for _, pos in sentence], numpy.int32)
        data.append((xs, ys))

    print('# of sentences: {}'.format(len(data)))
    print('# of words: {}'.format(len(vocab)))
    print('# of pos: {}'.format(len(pos_vocab)))

    device = chainer.get_device(args.device)
    device.use()

    model = CRF(len(vocab), len(pos_vocab))
    model.to_device(device)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001))

    test_data, train_data = datasets.split_dataset_random(
        data, len(data) // 10, seed=0)

    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    evaluator = extensions.Evaluator(
        test_iter, model, device=device, converter=convert)
    # Only validate every 1000 iterations
    trainer.extend(evaluator, trigger=(1000, 'iteration'))
    trainer.extend(extensions.LogReport(trigger=(100, 'iteration')),
                   trigger=(100, 'iteration'))

    trainer.extend(
        extensions.MicroAverage(
            'main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage(
            'validation/main/correct', 'validation/main/total',
            'validation/main/accuracy'))

    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
        trigger=(100, 'iteration'))

    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()