Example #1
def main():
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--model', '-m', default='',
                        help='model snapshot')
    parser.add_argument('--enc', '-e', type=str, default='enc_iter_60000.npz', help='encoder snapshot')
    parser.add_argument('--dec', '-d', type=str, default='dec_iter_60000.npz', help='decoder snapshot')
    parser.add_argument('--out', '-o', type=str, default='out', help='output dir')
    parser.add_argument('--input', '-i', required=True, help='input jpg')
    parser.add_argument('--contour', '-c', action='store_true', help='from contour image or not')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))

    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=3, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    if args.model:
        # A full trainer snapshot was given: rebuild the optimizers and the
        # trainer skeleton so load_npz can deserialize the snapshot into them.
        opt_enc = make_optimizer(enc)
        opt_dec = make_optimizer(dec)
        opt_dis = make_optimizer(dis)

        # Set up a trainer
        updater = FacadeUpdater(
            models=(enc, dec, dis),
            iterator={},
            optimizer={
                'enc': opt_enc, 'dec': opt_dec,
                'dis': opt_dis},
            device=args.gpu)
        trainer = training.Trainer(updater, (200, 'epoch'), out='generate/')
        chainer.serializers.load_npz(args.model, trainer)
    elif args.enc and args.dec:
        chainer.serializers.load_npz(args.enc, enc)
        chainer.serializers.load_npz(args.dec, dec)

    if not args.contour:
        # The input is a plain photograph, so derive its contour image first.
        from make_contour import get_contour_image
        get_contour_image(args.input)

    generate_image_from_contour(args.input, enc, dec, args.out)
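
A hedged invocation sketch for this generation script (the file name generate.py is an assumption; the flags come from the argparse definitions above):

# Hypothetical usage; 'generate.py' is an assumed file name.
#   python generate.py -i sample.jpg -e enc_iter_60000.npz -d dec_iter_60000.npz -o out
# Pass -c when the input is already a contour image, which skips the
# get_contour_image() preprocessing step above.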
Example #2
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset',
                        '-i',
                        default='./facade/base',
                        help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=12)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=12, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = FacadeDataset(args.dataset, data_range=(1, 300))
    test_d = FacadeDataset(args.dataset, data_range=(300, 379))
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={
                                'main': train_iter,
                                'test': test_iter
                            },
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec,
                                'dis': opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'enc/loss',
        'dec/loss',
        'dis/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_image(updater, enc, dec, 5, 5, args.seed, args.out),
                   trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
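
A side note on the snapshot file names used throughout these examples: extensions.snapshot formats the filename template with the trainer as the sole positional argument, so '{.updater.iteration}' expands to trainer.updater.iteration. A minimal sketch of the mechanism:

# How Chainer expands the snapshot file name template:
template = 'snapshot_iter_{.updater.iteration}.npz'
# extensions.snapshot() effectively calls template.format(trainer), so after
# 1000 iterations the resulting file is 'snapshot_iter_1000.npz'.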
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize', '-b', type=int, default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--dataset', '-i', default='./facade/base',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', type=str,
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--snapshot_interval', type=int, default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=100,
                        help='Interval of displaying log to console')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    if chainer.get_dtype() == numpy.float16:
        warnings.warn(
            'This example may cause NaN in FP16 mode.', RuntimeWarning)

    device = chainer.get_device(args.device)
    if device.xp is chainerx:
        sys.stderr.write('This example does not support ChainerX devices.\n')
        sys.exit(1)

    print('Device: {}'.format(device))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    device.use()

    # Set up a neural network to train
    enc = Encoder(in_ch=12)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=12, out_ch=3)

    enc.to_device(device)
    dec.to_device(device)
    dis.to_device(device)

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer
    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = FacadeDataset(args.dataset, data_range=(1, 300))
    test_d = FacadeDataset(args.dataset, data_range=(300, 379))
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(
        models=(enc, dec, dis),
        iterator={
            'main': train_iter,
            'test': test_iter},
        optimizer={
            'enc': opt_enc, 'dec': opt_dec,
            'dis': opt_dis},
        device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(extensions.snapshot(
        filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'enc/loss', 'dec/loss', 'dis/loss',
    ]), trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(
        out_image(
            updater, enc, dec,
            5, 5, args.seed, args.out),
        trigger=snapshot_interval)

    if args.resume is not None:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
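
For reference, a minimal sketch of the device specifiers chainer.get_device accepts in this version (Chainer v6+; the specific device IDs are assumptions about the local setup):

import chainer

cpu = chainer.get_device(-1)             # negative integer -> NumPy (CPU)
gpu = chainer.get_device(0)              # non-negative integer -> CuPy, GPU 0
gpu_alt = chainer.get_device('@cupy:0')  # equivalent CuPy backend specifier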
Example #4
File: train.py  Project: g089vg/python
    train_d = CrackDataset(args.dataset, data_range=(1, 90))
    test_d = CrackDataset(args.dataset, data_range=(91, 100))
#    train_d = FacadeDataset(args.dataset, data_range=(1,300))
#    test_d = FacadeDataset(args.dataset, data_range=(1,379))
#    print(train_d.shape,test_d.shape)
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(
        models=(enc, dec, dis),
        iterator={
            'main': train_iter,
            'test': test_iter},
        optimizer={
            'enc': opt_enc, 'dec': opt_dec, 
            'dis': opt_dis},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(extensions.snapshot(
        filename='snapshot_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
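
A minimal sketch of what the SerialIterator set up above yields (shuffle defaults to True, and iteration repeats across epochs):

it = chainer.iterators.SerialIterator(train_d, batch_size=2)
batch = it.next()  # a list of 2 dataset examples, e.g. (input, label) pairs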
Example #5
def main():
    parser = argparse.ArgumentParser(
        description="chainer implementation of pix2pix")
    parser.add_argument("--batchsize",
                        "-b",
                        type=int,
                        default=1,
                        help="Number of images in each mini-batch")
    parser.add_argument("--epoch",
                        "-e",
                        type=int,
                        default=40000,
                        help="Number of sweeps over the dataset to train")
    parser.add_argument("--gpu",
                        "-g",
                        type=int,
                        default=-1,
                        help="GPU ID (negative value indicates CPU)")
    parser.add_argument("--dataset",
                        "-i",
                        default="./input/png/",
                        help="Directory of image files.")
    parser.add_argument("--out",
                        "-o",
                        default="D:/output/imasUtaConverter/",
                        help="Directory to output the result")
    parser.add_argument("--resume",
                        "-r",
                        default="",
                        help="Resume the training from snapshot")
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument("--snapshot_interval",
                        type=int,
                        default=10000,
                        help="Interval of snapshot")
    parser.add_argument("--display_interval",
                        type=int,
                        default=20,
                        help="Interval of displaying log to console")
    args = parser.parse_args()

    print("GPU: {}".format(args.gpu))
    print("# Minibatch-size: {}".format(args.batchsize))
    print("# epoch: {}".format(args.epoch))
    print("")

    # Set up a neural network to train
    enc = Encoder(in_ch=2)
    dec = Decoder(out_ch=2)
    dis = Discriminator(in_ch=2, out_ch=2)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(
            args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), "hook_dec")
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = FacadeDataset(args.dataset, data_range=(0, 38))
    test_d = FacadeDataset(args.dataset, data_range=(38, 40))
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={
                                "main": train_iter,
                                "test": test_iter
                            },
                            optimizer={
                                "enc": opt_enc,
                                "dec": opt_dec,
                                "dis": opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, "epoch"), out=args.out)

    snapshot_interval = (args.snapshot_interval, "iteration")
    display_interval = (args.display_interval, "iteration")
    trainer.extend(
        extensions.snapshot(filename="snapshot_iter_{.updater.iteration}.npz"),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, "enc_iter_{.updater.iteration}.npz"),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, "dec_iter_{.updater.iteration}.npz"),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, "dis_iter_{.updater.iteration}.npz"),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        "epoch",
        "iteration",
        "enc/loss",
        "dec/loss",
        "dis/loss",
    ]),
                   trigger=display_interval)
    #trainer.extend(extensions.PlotReport(["enc/loss", "dis/loss"], x_key="epoch", file_name="loss.png"))
    trainer.extend(extensions.ProgressBar(update_interval=20))
    trainer.extend(out_image(updater, enc, dec, 1, 10, args.seed, args.out),
                   trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
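
The PlotReport extension above is commented out; if loss curves are wanted, a hedged sketch of enabling it (PlotReport requires matplotlib, and the key names follow the PrintReport list above):

trainer.extend(extensions.PlotReport(
    ["enc/loss", "dec/loss", "dis/loss"],
    x_key="iteration", file_name="loss.png"),
    trigger=display_interval)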
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')

    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')

    parser.add_argument('--seed', type=int, default=0, help='Random seed')

    parser.add_argument('--model', '-m', default='', help='model snapshot')

    parser.add_argument('--input',
                        '-i',
                        default='../images/generate/sample.jpg',
                        help='input jpg')

    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))

    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=3, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    if os.path.exists('generate_tmp'):
        shutil.rmtree('generate_tmp')

    os.mkdir('generate_tmp')
    os.mkdir('generate_tmp/base')
    os.mkdir('generate_tmp/label')
    shutil.copyfile(args.input, 'generate_tmp/base/tmp.jpg')
    shutil.copyfile(args.input, 'generate_tmp/label/tmp.jpg')
    test_d = FacadeDataset('generate_tmp/')
    test_iter = chainer.iterators.SerialIterator(test_d, 1)

    # Set up a trainer only as a container for deserializing the snapshot;
    # no training iterators are needed for generation.
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={},
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec,
                                'dis': opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (200, 'epoch'),
                               out='../results/generate/')
    chainer.serializers.load_npz(args.model, trainer)

    out_image(updater, enc, dec, 1, 1, args.seed, '../results/generate/', True,
              test_iter)(trainer)
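
Note the final line: out_image(...) returns a trainer extension, i.e. a callable that takes the trainer. Calling it directly, as above, performs a single generation pass immediately instead of registering it and running trainer.run(). A minimal restatement of the pattern:

ext = out_image(updater, enc, dec, 1, 1, args.seed,
                '../results/generate/', True, test_iter)
ext(trainer)  # run the extension once, outside the training loop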
Example #7
def main():
    parser = argparse.ArgumentParser(
        description="chainer implementation of Unet")
    parser.add_argument("--batchsize",
                        "-b",
                        type=int,
                        default=1,
                        help="Number of images in each mini-batch")
    parser.add_argument("--epoch", "-e", type=int, default=200, help="epoch")
    parser.add_argument("--gpu", "-g", type=int, default=-1, help="GPU ID")
    parser.add_argument("--dataset",
                        "-i",
                        default="./train/",
                        help="Directory of image files")
    parser.add_argument("--out",
                        "-o",
                        default="result/",
                        help="Directory to output the result")
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Only Encoder Decoder with Unet
    enc = Encoder(in_ch=3)  # in_ch => 3(YCbCr)
    dec = Decoder(out_ch=3)  # out_ch => 3(DCT)

    # GPU set up
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        enc.to_gpu()
        dec.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)

    train_d = ImgDCTDataset(args.dataset, data_range=(0, 1000))
    test_d = ImgDCTDataset(args.dataset, data_range=(1000, 2000))
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    updater = FacadeUpdater(models=(enc, dec),
                            iterator={
                                'main': train_iter,
                                'test': test_iter
                            },
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    # trainer.extend(extensions.snapshot_object(
    #     dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'enc/loss',
        'dec/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
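
LogReport aggregates the reported values every display_interval iterations and PrintReport prints the selected keys from that aggregation. As a hedged sketch, an extra observed value such as the encoder's current learning rate could be logged with the standard observe_lr extension (the 'enc' optimizer name matches the updater above):

trainer.extend(extensions.observe_lr(optimizer_name='enc'),
               trigger=display_interval)
# 'lr' could then be appended to the PrintReport key list above.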
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Batch size')  # 50
    parser.add_argument('--epoch', '-e', type=int, default=50,
                        help='Number of epochs')  # 20
    parser.add_argument('--gpu', '-g', type=int, default=0, help='Whether to use a GPU')
    parser.add_argument('--dataset',
                        '-i',
                        default='./facade/base',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result', help='Folder for the result files')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=5,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=3, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_data = dataset.DatasetPourDot(trainQuePath, trainAnsPath)
    test_data = dataset.DatasetPourDot(testQuePath, testAnsPath)
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={
                                'main': train_iter,
                                'test': test_iter
                            },
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec,
                                'dis': opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Extensions for displaying training progress
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=(frequency, 'epoch'))
    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['dis/loss'], 'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(['dis/accuracy'],
                                  'epoch',
                                  file_name='accuracy.png'))
    trainer.extend(
        extensions.PrintReport([
            'epoch',
            'iteration',
            'enc/loss',
            'dec/loss',
            'dis/loss',
        ]))
    trainer.extend(extensions.ProgressBar())
    """
    trainer.extend(
        out_image(
            updater, enc, dec,
            5, 5, args.seed, args.out),
        trigger=snapshot_interval)
    """
    # Resume from an interrupted snapshot if one was given
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)
    # Start training; the trainer drives the loop
    trainer.run()
    # Move the models back to the CPU so they can run without a GPU
    enc.to_cpu()
    dec.to_cpu()
    dis.to_cpu()
    # Save the weights in npz format
    chainer.serializers.save_npz(args.out + '/mymodel_enc.npz', enc)
    chainer.serializers.save_npz(args.out + '/mymodel_dec.npz', dec)
    chainer.serializers.save_npz(args.out + '/mymodel_dis.npz', dis)
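
A hedged sketch of reloading the weights saved above for CPU inference (this assumes the same Encoder and Decoder definitions are importable):

enc = Encoder(in_ch=3)
dec = Decoder(out_ch=3)
chainer.serializers.load_npz('result/mymodel_enc.npz', enc)  # default --out dir
chainer.serializers.load_npz('result/mymodel_dec.npz', dec)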