Example #1
def main():
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--img', '-i', help='Input image')
    parser.add_argument('--out', '-o', default='result_dehighlight',
                        help='Directory to output the result')
    args = parser.parse_args()


    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=1)
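    # NOTE: ENC_W and DEC_W (paths to the pretrained weight files) are
    # module-level constants not shown in this snippet; Example #11 below
    # builds comparable paths with os.path.join(args.out, ...).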

    chainer.serializers.load_npz(ENC_W, enc)
    chainer.serializers.load_npz(DEC_W, dec)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()

    inimg = loadimg(args.img)
    ch, h, w = inimg.shape
    # pad the input so its height and width are multiples of 256
    in_ary = np.zeros((ch,math.ceil(h/256)*256, math.ceil(w/256)*256), dtype="f")
    in_ary[:,0:h,0:w] = inimg
    x_in = in_ary[np.newaxis,:] # to fit into the minibatch shape
    print(x_in.shape)
    # x_in as an input image
    x_in = chainer.Variable(x_in)
    if args.gpu >= 0:
        x_in.to_gpu()

    st = time.time()

    for i in range(10):
        z = enc(x_in)
        x_out = dec(z)
    ts = time.time() - st
    print("time:{:.2f}".format(ts/10.0))


    if args.gpu >= 0:
        out_ary = x_out.data.get()[0]
    else:
        out_ary = x_out.data[0]
    #img_show = np.zeros((inimg.shape[0], inimg.shape[1], inimg.shape[2]*2))
    #img_show[:,:,:inimg.shape[2]] = inimg
    #img_show[:,:outimg.shape[1],inimg.shape[2]:inimg.shape[2]+outimg.shape[2]] = outimg
    outimg = out_ary[:, 0:h, 0:w]  # trim padding
    # the decoder output has a single channel here (out_ch=1); repeat it so it
    # can be concatenated side-by-side with the 3-channel input
    img_show = np.concatenate((inimg, np.repeat(outimg, 3, axis=0)), axis=2)
    bgrpic = to_bgr(img_show).copy()
    cv2.putText(bgrpic,"input",(3,15),cv2.FONT_HERSHEY_DUPLEX, 0.5,(255,0,0))
    cv2.putText(bgrpic,"output",(w+3,15),cv2.FONT_HERSHEY_DUPLEX, 0.5,(255,0,0))
    cv2.imshow("result", bgrpic)
    cv2.waitKey()
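Note: loadimg and to_bgr above come from the example's own repository and are not shown. A minimal sketch of what they plausibly do, assuming float32 CHW RGB in [0, 1] (the exact normalization in the original may differ):

import cv2
import numpy as np

def loadimg(path):
    # Hypothetical helper: read an image as a float32 CHW RGB array in [0, 1].
    bgr = cv2.imread(path, cv2.IMREAD_COLOR)             # HWC uint8 BGR
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb.transpose(2, 0, 1).astype("f") / 255.0    # CHW float32

def to_bgr(img):
    # Hypothetical helper: float32 CHW in [0, 1] -> HWC uint8 BGR for cv2.
    hwc = np.clip(img.transpose(1, 2, 0) * 255.0, 0, 255).astype(np.uint8)
    return cv2.cvtColor(hwc, cv2.COLOR_RGB2BGR)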
Example #2
def main():
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--model', '-m', default='',
                        help='model snapshot')
    parser.add_argument('--enc', '-e', type=str, default='enc_iter_60000.npz', help='encoder snapshot')
    parser.add_argument('--dec', '-d', type=str, default='dec_iter_60000.npz', help='decoder snapshot')
    parser.add_argument('--out', '-o', type=str, default='out', help='output dir')
    parser.add_argument('--input', '-i', default='sample.jpg', help='input jpg', required=True)
    parser.add_argument('--contour', '-c', action='store_true', help='from contour image or not')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))

    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=3, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    if args.model:
        opt_enc = make_optimizer(enc)
        opt_dec = make_optimizer(dec)
        opt_dis = make_optimizer(dis)

        # Set up a trainer
        updater = FacadeUpdater(
            models=(enc, dec, dis),
            iterator={},
            optimizer={
                'enc': opt_enc, 'dec': opt_dec,
                'dis': opt_dis},
            device=args.gpu)
        trainer = training.Trainer(updater, (200, 'epoch'), out='generate/')
        chainer.serializers.load_npz(args.model, trainer)
    elif args.enc and args.dec:
        chainer.serializers.load_npz(args.enc, enc)
        chainer.serializers.load_npz(args.dec, dec)

    if not args.contour:
        from make_contour import get_contour_image
        get_contour_image(args.input)

    generate_image_from_contour(args.input, enc, dec, args.out)
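Note: generate_image_from_contour is defined elsewhere in that repository. A minimal sketch under the conventions of the other snippets here (256x256 RGB input scaled to [-1, 1]; the function name comes from the call above, everything else is an assumption):

import os
import numpy as np
import chainer
from PIL import Image

def generate_image_from_contour(path, enc, dec, out_dir):
    xp = enc.xp
    img = np.asarray(Image.open(path).convert('RGB').resize((256, 256)))
    # normalize to [-1, 1] and add a batch axis, as in the other snippets
    x = xp.asarray(img.transpose(2, 0, 1)[None].astype("f") / 128.0 - 1.0)
    with chainer.no_backprop_mode(), chainer.using_config('train', False):
        y = dec(enc(chainer.Variable(x)))
    out = chainer.cuda.to_cpu(y.data)[0]
    out = np.clip((out + 1.0) * 128.0, 0, 255).astype(np.uint8)
    os.makedirs(out_dir, exist_ok=True)
    Image.fromarray(out.transpose(1, 2, 0)).save(os.path.join(out_dir, 'generated.png'))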
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dec', '-d', type=str, default='dec_model.npz',
        help='decoder model')
    parser.add_argument('--enc', '-e', type=str, default='enc_model.npz',
        help='encoder model')
    args = parser.parse_args()

    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=1)

    chainer.serializers.load_npz(str(MODEL_PATH.joinpath(args.dec)), dec)
    chainer.serializers.load_npz(str(MODEL_PATH.joinpath(args.enc)), enc)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
    xp = enc.xp

    branch_images = np.zeros((12, 256, 256), dtype=np.uint8)
    plant_images = np.zeros((12, 256, 256, 3), dtype=np.uint8)
    p2b_images = np.zeros((12, 256, 256), dtype=np.uint8)

    for i in tqdm(range(1, 13)):  # 12 grid cells; the original range(1, 12) left the last one blank
        branch_path = DATASET_PATH.joinpath('branch', str(i))
        plant_path = DATASET_PATH.joinpath('plant', str(i))
        name = random.choice(list(branch_path.glob('*.png'))).name
        branch_image_path = branch_path.joinpath(name)
        plant_image_path = plant_path.joinpath(name)

        # open image
        branch_image = np.asarray(Image.open(branch_image_path).convert('L'))
        branch_images[i-1,:] = branch_image
        plant_image = np.asarray(Image.open(plant_image_path).convert('RGB'))
        plant_images[i-1,:] = plant_image

        plant_image = xp.array(plant_image).astype("f").transpose(2, 0, 1) / 128.0-1.0
        plant_image = plant_image.reshape(1, *plant_image.shape)

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            p2b_image = chainer.cuda.to_cpu(dec(enc(plant_image)).data)  # .get() only exists on GPU arrays; to_cpu works on both
            p2b_image = np.asarray(np.clip(p2b_image * 128 + 128, 0.0, 255.0), dtype=np.uint8).reshape(256, 256)
        p2b_images[i-1, :] = p2b_image

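    # Tile the 12 images into a 3x4 grid: (12, 256, 256) -> (3, 4, 256, 256),
    # transpose to (3, 256, 4, 256), then reshape to (3*256, 4*256).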
    Image.fromarray(branch_images.reshape(3, 4, 256, 256).transpose(0, 2, 1, 3).reshape(3*256, 4*256))\
        .save(str(RESULT_PATH.joinpath('branch_image.png')))
    Image.fromarray(plant_images.reshape(3, 4, 256, 256, 3).transpose(0, 2, 1, 3, 4).reshape(3*256, 4*256, 3))\
        .save(str(RESULT_PATH.joinpath('plant_image.png')))
    Image.fromarray(p2b_images.reshape(3, 4, 256, 256).transpose(0, 2, 1, 3).reshape(3*256, 4*256))\
        .save(str(RESULT_PATH.joinpath('p2b_image.png')))
Example #4
def encode(args):
    enc = Encoder()
    npz.load_npz(args.enc, enc)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        enc.to_gpu()
    xp = enc.xp

    image = CelebADataset([args.infile])[0]
    x = Variable(xp.asarray([image])) / 255.
    x = F.resize_images(x, (64, 64))

    with chainer.using_config('train', False):
        z = enc(x)
    return z, x.data[0]
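A usage sketch: the z returned by encode can be pushed back through the companion Generator to reconstruct the input (Generator and args.gen follow the loading code in Example #10 below; the output shape is an assumption):

gen = Generator()
npz.load_npz(args.gen, gen)
if args.gpu >= 0:
    gen.to_gpu()
z, x = encode(args)
with chainer.using_config('train', False), chainer.no_backprop_mode():
    x_rec = gen(z)  # e.g. (1, 3, 64, 64); compare x_rec.data[0] against x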
Example #5
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset',
                        '-i',
                        default='./facade/base',
                        help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=12)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=12, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = FacadeDataset(args.dataset, data_range=(1, 300))
    test_d = FacadeDataset(args.dataset, data_range=(300, 379))
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={
                                'main': train_iter,
                                'test': test_iter
                            },
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec,
                                'dis': opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'enc/loss',
        'dec/loss',
        'dis/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_image(updater, enc, dec, 5, 5, args.seed, args.out),
                   trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
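Note: FacadeDataset used by the facade training scripts here is not included in the snippets. A condensed sketch along the lines of the official Chainer pix2pix example, assuming CMP Facade files cmp_bXXXX.jpg (photo) and cmp_bXXXX.png (label map with 12 classes, one-hot encoded into the 12 input channels); details may differ per repository:

import numpy as np
from PIL import Image
from chainer.dataset import dataset_mixin

class FacadeDataset(dataset_mixin.DatasetMixin):
    """Sketch: pairs of (12-channel one-hot label map, RGB photo in [-1, 1])."""
    def __init__(self, dataDir='./facade/base', data_range=(1, 300)):
        self.dataset = []
        for i in range(data_range[0], data_range[1]):
            # (the official example first resizes so the shorter side is 286,
            # so that random 256x256 crops always fit)
            img = Image.open(dataDir + "/cmp_b%04d.jpg" % i)
            label = Image.open(dataDir + "/cmp_b%04d.png" % i)
            img = np.asarray(img).astype("f").transpose(2, 0, 1) / 128.0 - 1.0
            label_ = np.asarray(label) - 1                 # class ids in [0, 12)
            label = np.zeros((12,) + label_.shape).astype("i")
            for j in range(12):
                label[j, :] = (label_ == j)                # one-hot per class
            self.dataset.append((img, label))

    def __len__(self):
        return len(self.dataset)

    def get_example(self, i, crop_width=256):
        # random crop, same window for the photo and the label map
        _, h, w = self.dataset[i][0].shape
        x_l = np.random.randint(0, w - crop_width)
        y_l = np.random.randint(0, h - crop_width)
        x_r, y_r = x_l + crop_width, y_l + crop_width
        return (self.dataset[i][1][:, y_l:y_r, x_l:x_r].astype("i"),
                self.dataset[i][0][:, y_l:y_r, x_l:x_r])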
Example #6
def main():
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize', '-b', type=int, default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='./facade/base',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--snapshot_interval', type=int, default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=12)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=12, out_ch=3)
    
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer
    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = FacadeDataset(args.dataset, data_range=(1,300))
    test_d = FacadeDataset(args.dataset, data_range=(300,379))
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(
        models=(enc, dec, dis),
        iterator={
            'main': train_iter,
            'test': test_iter},
        optimizer={
            'enc': opt_enc, 'dec': opt_dec, 
            'dis': opt_dis},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(extensions.snapshot(
        filename='snapshot_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'enc/loss', 'dec/loss', 'dis/loss',
    ]), trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(
        out_image(
            updater, enc, dec,
            5, 5, args.seed, args.out),
        trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=500,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result_dehighlight',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=10000,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=1)
    dis = Discriminator(in_ch=3, out_ch=1)
    gen = Generator(in_ch=3, out_ch=1)
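    # NOTE: 'depro' (apparently a pretrained preprocessing network) is not
    # defined in this snippet; the original file must construct it before
    # loading its weights here and freezing it below.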
    serializers.load_npz("depro.npz", depro)
    gen.encoder = enc
    gen.decoder = dec

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()
        gen.to_gpu()

    depro.disable_update()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    # TODO: hard-coded dataset path; to be fixed
    train_d = NyuDataset("E:/nyu_depth_v2_labeled.mat",
                         startnum=0,
                         endnum=1000)
    test_d = NyuDataset("E:/nyu_depth_v2_labeled.mat",
                        startnum=1000,
                        endnum=1449)
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)
    test_iter2 = chainer.iterators.SerialIterator(test_d,
                                                  args.batchsize,
                                                  repeat=False,
                                                  shuffle=False)

    # Set up a trainer
    updater = PicUpdater(models=(enc, dec, dis),
                         iterator={
                             'main': train_iter,
                             'test': test_iter
                         },
                         optimizer={
                             'enc': opt_enc,
                             'dec': opt_dec,
                             'dis': opt_dis
                         },
                         device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'enc/loss_enc', 'dec/loss_dec', 'dis/loss_dis',
        "validation/main/loss"
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.Evaluator(test_iter2, gen, device=args.gpu))
    trainer.extend(
        extensions.PlotReport(['dec/loss_dec'],
                              x_key='iteration',
                              file_name='dec_loss.png',
                              trigger=display_interval))
    trainer.extend(
        extensions.PlotReport(['dis/loss_dis'],
                              x_key='iteration',
                              file_name='dis_loss.png',
                              trigger=display_interval))
    trainer.extend(
        extensions.PlotReport(["validation/main/loss"],
                              x_key='iteration',
                              file_name='gen_loss.png',
                              trigger=display_interval))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_image(updater, depro, enc, dec, 3, 3, args.seed,
                             args.out),
                   trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #8
File: train.py  Project: g089vg/python
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=5)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=5, out_ch=3)
    
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer
    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = CrackDataset(args.dataset, data_range=(1,90))
    test_d = CrackDataset(args.dataset, data_range=(91,100))
Example #9
def main():
    parser = argparse.ArgumentParser(
        description='chainer creating pictures of seat')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--data',
                        '-d',
                        default='./detect',
                        help='Directory of image files.')
    parser.add_argument('--mask',
                        '-ma',
                        default='./mask',
                        help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='./result',
                        help='Directory to output the result')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--model',
                        '-m',
                        default='snapshot_iter_83000.npz',
                        help='Loading model')
    parser.add_argument('--batchsize',
                        '-b',
                        default=16,
                        type=int,
                        help='The same value as that of trainer')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('')

    # Set up a neural network to train
    enc = Encoder(in_ch=4)
    dec = Decoder(out_ch=3)

    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()

    xp = cuda.cupy if args.gpu >= 0 else np  # fall back to NumPy on CPU (the original unconditionally used cuda.cupy)

    # Set up the model
    model = args.model
    print('---Loading model---')
    serializers.load_npz(model, enc, path='updater/model:enc/')
    serializers.load_npz(model, dec, path='updater/model:dec/')

    # Turn off training mode
    chainer.config.train = False
    chainer.config.debug = False
    chainer.config.enable_backprop = False
    print('Model set up!')

    # Load datasets
    print('---Loading datasets---')
    data_path = args.data
    data_sets = glob.glob(data_path + '/*.jpg')
    data_mask = glob.glob(args.mask + '/*.jpg')
    dataset = []
    names = []
    for data in data_sets:
        d_name = os.path.basename(data)
        d_name = d_name[:-4]
        img = Image.open(data)
        img = xp.asarray(img).transpose(2, 0, 1)
        for k in range(10):
            mask = random.choice(data_mask)
            mask = Image.open(mask)
            mask = xp.asarray(mask)
            mask = mask[xp.newaxis, :, :]
            img_ = img + mask
            img_ = xp.asarray(img_).astype('f') / 128.0 - 1.0
            mask = xp.asarray(mask).astype('f') / 128.0 - 1.0
            img_ = xp.concatenate([img_, mask], axis=0)
            dataset.append(img_)
            f_name = d_name + '_' + str(k)
            names.append(f_name)
    print('Datasets set up!')

    # Create picture
    print('---Creating---')
    in_ch = 4
    in_h = 256
    in_w = 256
    out_put = 0

    batch_size = args.batchsize
    out_dir = args.out

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    count = 0
    for name, data in zip(names, dataset):
        X_in = xp.zeros((batch_size, in_ch, in_h, in_w)).astype("f")
        for i in range(batch_size):
            X_in[i, :] = xp.asarray(data)
        X_in = Variable(X_in)

        z = enc(X_in)
        X_out = dec(z)
        out_put = xp.asarray(X_out.data)
        out_put = out_put[0]
        out_put += 1.0
        out_put *= 128.0
        xp.save(out_dir + '/' + name, out_put)
        count += 1
        print('created {} / {}'.format(count, len(dataset)))

    print('Finished all process!')
    print('Numpy shape : ', out_put.shape)
    print('Number of Numpy file : ', len(dataset))
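Note: the loop above stores raw arrays with xp.save rather than image files. A small sketch for viewing one of the resulting .npy files (the file name is hypothetical; the value range follows the (x + 1) * 128 scaling above):

import numpy as np
from PIL import Image

arr = np.load('result/img_0.npy')              # CHW float array, roughly [0, 256]
arr = np.clip(arr, 0, 255).astype(np.uint8)
Image.fromarray(arr.transpose(1, 2, 0)).save('img_0.png')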
Example #10
def main():
    parser = argparse.ArgumentParser(description='Train Encoder')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='data/celebA/',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval', type=int, default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=1000,
                        help='Interval of displaying log to console')
    parser.add_argument('--gen', default='gen.npz')
    parser.add_argument('--enc', default=None)
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# batchsize: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    gen = Generator()
    npz.load_npz(args.gen, gen)
    enc = Encoder()
    if args.enc is not None:
        npz.load_npz(args.enc, enc)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        enc.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0005, beta1=0.9):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer
    opt_gen = make_optimizer(gen)
    gen.disable_update()
    opt_enc = make_optimizer(enc)

    # Setup a dataset
    all_files = os.listdir(args.dataset)
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
    print('{} contains {} image files'.format(args.dataset, len(image_files)))
    train = CelebADataset(paths=image_files, root=args.dataset)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Set up a trainer
    updater = EncUpdater(
        models=(gen, enc),
        iterator=train_iter,
        optimizer={'gen': opt_gen, 'enc': opt_enc},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_enc_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.ExponentialShift(
        'alpha', 0.5, optimizer=opt_enc), trigger=(10, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval, log_name='train_enc.log'))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'enc/loss',
    ]), trigger=display_interval)
    trainer.extend(extensions.PlotReport(
        ['enc/loss'], trigger=display_interval, file_name='enc-loss.png'))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #11
def main():
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--img', '-i', help='Input image')

    parser.add_argument('--out', '-o', default='result_dehighlight',
                        help='Directory to output the result')
    args = parser.parse_args()

    ENC_W = os.path.join(args.out, "enc_iter_2500000.npz")
    #DEC_W = "trained_model/dec_iter_176000.npz"
    # to stay under GitHub's 100 MB file limit, one .npz file is split into two zip files.
    DEC_Ws = [os.path.join(args.out, "dec_iter_2500000.npz")]

    #shutil.copy("net.py", args.out)

    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)

    chainer.serializers.load_npz(ENC_W, enc)
    # to stay under GitHub's 100 MB file limit, one .npz file is split into two files
    for npzfile in DEC_Ws:
        with np.load(npzfile) as f:
            d = NpzDeserializer(f, strict=False)
            d.load(dec)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()

    inimg = loadimg(args.img)
    ch, h, w = inimg.shape
    # pad the input so its height and width are multiples of 256
    in_ary = np.zeros((ch,math.ceil(h/256)*256, math.ceil(w/256)*256), dtype="f")
    in_ary[:,0:h,0:w] = inimg
    x_in = in_ary[np.newaxis,:] # to fit into the minibatch shape
    print(x_in.shape)
    # x_in as an input image
    x_in = chainer.Variable(x_in)
    if args.gpu >= 0:
        x_in.to_gpu()

    st = time.time()
    for i in tqdm(range(10)):
        z = enc(x_in)
        x_out = dec(z)
    ts = (time.time() - st)/10
    print("mean estimation time:{:.2f}".format(ts))
    with open(os.path.join(args.out, "time.txt"), "a") as f:
        f.write("gpu:{}, time:{:.4f}, FPS:{:.4f}\n".format(args.gpu, ts, 1/ts))

    if args.gpu >= 0:
        out_ary = x_out.data.get()[0]
    else:
        out_ary = x_out.data[0]
    #img_show = np.zeros((inimg.shape[0], inimg.shape[1], inimg.shape[2]*2))
    #img_show[:,:,:inimg.shape[2]] = inimg
    #img_show[:,:outimg.shape[1],inimg.shape[2]:inimg.shape[2]+outimg.shape[2]] = outimg
    outimg = out_ary[:,0:h,0:w] # trim paddings
    img_show = np.concatenate((inimg, outimg), axis=2)
    bgrpic = to_bgr(img_show).copy()
    cv2.putText(bgrpic,"input",(3,15),cv2.FONT_HERSHEY_DUPLEX, 0.5,(255,0,0))
    cv2.putText(bgrpic,"output",(w+3,15),cv2.FONT_HERSHEY_DUPLEX, 0.5,(255,0,0))
    cv2.imshow("result", bgrpic)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
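For reference, the counterpart that produces such split weight files could be as simple as this sketch (the split point and file names are assumptions); NpzDeserializer(strict=False) then ignores the keys missing from each part:

import numpy as np

with np.load("dec_iter_2500000.npz") as f:
    keys = sorted(f.files)
    half = len(keys) // 2
    # each part holds a disjoint subset of the parameter arrays
    np.savez("dec_part1.npz", **{k: f[k] for k in keys[:half]})
    np.savez("dec_part2.npz", **{k: f[k] for k in keys[half:]})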
Example #12
def main():
    parser = argparse.ArgumentParser(
        description="chainer implementation of pix2pix")
    parser.add_argument("--batchsize",
                        "-b",
                        type=int,
                        default=1,
                        help="Number of images in each mini-batch")
    parser.add_argument("--epoch",
                        "-e",
                        type=int,
                        default=40000,
                        help="Number of sweeps over the dataset to train")
    parser.add_argument("--gpu",
                        "-g",
                        type=int,
                        default=-1,
                        help="GPU ID (negative value indicates CPU)")
    parser.add_argument("--dataset",
                        "-i",
                        default="./input/png/",
                        help="Directory of image files.")
    parser.add_argument("--out",
                        "-o",
                        default="D:/output/imasUtaConverter/",
                        help="Directory to output the result")
    parser.add_argument("--resume",
                        "-r",
                        default="",
                        help="Resume the training from snapshot")
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument("--snapshot_interval",
                        type=int,
                        default=10000,
                        help="Interval of snapshot")
    parser.add_argument("--display_interval",
                        type=int,
                        default=20,
                        help="Interval of displaying log to console")
    args = parser.parse_args()

    print("GPU: {}".format(args.gpu))
    print("# Minibatch-size: {}".format(args.batchsize))
    print("# epoch: {}".format(args.epoch))
    print("")

    # Set up a neural network to train
    enc = Encoder(in_ch=2)
    dec = Decoder(out_ch=2)
    dis = Discriminator(in_ch=2, out_ch=2)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(
            args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), "hook_dec")
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = FacadeDataset(args.dataset, data_range=(0, 38))
    test_d = FacadeDataset(args.dataset, data_range=(38, 40))
    #train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize, n_processes=4)
    #test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize, n_processes=4)
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={
                                "main": train_iter,
                                "test": test_iter
                            },
                            optimizer={
                                "enc": opt_enc,
                                "dec": opt_dec,
                                "dis": opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, "epoch"), out=args.out)

    snapshot_interval = (args.snapshot_interval, "iteration")
    display_interval = (args.display_interval, "iteration")
    trainer.extend(
        extensions.snapshot(filename="snapshot_iter_{.updater.iteration}.npz"),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, "enc_iter_{.updater.iteration}.npz"),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, "dec_iter_{.updater.iteration}.npz"),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, "dis_iter_{.updater.iteration}.npz"),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        "epoch",
        "iteration",
        "enc/loss",
        "dec/loss",
        "dis/loss",
    ]),
                   trigger=display_interval)
    #trainer.extend(extensions.PlotReport(["enc/loss", "dis/loss"], x_key="epoch", file_name="loss.png"))
    trainer.extend(extensions.ProgressBar(update_interval=20))
    trainer.extend(out_image(updater, enc, dec, 1, 10, args.seed, args.out),
                   trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #13
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')

    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')

    parser.add_argument('--seed', type=int, default=0, help='Random seed')

    parser.add_argument('--model', '-m', default='', help='model snapshot')

    parser.add_argument('--input',
                        '-i',
                        default='../images/generate/sample.jpg',
                        help='input jpg')

    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))

    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)
    dis = Discriminator(in_ch=3, out_ch=3)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    if os.path.exists('generate_tmp'):
        shutil.rmtree('generate_tmp')

    os.mkdir('generate_tmp')
    os.mkdir('generate_tmp/base')
    os.mkdir('generate_tmp/label')
    shutil.copyfile(args.input, 'generate_tmp/base/tmp.jpg')
    shutil.copyfile(args.input, 'generate_tmp/label/tmp.jpg')
    test_d = FacadeDataset('generate_tmp/')
    test_iter = chainer.iterators.SerialIterator(test_d, 1)

    # Set up a trainer
    updater = FacadeUpdater(models=(enc, dec, dis),
                            iterator={},
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec,
                                'dis': opt_dis
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (200, 'epoch'),
                               out='../results/generate/')
    chainer.serializers.load_npz(args.model, trainer)

    out_image(updater, enc, dec, 1, 1, args.seed, '../results/generate/', True,
              test_iter)(trainer)
Example #14
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset',
                        '-i',
                        default='./facade/base',
                        help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    parser.add_argument(
        '--n_processes',
        type=int,
        default=None,
        help='processes of chainer.iterators.MultiprocessIterator')
    parser.add_argument(
        '--shared_mem',
        type=int,
        default=None,
        help='shared memory per example for '
        'chainer.iterators.MultiprocessIterator; None means auto-adjust.')
    parser.add_argument('--audio_dataset_second',
                        type=int,
                        default=None,
                        help='time length (seconds) of the training audio data')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    args.out = os.path.join(args.out, datetime.now().strftime("%Y%m%d_%H%M%S"))
    util.audio_dataset_second = args.audio_dataset_second
    if args.batchsize > 1:
        assert util.audio_dataset_second is not None, \
            "minibatch training (--batchsize > 1) requires the --audio_dataset_second option."

    # Set up a neural network to train
    enc = Encoder(in_ch=2)
    dec = Decoder(out_ch=2)
    dis = Discriminator(in_ch=2, out_ch=2)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        enc.to_gpu()  # Copy the model to the GPU
        dec.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)
    opt_dis = make_optimizer(dis)

    train_d = Vp2pDataset(args.dataset + "/train")
    test_d = Vp2pDataset(args.dataset + "/test")
    train_iter = chainer.iterators.MultiprocessIterator(
        train_d,
        args.batchsize,
        n_processes=args.n_processes,
        shared_mem=args.shared_mem)
    test_iter = chainer.iterators.MultiprocessIterator(
        test_d,
        args.batchsize,
        n_processes=args.n_processes,
        shared_mem=args.shared_mem)
    # train_iter = chainer.iterators.MultiprocessIterator(train_d, args.batchsize)
    # test_iter = chainer.iterators.MultiprocessIterator(test_d, args.batchsize)

    # Set up a trainer
    updater = VoiceP2PUpdater(models=(enc, dec, dis),
                              iterator={
                                  'main': train_iter,
                                  'test': test_iter
                              },
                              optimizer={
                                  'enc': opt_enc,
                                  'dec': opt_dec,
                                  'dis': opt_dis
                              },
                              device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'enc/loss',
        'dec/loss',
        'dis/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_image(updater, enc, dec, 5, 5, args.seed, args.out),
                   trigger=snapshot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #15
def main():
    parser = argparse.ArgumentParser(
        description="chainer implementation of Unet")
    parser.add_argument("--batchsize",
                        "-b",
                        type=int,
                        default=1,
                        help="Number of images in each mini-batch")
    parser.add_argument("--epoch", "-e", type=int, default=200, help="epoch")
    parser.add_argument("--gpu", "-g", type=int, default=-1, help="GPU ID")
    parser.add_argument("--dataset",
                        "-i",
                        default="./train/",
                        help="Directory of image files")
    parser.add_argument("--out",
                        "-o",
                        default="result/",
                        help="Directory to output the result")
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Encoder-decoder (U-Net) only, no discriminator
    enc = Encoder(in_ch=3)  # in_ch => 3 (YCbCr)
    dec = Decoder(out_ch=3)  # out_ch => 3 (DCT)

    # GPU set up
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        enc.to_gpu()
        dec.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
        return optimizer

    opt_enc = make_optimizer(enc)
    opt_dec = make_optimizer(dec)

    train_d = ImgDCTDataset(args.dataset, data_range=(0, 1000))
    test_d = ImgDCTDataset(args.dataset, data_range=(1000, 2000))
    train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)

    updater = FacadeUpdater(models=(enc, dec),
                            iterator={
                                'main': train_iter,
                                'test': test_iter
                            },
                            optimizer={
                                'enc': opt_enc,
                                'dec': opt_dec
                            },
                            device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dec, 'dec_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    # trainer.extend(extensions.snapshot_object(
    #     dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'enc/loss',
        'dec/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
def main():
    attr_columns = [
        '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes',
        'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair',
        'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin',
        'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',
        'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard',
        'Oval_Face', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline',
        'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',
        'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick',
        'Wearing_Necklace', 'Wearing_Necktie', 'Young'
    ]

    parser = argparse.ArgumentParser(description='Get Attribute Vector')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=512,
                        help='Number of images in each mini-batch')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset',
                        '-i',
                        default='data/celebA/',
                        help='Directory of image files.')
    parser.add_argument('--attr_list',
                        '-a',
                        default='data/list_attr_celeba.txt')
    parser.add_argument('--get_attr',
                        default='all',
                        nargs='+',
                        choices=attr_columns + ['all'])
    parser.add_argument('--outfile', '-o', default='attr_vec.json')
    parser.add_argument('--enc', default='pre-trained/enc_iter_310000.npz')
    args = parser.parse_args()

    enc = Encoder()
    npz.load_npz(args.enc, enc)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        enc.to_gpu()

    all_files = os.listdir(args.dataset)
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]

    vectors = {}
    attr_df = pd.read_csv(args.attr_list, delim_whitespace=True, header=1)
    if 'all' in args.get_attr:  # handles both the default string and an explicit --get_attr all (a list)
        args.get_attr = attr_columns
    for attr_name in tqdm(list(set(args.get_attr) & set(attr_df.columns))):
        with_attr_files = attr_df[attr_df[attr_name] == 1].index.tolist()
        with_attr_files = list(set(with_attr_files) & set(image_files))
        with_attr_vec = get_vector(enc, with_attr_files, args)

        without_attr_files = attr_df[attr_df[attr_name] != 1].index.tolist()
        without_attr_files = list(set(without_attr_files) & set(image_files))
        without_attr_vec = get_vector(enc, without_attr_files, args)

        vectors[attr_name] = (with_attr_vec - without_attr_vec).tolist()

    with open(args.outfile, 'w') as f:
        f.write(
            json.dumps(vectors,
                       indent=4,
                       sort_keys=True,
                       separators=(',', ': ')))
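Note: get_vector is defined elsewhere in that repository. A plausible sketch, assuming it returns the mean latent vector of enc over the given files, batched with args.batchsize (reuses CelebADataset, Variable, and F from the snippets above; the exact reduction is an assumption):

def get_vector(enc, files, args):
    # Hypothetical: average of enc's latent vectors over `files`.
    xp = enc.xp
    dataset = CelebADataset(paths=files, root=args.dataset)
    vec, total = None, 0
    for start in range(0, len(dataset), args.batchsize):
        stop = min(start + args.batchsize, len(dataset))
        batch = [dataset[j] for j in range(start, stop)]
        x = Variable(xp.asarray(batch).astype("f")) / 255.
        x = F.resize_images(x, (64, 64))
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            z = enc(x)
        z_sum = chainer.cuda.to_cpu(z.data).sum(axis=0)
        vec = z_sum if vec is None else vec + z_sum
        total += len(batch)
    return vec / total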