Code example #1
File: forward_FI.py  Project: shimo8810/STVSR
def main():
    '''
    main function, start point
    '''
    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--model', '-m', type=str, help='Model file to load')
    args = parser.parse_args()

    # print parameters

    # save directory

    # prepare model
    # change the network here
    model = N.GenEvaluator(N.AEFINetConcat(ch=4, f_size=5))
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model_path = path.join(ROOT_PATH, 'models', args.model)
    chainer.serializers.load_npz(model_path, model)

    movie_img = 'acrobat_s01'

    img_list = sorted(os.listdir(path.join(FULL_PATH, movie_img)))
    h, w, c = io.imread(path.join(FULL_PATH, movie_img,
                                  img_list[0]))[:, :, :].shape
    h = 521
    w = 941

    # create the output directory
    save_path = path.join(ROOT_PATH, 'examples', 'result_acrobat_s01',
                          args.model.split('.')[0])
    if not path.exists(save_path):
        os.makedirs(save_path)

    for i in tqdm(range(0, len(img_list) - 1, 2)):
        img1_path = path.join(FULL_PATH, movie_img, img_list[i])
        img2_path = path.join(FULL_PATH, movie_img, img_list[i + 2])
        data = np.zeros((2, h, w, c)).astype(np.uint8)
        data[0] = io.imread(img1_path)[:h, :w, :]
        data[1] = io.imread(img2_path)[:h, :w, :]
        data = data.transpose(0, 3, 1, 2).reshape(1, 2, c, h, w).astype(
            np.float32) / 255.0
        data = cp.array(data)
        with chainer.using_config('train', False):
            with chainer.using_config('enable_backprop', False):
                data = model.generator(data)
        data = chainer.cuda.to_cpu(data.data) * 255
        data = np.clip(data, 0, 255)
        data = data.astype(np.uint8)
        data = data.reshape(c, h, w).transpose(1, 2, 0)
        name, _ = img_list[i].split('.')
        img1 = io.imread(img1_path)[:h, :w, :]
        io.imsave(
            path.join(save_path, '{:}_fi_{:04d}.bmp'.format(movie_img, i + 1)),
            img1)
        io.imsave(
            path.join(save_path, '{:}_fi_{:04d}.bmp'.format(movie_img, i + 2)),
            data)
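
The loop above does most of its work in the pack/unpack steps around model.generator: two HWC uint8 frames are packed into one (1, 2, C, H, W) float32 batch in [0, 1], and the generator output is mapped back to an HWC uint8 image. A minimal NumPy-only sketch of that round trip, using random placeholder frames instead of the project's data:

import numpy as np

h, w, c = 64, 64, 3
frame1 = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)
frame2 = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)

# pack: two (H, W, C) uint8 frames -> (1, 2, C, H, W) float32 in [0, 1]
pair = np.stack([frame1, frame2])
batch = pair.transpose(0, 3, 1, 2).reshape(1, 2, c, h, w).astype(np.float32) / 255.0

# unpack: a (1, C, H, W) float output -> (H, W, C) uint8 image
out = batch[:, 0]  # stand-in for model.generator(batch).data
img = np.clip(out * 255, 0, 255).astype(np.uint8).reshape(c, h, w).transpose(1, 2, 0)
assert img.shape == (h, w, c)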
Code example #2
File: train_AESR.py  Project: shimo8810/STVSR
def main():
    '''
    main function, start point
    '''
    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--learnrate',
                        '-l',
                        type=float,
                        default=0.001,
                        help='Learning rate for SGD')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=300,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--iter_parallel',
                        action='store_true',
                        default=False,
                        help='load minibatches with parallel (multiprocess) iterators')
    parser.add_argument('--opt',
                        '-o',
                        default='sgd',
                        help='optimizer method (sgd or adam)')
    parser.add_argument('--fsize',
                        '-f',
                        default=3,
                        type=int,
                        help='filter (kernel) size')
    parser.add_argument('--ch',
                        '-c',
                        default=1,
                        type=int,
                        help='channel scale')
    args = parser.parse_args()

    # print parameters
    print("-=Learning Parameter=-")
    print("# Max Epochs: {}".format(args.epoch))
    print("# Batch Size: {}".format(args.batchsize))
    print("# Learning Rate: {}".format(args.learnrate))
    print('# Train Dataset: General 100')
    print('# Test Dataset: Set 14')
    if args.iter_parallel:
        print("# Data Iters that loads in Parallel")
    print("\n")

    # save directory
    model_dir_name = 'AESR_opt_{}_fs_{}_ch_{}'.format(args.opt, args.fsize,
                                                      args.ch)
    outdir = path.join(ROOT_PATH, 'results', model_dir_name)
    if not path.exists(outdir):
        os.makedirs(outdir)
    with open(path.join(outdir, 'arg_param.txt'), 'w') as f:
        for k, v in args.__dict__.items():
            f.write('{}:{}\n'.format(k, v))

    print('# loading dataset (General100, Set14) ...')
    train, test = load_dataset()

    # prepare model
    model = N.GenEvaluator(N.AESR(f_size=args.fsize, ch=args.ch))
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    # setup optimizer
    if args.opt == 'sgd':
        optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,
                                                   momentum=0.9)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
        optimizer.add_hook(chainer.optimizer.GradientClipping(0.1))
    elif args.opt == 'adam':
        optimizer = chainer.optimizers.Adam(alpha=args.learnrate)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    # setup iter
    if args.iter_parallel:
        train_iter = chainer.iterators.MultiprocessIterator(train,
                                                            args.batchsize,
                                                            n_processes=8)
        test_iter = chainer.iterators.MultiprocessIterator(test,
                                                           args.batchsize,
                                                           repeat=False,
                                                           shuffle=False,
                                                           n_processes=8)
    else:
        train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
        test_iter = chainer.iterators.SerialIterator(test,
                                                     args.batchsize,
                                                     repeat=False,
                                                     shuffle=False)

    # setup trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)

    # eval test data
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    # dump loss graph
    trainer.extend(extensions.dump_graph('main/loss'))
    # lr shift
    if args.opt == 'sgd':
        trainer.extend(extensions.ExponentialShift("lr", 0.1),
                       trigger=(100, 'epoch'))
    elif args.opt == 'adam':
        trainer.extend(extensions.ExponentialShift("alpha", 0.1),
                       trigger=(100, 'epoch'))
    # save snapshot
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        model, 'model_snapshot_{.updater.epoch}'),
                   trigger=(10, 'epoch'))
    # log report
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))
    #  plot loss graph
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch',
                              file_name='loss.png'))
    # plot acc graph
    trainer.extend(
        extensions.PlotReport(['main/PSNR', 'validation/main/PSNR'],
                              'epoch',
                              file_name='PSNR.png'))
    # print info
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/PSNR',
            'validation/main/PSNR', 'lr', 'elapsed_time'
        ]))
    # print progbar
    trainer.extend(extensions.ProgressBar())

    trainer.run()

    # save final model
    model_outdir = path.join(ROOT_PATH, 'models', model_dir_name)
    if not path.exists(model_outdir):
        os.makedirs(model_outdir)
    model_name = 'AESR_opt_{}_ch_{}_fsize_{}.npz'.format(
        args.opt, args.ch, args.fsize)
    chainer.serializers.save_npz(path.join(model_outdir, model_name), model)

    model_parameter = {
        'name': 'AESR',
        'parameter': {
            'f_size': args.fsize,
            'ch': args.ch
        }
    }
    with open(path.join(model_outdir, 'model_parameter.json'), 'w') as f:
        json.dump(model_parameter, f)
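
The final weights (.npz) and the architecture hyperparameters (model_parameter.json) are written to separate files, so whoever loads the model must rebuild the same network first and then restore the weights. A minimal loading sketch under the default arguments (opt='sgd', fsize=3, ch=1); the import name 'network' for the project's N module is an assumption:

import json
from os import path
import chainer
import network as N  # assumed import name for the project's N module

model_outdir = path.join('models', 'AESR_opt_sgd_fs_3_ch_1')  # under ROOT_PATH in the script above
with open(path.join(model_outdir, 'model_parameter.json')) as f:
    params = json.load(f)['parameter']

# rebuild the same architecture, then restore the trained weights
model = N.GenEvaluator(N.AESR(f_size=params['f_size'], ch=params['ch']))
chainer.serializers.load_npz(
    path.join(model_outdir, 'AESR_opt_sgd_ch_1_fsize_3.npz'), model)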
Code example #3
File: train_aefinet_test.py  Project: shimo8810/STVSR
def main():
    '''
    main function, start point
    '''
    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--learnrate',
                        '-l',
                        type=float,
                        default=0.001,
                        help='Learning rate for SGD')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu0',
                        '-g',
                        type=int,
                        default=0,
                        help='First GPU ID (negative value indicates CPU)')
    parser.add_argument('--gpu1',
                        '-G',
                        type=int,
                        default=2,
                        help='Second GPU ID (negative value indicates CPU)')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--iter_parallel',
                        '-p',
                        action='store_true',
                        default=False,
                        help='load the dataset from disk and use parallel iterators')
    parser.add_argument('--opt',
                        '-o',
                        type=str,
                        choices=('adam', 'sgd'),
                        default='adam')
    parser.add_argument('--fsize', '-f', type=int, default=5)
    parser.add_argument('--ch', '-c', type=int, default=4)
    args = parser.parse_args()

    # print parameters
    print("-=Learning Parameter=-")
    print("# Max Epochs: {}".format(args.epoch))
    print("# Batch Size: {}".format(args.batchsize))
    print("# Learning Rate: {}".format(args.learnrate))
    print("# Optimizer Method: {}".format(args.opt))
    print("# Filter Size: {}".format(args.fsize))
    print("# Channel Scale: {}".format(args.ch))
    print('# Train Dataset: UCF101')
    if args.iter_parallel:
        print("# Data Iters that loads in Parallel")
    print("\n")

    # make result dir
    network_name = 'AEFINetConcat'
    model_name = 'AEFINet_Test_opt_{}_ch_{}_fsize_{}'.format(
        args.opt, args.ch, args.fsize)
    outdir = path.join(ROOT_PATH, 'results', 'FI', 'AEFINet', model_name)
    util.make_result_dir(args, outdir)

    # loading dataset
    if args.iter_parallel:
        train = datasets.SequenceDataset(
            dataset='UCF101_train_size64_frame3_group10_max100_p')
        test = datasets.SequenceDataset(
            dataset='UCF101_test_size64_frame3_group25_max5_p')
    else:
        train = datasets.SequenceDatasetOnMem(
            dataset='UCF101_train_size64_frame3_group10_max100_p')
        test = datasets.SequenceDatasetOnMem(
            dataset='UCF101_test_size64_frame3_group25_max5_p')

    # prepare model
    chainer.cuda.get_device_from_id(args.gpu0).use()
    model = N.GenEvaluator(N.AEFINetConcat(f_size=args.fsize, ch=args.ch))

    # setup optimizer
    if args.opt == 'adam':
        optimizer = chainer.optimizers.Adam(alpha=args.learnrate)
    elif args.opt == 'sgd':
        optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,
                                                   momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    # setup iter
    if args.iter_parallel:
        train_iter = chainer.iterators.MultiprocessIterator(train,
                                                            args.batchsize,
                                                            n_processes=8)
        test_iter = chainer.iterators.MultiprocessIterator(test,
                                                           args.batchsize,
                                                           repeat=False,
                                                           shuffle=False,
                                                           n_processes=8)
    else:
        train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
        test_iter = chainer.iterators.SerialIterator(test,
                                                     args.batchsize,
                                                     repeat=False,
                                                     shuffle=False)

    # setup trainer
    updater = training.ParallelUpdater(
        train_iter,
        optimizer,
        devices={
            'main': args.gpu0,
            'second': args.gpu1
        },
    )
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)

    # eval test data
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu0))
    # dump loss graph
    trainer.extend(extensions.dump_graph('main/loss'))
    # lr shift
    if args.opt == 'sgd':
        trainer.extend(extensions.ExponentialShift("lr", 0.1),
                       trigger=(100, 'epoch'))
    elif args.opt == 'adam':
        trainer.extend(extensions.ExponentialShift("alpha", 0.1),
                       trigger=(100, 'epoch'))
    # save snapshot
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        model, 'model_snapshot_{.updater.epoch}'),
                   trigger=(10, 'epoch'))
    # log report
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))
    #  plot loss graph
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch',
                              file_name='loss.png'))
    # plot acc graph
    trainer.extend(
        extensions.PlotReport(['main/PSNR', 'validation/main/PSNR'],
                              'epoch',
                              file_name='PSNR.png'))
    # print info
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/PSNR',
            'validation/main/PSNR', 'lr', 'elapsed_time'
        ]))
    # print progbar
    trainer.extend(extensions.ProgressBar())

    # [ChainerUI] enable to send commands from ChainerUI
    trainer.extend(CommandsExtension())
    # [ChainerUI] save 'args' to show experimental conditions
    save_args(args, outdir)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # start train
    trainer.run()

    # save final model
    util.save_trained_model(model_name,
                            model,
                            network_name,
                            f_size=args.fsize,
                            ch=args.ch)
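
All three examples wrap the network in N.GenEvaluator, whose reported values appear as main/loss and main/PSNR in the trainer extensions. Its implementation is not shown on this page; the following is only a plausible sketch of such a wrapper, assuming an MSE loss on images scaled to [0, 1]:

import numpy as np
import chainer
import chainer.functions as F
from chainer import reporter


class GenEvaluator(chainer.Chain):
    """Plausible sketch only: wraps a generator and reports 'loss' and 'PSNR'."""

    def __init__(self, generator):
        super(GenEvaluator, self).__init__()
        with self.init_scope():
            self.generator = generator

    def __call__(self, x, t):
        y = self.generator(x)
        loss = F.mean_squared_error(y, t)
        # PSNR for signals in [0, 1]: 10 * log10(1 / MSE)
        psnr = 10 * F.log(1.0 / loss) / np.log(10)
        reporter.report({'loss': loss, 'PSNR': psnr}, self)
        return loss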