Code Example #1
File: factory.py  Project: ljdmaster/Det
def _register():
    # Register name -> network instances in the module-level __sets dict.
    __sets['VGGnet_train'] = networks.VGG16(is_train=True)
    __sets['VGGnet_test'] = networks.VGG16(is_train=False)
    __sets['Resnet50_train'] = networks.Resnet50(is_train=True)
    __sets['Resnet50_test'] = networks.Resnet50(is_train=False)
    __sets['Resnet101_train'] = networks.Resnet101(is_train=True)
    __sets['Resnet101_test'] = networks.Resnet101(is_train=False)
    __sets['MobilenetV1_train'] = networks.MobilenetV1(is_train=True)
    __sets['MobilenetV1_test'] = networks.MobilenetV1(is_train=False)
    __sets['PVAnet_train'] = networks.PVAnet(is_train=True)
    __sets['PVAnet_test'] = networks.PVAnet(is_train=False)
Code Example #2
File: factory.py  Project: nuanyang123/SSH-TensorFlow
def _register():
    __sets['VGGnet_train'] = networks.VGG16(is_train=True)
    __sets['VGGnet_test'] = networks.VGG16(is_train=False)
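
Both factory snippets above fill a module-level registry (presumably __sets = {}) that maps a name to a constructed network. The usual counterpart is a lookup helper; the sketch below assumes that registry and invents a get_network helper purely for illustration:

__sets = {}  # assumed module-level registry, filled by _register()

def get_network(name):
    # Sketch only: return the network registered under `name`.
    if name not in __sets:
        raise KeyError('Unknown network: {}'.format(name))
    return __sets[name]

# e.g. net = get_network('VGGnet_train')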
Code Example #3
File: train_content_loss.py  Project: shimo8810/STVSR
def main():
    '''
    Main function (entry point).
    '''
    # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--learnrate',
                        '-l',
                        type=float,
                        default=0.001,
                        help='Learning rate for SGD')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--iter_parallel',
                        '-p',
                        action='store_true',
                        default=False,
                        help='Load data with a parallel (multiprocess) iterator')
    parser.add_argument('--opt',
                        '-o',
                        type=str,
                        choices=('adam', 'sgd'),
                        default='adam')
    args = parser.parse_args()

    # print learning parameters
    print("-=Learning Parameter=-")
    print("# Max Epochs: {}".format(args.epoch))
    print("# Batch Size: {}".format(args.batchsize))
    print("# Learning Rate: {}".format(args.learnrate))
    print("# Optimizer Method: {}".format(args.opt))
    print('# Train Dataset: General 100')
    if args.iter_parallel:
        print("# Data Iters that loads in Parallel")
    print("\n")

    # save directory
    outdir = path.join(
        ROOT_PATH,
        'results/FI/AEFINet/AEFINet_ch6_fsize5_VGG_content_loss_opt_{}'.format(
            args.opt))
    if not path.exists(outdir):
        os.makedirs(outdir)
    with open(path.join(outdir, 'arg_param.txt'), 'w') as f:
        for k, v in args.__dict__.items():
            f.write('{}:{}\n'.format(k, v))

    print('# loading dataset (General100_train, General100_test) ...')
    if args.iter_parallel:
        train = SequenceDataset(dataset='train')
        test = SequenceDataset(dataset='test')
    else:
        train = SequenceDatasetOnMem(dataset='train')
        test = SequenceDatasetOnMem(dataset='test')

    # prepare model
    vgg16 = N.VGG16()
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        vgg16.to_gpu()
    chainer.serializers.load_npz(path.join(ROOT_PATH, 'models/VGG16.npz'),
                                 vgg16)
    model = N.VGG16Evaluator(N.AEFINet(ch=6, f_size=5), vgg16)
    if args.gpu >= 0:
        model.to_gpu()

    # setup optimizer
    if args.opt == 'adam':
        optimizer = chainer.optimizers.Adam(alpha=args.learnrate)
    elif args.opt == 'sgd':
        optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,
                                                   momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    # setup iter
    if args.iter_parallel:
        train_iter = chainer.iterators.MultiprocessIterator(train,
                                                            args.batchsize,
                                                            n_processes=8)
        test_iter = chainer.iterators.MultiprocessIterator(test,
                                                           args.batchsize,
                                                           repeat=False,
                                                           shuffle=False,
                                                           n_processes=8)
    else:
        train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
        test_iter = chainer.iterators.SerialIterator(test,
                                                     args.batchsize,
                                                     repeat=False,
                                                     shuffle=False)

    # setup trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)

    # evaluate on test data
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    # dump loss graph
    trainer.extend(extensions.dump_graph('main/loss'))
    # lr shift
    if args.opt == 'sgd':
        trainer.extend(extensions.ExponentialShift("lr", 0.1),
                       trigger=(100, 'epoch'))
    if args.opt == 'adam':
        trainer.extend(extensions.ExponentialShift("alpha", 0.1),
                       trigger=(50, 'epoch'))
    # save snapshot
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        model, 'model_snapshot_{.updater.epoch}'),
                   trigger=(10, 'epoch'))
    # log report
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))
    #  plot loss graph
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch',
                              file_name='loss.png'))
    # plot acc graph
    trainer.extend(
        extensions.PlotReport(['main/PSNR', 'validation/main/PSNR'],
                              'epoch',
                              file_name='PSNR.png'))
    # print info
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/loss_mse',
            'main/loss_cont', 'main/PSNR', 'validation/main/PSNR', 'lr',
            'elapsed_time'
        ]))
    # print progbar
    trainer.extend(extensions.ProgressBar())

    trainer.run()
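
Note that the --resume flag is parsed above but never used in this excerpt. If it were wired up, the snapshot written by extensions.snapshot() would typically be restored onto the trainer just before trainer.run(), along these lines (a sketch, not part of the original script):

    # Sketch: restore trainer state from a snapshot produced by extensions.snapshot()
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)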
Code Example #4
def main():
    global lossvals, best_acc1
    opt = options_direct_inference.generate_parser()

    start_epoch = 0
    im_dim = 224

    if opt.seed is not None:
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        cudnn.deterministic = True

    model = networks.VGG16(num_classes=2, batch_norm=True)
    model = model.cuda(opt.gpu)

    criterion = nn.CrossEntropyLoss().cuda(opt.gpu)

    optimizer = torch.optim.SGD(model.parameters(), opt.lr,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)

    # create checkpoints folder
    if not os.path.exists(opt.checkpoints_dir):
        os.mkdir(opt.checkpoints_dir)

    lossvals = dict()
    lossvals['train'] = opt.epochs * [0]
    lossvals['train_class0'] = opt.epochs * [0]
    lossvals['train_class1'] = opt.epochs * [0]
    lossvals['val'] = opt.epochs * [0]
    lossvals['val_class0'] = opt.epochs * [0]
    lossvals['val_class1'] = opt.epochs * [0]

    # optionally resume from a checkpoint
    if opt.resume_epoch is not None:
        resume_filename = os.path.join(opt.checkpoints_dir, 'checkpoint_epoch%d.pth.tar' % opt.resume_epoch)
        if os.path.isfile(resume_filename):
            print("=> loading checkpoint '{}'".format(resume_filename))
            checkpoint = torch.load(resume_filename, map_location=lambda storage, loc: storage.cuda(0))
            print('Done with torch.load')
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            curr_lr = optimizer.param_groups[0]['lr']
            #lossvals = np.load(os.path.join(opt.checkpoints_dir, 'lossvals.npy'))
            print("=> loaded checkpoint '{}' (epoch {})".format(resume_filename, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume_filename))

    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    # Load training data
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(im_dim),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = data.DatasetFromMultipleFilenames(opt,
                                                      [opt.dataA_dir, opt.dataB_dir],
                                                      [opt.filenamesA, opt.filenamesB],
                                                      train_transform)

    # Load validation data
    val_transform = transforms.Compose([
        transforms.Resize(im_dim+32),
        transforms.CenterCrop(im_dim),
        transforms.ToTensor(),
        normalize,
    ])
    val_dataset = data.DatasetFromMultipleFilenames(opt,
                                                    [opt.dataA_dir, opt.dataB_dir],
                                                    [opt.filenamesA_val, opt.filenamesB_val],
                                                    val_transform)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_threads, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.num_threads, pin_memory=True)

    if opt.evaluate:
        validate(val_loader, model, criterion, opt)
        return

    for epoch in range(start_epoch, opt.epochs):

        adjust_learning_rate(optimizer, epoch, opt)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, opt)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, epoch, opt)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        np.save(os.path.join(opt.checkpoints_dir, 'lossvals.npy'), lossvals)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': opt.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer': optimizer.state_dict(),
        }, is_best)
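
The save_checkpoint helper called above is not shown in this excerpt; judging from the resume logic earlier, the real one likely writes an epoch-numbered file into opt.checkpoints_dir. A common generic implementation, modeled on the PyTorch ImageNet example (the filenames below are assumptions), looks roughly like this:

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Persist the full training state; keep a separate copy of the best model so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')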