Example #1
def main():
    print('===> Loading datasets')
    test_set = get_eval_set(opt.data_dir, opt.test_dir, opt.sr_upscale_factor,
                            opt.num_classes)
    test_loader = DataLoader(dataset=test_set,
                             num_workers=opt.threads,
                             batch_size=1,
                             shuffle=False)

    print('Building SR model ', opt.sr_model_name)
    if opt.sr_model_name == 'DBPN':
        sr_model = DBPN(num_channels=3,
                        base_filter=64,
                        feat=256,
                        num_stages=7,
                        scale_factor=opt.sr_upscale_factor)
        sr_model = torch.nn.DataParallel(sr_model, device_ids=gpus_list)
        model_name = os.path.join(opt.models_dir, exp_name, opt.sr_model)
        print(model_name)
        sr_model.load_state_dict(
            torch.load(model_name, map_location=lambda storage, loc: storage))
        print('Pre-trained SR model is loaded.')
    else:
        sys.exit('Invalid SR network')

    print('Building SemSeg model', opt.seg_model_name)
    if opt.seg_model_name == 'segnet':
        seg_model = segnet(num_classes=opt.num_classes, in_channels=3)
        seg_model = torch.nn.DataParallel(seg_model, device_ids=gpus_list)
        model_name = os.path.join(opt.models_dir, exp_name, opt.seg_model)
        print(model_name)
        seg_model.load_state_dict(torch.load(model_name))
        print('Pre-trained SemSeg model is loaded.')
    else:
        sys.exit('Invalid Semantic segmentation network')

    if cuda:
        sr_model = sr_model.cuda(gpus_list[0])
        seg_model = seg_model.cuda(gpus_list[0])

    check_mkdir('Results')
    check_mkdir(os.path.join('Results', exp_name))
    check_mkdir(os.path.join('Results', exp_name, 'segmentation'))
    check_mkdir(os.path.join('Results', exp_name, 'super-resolution'))
    check_mkdir('heat_maps')
    check_mkdir(os.path.join('heat_maps', exp_name))

    test(test_loader, sr_model, seg_model)
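The SR checkpoint above is restored with map_location=lambda storage, loc: storage, which deserializes every tensor onto the CPU regardless of the device it was saved from; the later .cuda(gpus_list[0]) call then decides where the model lives. A minimal equivalent sketch (the checkpoint path is illustrative, not from the source):

state = torch.load('checkpoint.pth', map_location='cpu')  # 'cpu' is shorthand for the identity lambda above
sr_model.load_state_dict(state)
sr_model = sr_model.cuda(gpus_list[0])  # move to the chosen GPU afterwards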
Example #2
def main():
    sys.stdout = Logger(os.path.join(opt.save_test_log,'test_'+systime+'.txt'))
    if not torch.cuda.is_available():
        raise Exception('No GPU found, please run with a GPU')
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        cudnn.benchmark = False
        torch.cuda.manual_seed(opt.seed)
    pin_memory = use_gpu
    n_c = 128
    n_b = opt.num_layers
    rrn = RRN(opt.scale, n_c, n_b)  # instantiate the recurrent network
    print(rrn)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in rrn.parameters())*4/1048576))
    print('===> {}L model has been initialized'.format(n_b))
    rrn = torch.nn.DataParallel(rrn, device_ids=gpus_list)
    print('===> load pretrained model')
    if os.path.isfile('./model/RRN-{}L.pth'.format(n_b)):
        rrn.load_state_dict(torch.load('./model/RRN-{}L.pth'.format(n_b), map_location=lambda storage, loc: storage))
        print('===> pretrained model is loaded')
    else:
        raise Exception('pretrained model does not exist')
    if use_gpu:
        rrn = rrn.cuda(gpus_list[0])

    print('===> Loading test Datasets')
    
    #scene_list = os.listdir(opt.test_dir)
    scene_list = [opt.scene_name]
    
    for scene_name in scene_list:
        test_set = get_eval_set(opt.test_dir, opt.scale, scene_name)
        test_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testbatchsize, shuffle=False, pin_memory=pin_memory, drop_last=False)
        print('===> DataLoading Finished')
        test(test_loader, rrn, opt.scale, scene_name, n_c)
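The model-size print in this example multiplies the parameter count by 4 bytes and divides by 1048576 (2**20), i.e. it assumes float32 parameters and reports megabytes. A dtype-agnostic sketch of the same estimate (the helper name is illustrative):

def model_size_mb(model):
    # element_size() returns bytes per element, so non-float32 parameters are counted correctly
    return sum(p.numel() * p.element_size() for p in model.parameters()) / 2**20

print('Model size: {:.5f}M'.format(model_size_mb(rrn)))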
Example #3
opt = parser.parse_args()

gpus_list = range(opt.gpus)
print(opt)

cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
test_set = get_eval_set(os.path.join(opt.input_dir, opt.test_dataset),
                        opt.upscale_factor)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')
if opt.model_type == 'DBPNLL':
    model = DBPNLL(num_channels=3,
                   base_filter=64,
                   feat=256,
                   num_stages=10,
                   scale_factor=opt.upscale_factor)  ###D-DBPN
elif opt.model_type == 'DBPN-RES-MR64-3':
    model = DBPNITER(num_channels=3,
                     base_filter=64,
                     feat=256,
                     num_stages=3,
                     scale_factor=opt.upscale_factor)  ###D-DBPN
Example #4
opt = parser.parse_args()

gpus_list = range(opt.gpus)
print(opt)

cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
test_set = get_eval_set(os.path.join(opt.input_dir, opt.test_dataset),
                        os.path.join(opt.input_dir, opt.test_rgb_dataset))
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')
if opt.model_type == 'PMBAX8':
    model = PMBAX8(num_channels=1,
                   base_filter=64,
                   feat=256,
                   num_stages=3,
                   scale_factor=opt.upscale_factor)  ##For NTIRE2018
else:
    model = PMBAX8(base_filter=64,
                   feat=256,
                   num_stages=3,
                   scale_factor=opt.upscale_factor)  # assumed completion: the source snippet is truncated here
Example #5
    parser.add_argument('--bicubic', action='store_true', help='if set, also generate bicubic images')
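    # store_true: --bicubic is False unless the flag is passed (argparse's type=bool would treat any non-empty string as True)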
    opt = parser.parse_args()

    gpus_list = range(opt.gpus)
    print(opt)

    cuda = opt.gpu_mode
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    print('===> Loading datasets')
    test_set = get_eval_set(os.path.join(opt.input_dir, opt.test_dataset), opt.upscale_factor)
    testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

    print('===> Building model')
    if opt.model_type == 'DBPNLL':
        model = DBPNLL(num_channels=3, base_filter=64, feat=256, num_stages=10,
                       scale_factor=opt.upscale_factor)  ###D-DBPN
    elif opt.model_type == 'DBPN-RES-MR64-3':
        model = DBPNITER(num_channels=3, base_filter=64, feat=256, num_stages=3,
                         scale_factor=opt.upscale_factor)  ###D-DBPN
    else:
        model = DBPN(num_channels=3, base_filter=64, feat=256, num_stages=7, scale_factor=opt.upscale_factor)  ###D-DBPN

    if cuda:
        model = torch.nn.DataParallel(model, device_ids=gpus_list)
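These scripts wrap the model in torch.nn.DataParallel before calling load_state_dict, so a checkpoint saved from a DataParallel model, whose keys all carry a 'module.' prefix, lines up with the wrapper. To load such a file into an unwrapped model instead, a common sketch is to strip the prefix (the path variable is illustrative):

state = torch.load(model_path, map_location='cpu')
# DataParallel prefixes every parameter key with 'module.'; remove it for a plain module
state = {k[len('module.'):] if k.startswith('module.') else k: v for k, v in state.items()}
model.load_state_dict(state)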
Example #6
File: main.py  Project: qynan/DASAA
parser.add_argument('--epochs',
                    type=int,
                    default=20000,
                    help='number of epochs to train')
parser.add_argument('--model_name', default='_DAVSR_5', help='model to select')

args = parser.parse_args()
gpu_lists = [0]
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.manual_seed(123)
torch.cuda.manual_seed(123)

save_model_path = '/home/qyn/qyn-prog/DASR/models/' + args.model_name + '/'

print('===> Loading datasets')
eval_set = get_eval_set(args.dataDir, args.eval_file_list, 5)
eval_data_loader = torch.utils.data.DataLoader(dataset=eval_set,
                                               num_workers=args.nThreads,
                                               batch_size=1,
                                               shuffle=False)

train_set = get_training_set(args.dataDir, args.train_file_list, 5,
                             args.patchSize, args.scale, True)

training_data_loader = torch.utils.data.DataLoader(dataset=train_set,
                                                   num_workers=args.nThreads,
                                                   batch_size=args.batchSize,
                                                   shuffle=True)

model = DAVSR_5(args.nChannel, args.nFeat, args.scale)
model = nn.DataParallel(model, gpu_lists)
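This snippet builds save_model_path from a hardcoded absolute prefix but never creates the directory. A minimal guard, sketched with a relative root (the 'models' directory is illustrative, not from the source):

import os

save_model_path = os.path.join('models', args.model_name)
os.makedirs(save_model_path, exist_ok=True)  # no-op if the directory already exists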
Example #7
def main():
    print('===> Loading datasets')
    train_set = get_training_set(opt.data_dir, opt.train_dir, opt.patch_size,
                                 opt.sr_patch_size, opt.sr_upscale_factor,
                                 opt.num_classes, opt.sr_data_augmentation)

    if opt.val_dir is not None:
        val_set = get_eval_set(opt.data_dir, opt.val_dir,
                               opt.sr_upscale_factor, opt.num_classes)
        train_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size)
        val_loader = DataLoader(dataset=val_set,
                                num_workers=opt.threads,
                                batch_size=1)
    else:
        # Creating data indices for training and validation splits:
        validation_split = .2
        dataset_size = len(train_set)
        indices = list(range(dataset_size))
        split = int(np.floor(validation_split * dataset_size))
        np.random.seed(opt.seed)
        np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]
        train_sampler = SubsetRandomSampler(train_indices)
        val_sampler = SubsetRandomSampler(val_indices)

        train_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  sampler=train_sampler)
        val_loader = DataLoader(dataset=train_set,
                                num_workers=opt.threads,
                                batch_size=1,
                                sampler=val_sampler)

    print('Building SR model ', opt.sr_model_name)
    if opt.sr_model_name == 'DBPN':
        sr_model = DBPN(num_channels=3,
                        base_filter=64,
                        feat=256,
                        num_stages=7,
                        scale_factor=opt.sr_upscale_factor)
        sr_model = torch.nn.DataParallel(sr_model, device_ids=gpus_list)
        if opt.sr_pretrained:
            model_name = os.path.join(opt.save_folder,
                                      opt.sr_pretrained_model)
            print(model_name)
            sr_model.load_state_dict(
                torch.load(model_name,
                           map_location=lambda storage, loc: storage))
            print('Pre-trained SR model is loaded.')
    else:
        sys.exit('Invalid SR network')

    print('Building SemSeg model', opt.seg_model_name)

    if opt.seg_model_name == 'segnet':
        seg_model = segnet(num_classes=opt.num_classes, in_channels=3)
        if not opt.seg_pretrained:
            seg_model.init_vgg16_params()
            print('segnet params initialized')
            seg_model = torch.nn.DataParallel(seg_model, device_ids=gpus_list)
        if opt.seg_pretrained:
            model_name = os.path.join(opt.save_folder,
                                      opt.seg_pretrained_model)
            print(model_name)
            seg_model.load_state_dict(torch.load(model_name))
            print('Pre-trained SemSeg model is loaded.')
            seg_model = torch.nn.DataParallel(seg_model, device_ids=gpus_list)

    sr_criterion = nn.L1Loss()
    psnr_criterion = nn.MSELoss()
    if cuda:
        sr_model = sr_model.cuda(gpus_list[0])
        seg_model = seg_model.cuda(gpus_list[0])
        sr_criterion = sr_criterion.cuda(gpus_list[0])
        psnr_criterion = psnr_criterion.cuda(gpus_list[0])
    if 'grss' in opt.data_dir:
        seg_criterion = CrossEntropyLoss2d(ignore_index=-1).cuda()
    else:
        seg_criterion = CrossEntropyLoss2d().cuda()

    sr_optimizer = optim.Adam(sr_model.parameters(),
                              lr=opt.sr_lr,
                              betas=(0.9, 0.999),
                              eps=1e-8)
    seg_optimizer = optim.Adam(seg_model.parameters(),
                               lr=opt.seg_lr,
                               weight_decay=opt.seg_weight_decay,
                               betas=(opt.seg_momentum, 0.99))

    scheduler = ReduceLROnPlateau(seg_optimizer,
                                  'min',
                                  factor=0.5,
                                  patience=opt.seg_lr_patience,
                                  min_lr=2.5e-5,
                                  verbose=True)

    check_mkdir(os.path.join('outputs', exp_name))
    check_mkdir(os.path.join('outputs', exp_name, 'segmentation'))
    check_mkdir(os.path.join('outputs', exp_name, 'super-resolution'))
    check_mkdir(os.path.join(opt.save_folder, exp_name))

    best_iou = validate(0, val_loader, sr_model, seg_model,
                        sr_criterion, psnr_criterion,
                        seg_criterion, sr_optimizer,
                        seg_optimizer)
    best_epoch = 0
    best_model = (sr_model, seg_model)
    since_last_best = 0

    for epoch in range(opt.start_iter, opt.epoch_num + 1):
        train(epoch, train_loader, sr_model, seg_model, sr_criterion,
              psnr_criterion, seg_criterion, sr_optimizer, seg_optimizer)

        val_results = validate(epoch, val_loader, sr_model, seg_model,
                               sr_criterion, psnr_criterion, seg_criterion,
                               sr_optimizer, seg_optimizer)

        if val_results > best_iou:
            best_iou = val_results
            best_epoch = epoch
            print('New best iou ', best_iou)
            best_model = (copy.deepcopy(sr_model), copy.deepcopy(seg_model))
            since_last_best = 0
            checkpoint(epoch, sr_model, seg_model, 'tmp_best')
        else:
            print('Best iou epoch: ', best_epoch, ':', best_iou)

        scheduler.step(val_results)

        if epoch % (opt.epoch_num // 2) == 0:
            for param_group in sr_optimizer.param_groups:
                param_group['lr'] /= 10.0
            print('SR Learning rate decay: lr={}'.format(
                sr_optimizer.param_groups[0]['lr']))

        if epoch % opt.snapshots == 0:
            checkpoint(epoch, sr_model, seg_model)

        #since_last_best += 1
        #if since_last_best == 20:
        #    checkpoint(epoch, best_model[0], best_model[1], 'tmp_best')

    print('Saving final best model')
    checkpoint(epoch, best_model[0], best_model[1], 'best')
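This example snapshots the best model with copy.deepcopy of the live DataParallel modules, which also duplicates their GPU memory. A lighter sketch that keeps only CPU copies of the weights (the dictionary layout is illustrative, not from the source):

# keep CPU copies of the weights instead of whole model replicas
best_state = {
    'sr': {k: v.cpu() for k, v in sr_model.state_dict().items()},
    'seg': {k: v.cpu() for k, v in seg_model.state_dict().items()},
}
# restore later with:
sr_model.load_state_dict(best_state['sr'])
seg_model.load_state_dict(best_state['seg'])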
Example #8
print('Loading datasets!')
train_set = get_training_set(
        opt.data_dir, 
        opt.hr_train_dataset, 
        opt.upscale_factor, 
        opt.patch_size, 
        opt.data_augmentation
    )
training_data_loader = DataLoader(
        dataset=train_set, 
        num_workers=opt.threads, 
        batch_size=opt.batchSize, 
        shuffle=True
    )    
test_set = get_eval_set(os.path.join(opt.data_dir,opt.hr_valid_dataset), opt.upscale_factor)
testing_data_loader = DataLoader(dataset=test_set, num_workers=0, batch_size=1, shuffle=False)

write_log('Building model!')
model = Srnet(
        num_channels=opt.n_colors,
        base_filter=opt.base_filter,
        num_stages=opt.num_stages,
        scale_factor=opt.upscale_factor
    )

model = torch.nn.DataParallel(model, device_ids=gpus_list)

write_log('---------- Networks architecture -------------')
print('Saving config!')
write_log(model, refresh=True)
Example #9
def main(not_parsed_args):
    logging.info('Build dataset')
    train_set = get_training_set(FLAGS.dataset_h, FLAGS.dataset_l,
                                 FLAGS.frames, FLAGS.scale, True,
                                 'filelist.txt', True, FLAGS.patch_size,
                                 FLAGS.future_frame)
    if FLAGS.dataset_val:
        val_set = get_eval_set(FLAGS.dataset_val_h, FLAGS.dataset_val_l,
                               FLAGS.frames, FLAGS.scale, True, 'filelist.txt',
                               True, FLAGS.patch_size, FLAGS.future_frame)

    logging.info('Build model')
    model = RBPN()
    model.summary()
    last_epoch, last_step = load_weights(model)
    model.compile(optimizer=optimizers.Adam(FLAGS.lr),
                  loss=losses.mae,
                  metrics=[psnr])

    # checkpoint = ModelCheckpoint('models/model.hdf5', verbose=1)
    tensorboard = TensorBoard(log_dir='./tf_logs',
                              batch_size=FLAGS.batch_size,
                              write_graph=False,
                              write_grads=True,
                              write_images=True,
                              update_freq='batch')
    tensorboard.set_model(model)

    logging.info('Training start')
    for e in range(last_epoch, FLAGS.epochs):
        tensorboard.on_epoch_begin(e)
        for s in range(last_step + 1, len(train_set) // FLAGS.batch_size):
            tensorboard.on_batch_begin(s)
            x, y = train_set.batch(FLAGS.batch_size)
            loss = model.train_on_batch(x, y)
            print('Epoch %d step %d, loss %f psnr %f' %
                  (e, s, loss[0], loss[1]))
            tensorboard.on_batch_end(s, named_logs(model, loss, s))

            if FLAGS.dataset_val and (s > 0 and s % FLAGS.val_interval == 0
                                      or s == len(train_set) // FLAGS.batch_size - 1):
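                # validate periodically and once more on the last step of the epoch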
                logging.info('Validation start')
                val_loss = 0
                val_psnr = 0
                for j in range(len(val_set)):
                    x_val, y_val = val_set.batch(1)
                    score = model.test_on_batch(x_val, y_val)
                    val_loss += score[0]
                    val_psnr += score[1]
                val_loss /= len(val_set)
                val_psnr /= len(val_set)
                logging.info('Validation average loss %f psnr %f' %
                             (val_loss, val_psnr))

            if (s > 0 and s % FLAGS.save_interval == 0
                    or s == len(train_set) // FLAGS.batch_size - 1):
                logging.info('Saving model')
                filename = 'model_%d_%d.h5' % (e, s)
                path = os.path.join(FLAGS.model_dir, filename)
                path_info = os.path.join(FLAGS.model_dir, 'info')
                model.save_weights(path)
                with open(path_info, 'w') as f:
                    f.write(filename)
        tensorboard.on_epoch_end(e)
        last_step = -1
Example #10
parser.add_argument('--lr_eval_dataset', type=str, default='val_low_bmp')

opt = parser.parse_args()

gpus_list = range(opt.gpus)

cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
eval_set = get_eval_set(opt.eval_dir, opt.hr_eval_dataset, opt.lr_eval_dataset,
                        opt.upscale_factor)
eval_data_loader = DataLoader(dataset=eval_set,
                              num_workers=opt.threads,
                              batch_size=opt.testBatchSize,
                              shuffle=False)

print('===> Building model ', opt.model_type)
if opt.model_type == 'WDSR':
    model = WDSR(num_channels=3, feat=64, scale=opt.upscale_factor)
else:
    sys.exit('Invalid model type')

if cuda:
    model = torch.nn.DataParallel(model, device_ids=gpus_list)

model.load_state_dict(
    torch.load(opt.model, map_location=lambda storage, loc: storage))
print('Pre-trained SR model is loaded.')
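After loading, these scripts hand the model and loader to a test routine. A minimal sketch of the evaluation step that typically follows (the batch unpacking is illustrative, not from the source):

model.eval()  # disable dropout and use running batch-norm statistics
with torch.no_grad():  # skip autograd bookkeeping during inference
    for batch in eval_data_loader:
        lr_img = batch[0]
        if cuda:
            lr_img = lr_img.cuda(gpus_list[0])
        sr_img = model(lr_img)  # super-resolved output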