# ---- CLI arguments -------------------------------------------------------
parser.add_argument('--result_dir', default='./results/sidd/sidd_rgb/', type=str, help='Directory for results')
parser.add_argument('--weights', default='./pretrained_models/denoising/sidd_rgb.pth', type=str, help='Path to weights')
parser.add_argument('--gpus', default='0', type=str, help='CUDA_VISIBLE_DEVICES')
parser.add_argument('--save_images', action='store_true', help='Save denoised images in result directory')
args = parser.parse_args()

# ---- GPU selection -------------------------------------------------------
# Pin device numbering to the PCI bus order so --gpus indices are stable,
# then restrict CUDA to the requested devices.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

utils.mkdir(args.result_dir)

# ---- Evaluation data -----------------------------------------------------
test_dataset = get_validation_data(args.input_dir)
test_loader = DataLoader(dataset=test_dataset, batch_size=4, shuffle=False, num_workers=8, drop_last=False)

# ---- Model ---------------------------------------------------------------
# Load pretrained weights, move to GPU, wrap for multi-GPU inference, and
# switch to eval mode (disables dropout / fixes batch-norm statistics).
model_restoration = DenoiseNet()
utils.load_checkpoint(model_restoration, args.weights)
print("===>Testing using weights: ", args.weights)
model_restoration.cuda()
model_restoration = nn.DataParallel(model_restoration)
model_restoration.eval()
print("==> Resuming Training with learning rate:", new_lr)
print('------------------------------------------------------------------------------')

# Wrap in DataParallel only when more than one GPU was requested.
if len(device_ids) > 1:
    model_restoration = nn.DataParallel(model_restoration, device_ids=device_ids)

######### Loss ###########
criterion = CharbonnierLoss().cuda()

######### DataLoaders ###########
img_options_train = {'patch_size': opt.TRAINING.TRAIN_PS}

train_dataset = get_training_data(train_dir, img_options_train)
train_loader = DataLoader(dataset=train_dataset, batch_size=opt.OPTIM.BATCH_SIZE, shuffle=True, num_workers=16, drop_last=False)

val_dataset = get_validation_data(val_dir)
val_loader = DataLoader(dataset=val_dataset, batch_size=16, shuffle=False, num_workers=8, drop_last=False)

print('===> Start Epoch {} End Epoch {}'.format(start_epoch, opt.OPTIM.NUM_EPOCHS + 1))
print('===> Loading datasets')

# MixUp augmentation helper used during training.
mixup = utils.MixUp_AUG()

# Running bests for checkpoint selection (updated during validation).
best_psnr = 0
best_epoch = 0
best_iter = 0

# Evaluate roughly four times per epoch. Clamp to at least 1: with fewer than
# 8 training batches the original `len(train_loader)//4 - 1` is <= 0, and any
# later `iteration % eval_now` check would raise ZeroDivisionError (or never
# fire for a negative modulus).
eval_now = max(len(train_loader) // 4 - 1, 1)
print(f"\nEvaluation after every {eval_now} Iterations !!!\n")

for epoch in range(start_epoch, opt.OPTIM.NUM_EPOCHS + 1):
    epoch_start_time = time.time()
device_ids=device_ids) ######### Loss ########### criterion = CharbonnierLoss().cuda() ######### DataLoaders ########### img_options_train = {'patch_size': opt.TRAINING.TRAIN_PS} train_dataset = get_training_data('train', img_options_train) train_loader = DataLoader(dataset=train_dataset, batch_size=opt.OPTIM.BATCH_SIZE, shuffle=True, num_workers=16, drop_last=False) val_dataset = get_validation_data('test') val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=8, drop_last=False) print('===> Start Epoch {} End Epoch {}'.format(start_epoch, opt.OPTIM.NUM_EPOCHS + 1)) print('===> Loading datasets') mixup = utils.MixUp_AUG() best_psnr = 0 best_epoch = 0 best_iter = 0