def get_generator(model_config):
    generator_name = model_config['g_name']
    if generator_name == 'resnet':
        model_g = ResnetGenerator(
            norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),
            use_dropout=model_config['dropout'],
            n_blocks=model_config['blocks'],
            learn_residual=model_config['learn_residual'])
    elif generator_name == 'fpn_mobilenet':
        model_g = FPNMobileNet(norm_layer=get_norm_layer(
            norm_type=model_config['norm_layer']))
    elif generator_name == 'fpn_inception':
        model_g = FPNInception(norm_layer=get_norm_layer(
            norm_type=model_config['norm_layer']))
    elif generator_name == 'fpn_inception_simple':
        model_g = FPNInceptionSimple(norm_layer=get_norm_layer(
            norm_type=model_config['norm_layer']))
    elif generator_name == 'fpn_dense':
        model_g = FPNDense()
    elif generator_name == 'unet_seresnext':
        model_g = UNetSEResNext(
            norm_layer=get_norm_layer(norm_type=model_config['norm_layer']),
            pretrained=model_config['pretrained'])
    elif generator_name == 'mirnet':
        model_g = MIRNet(in_channels=3,   # RGB input
                         out_channels=3,  # RGB output
                         n_feat=32,       # base number of feature channels
                         kernel_size=3,
                         stride=2,
                         n_RRG=3,         # recursive residual groups (RRGs)
                         n_MSRB=2,        # multi-scale residual blocks per RRG
                         height=3,
                         width=2,
                         bias=False)
    else:
        raise ValueError("Generator Network [%s] not recognized." %
                         generator_name)

    return nn.DataParallel(model_g)
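
# A minimal usage sketch for the factory above. The config values are
# illustrative assumptions (not taken from the listing); 'dropout', 'blocks'
# and 'learn_residual' are only read by the 'resnet' branch.
example_config = {
    'g_name': 'resnet',
    'norm_layer': 'instance',
    'dropout': True,
    'blocks': 9,
    'learn_residual': True,
}
netG = get_generator(example_config)
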
start_epoch = 1
mode = opt.MODEL.MODE
session = opt.MODEL.SESSION

result_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'results', session)
model_dir  = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'models',  session)

utils.mkdir(result_dir)
utils.mkdir(model_dir)

train_dir = opt.TRAINING.TRAIN_DIR
val_dir   = opt.TRAINING.VAL_DIR
save_images = opt.TRAINING.SAVE_IMAGES

######### Model ###########
model_restoration = MIRNet()
model_restoration.cuda()

device_ids = [i for i in range(torch.cuda.device_count())]
if torch.cuda.device_count() > 1:
  print("\n\nLet's use", torch.cuda.device_count(), "GPUs!\n\n")


new_lr = opt.OPTIM.LR_INITIAL

optimizer = optim.Adam(model_restoration.parameters(),
                       lr=new_lr,
                       betas=(0.9, 0.999),
                       eps=1e-8,
                       weight_decay=1e-8)

######### Scheduler ###########
warmup = True  # referenced below but never defined in the listing; True assumed
if warmup:
    warmup_epochs = 3
    scheduler_cosine = optim.lr_scheduler.CosineAnnealingLR(
        optimizer, opt.OPTIM.NUM_EPOCHS - warmup_epochs, eta_min=1e-6)
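    # The listing breaks off here. The cosine schedule is commonly wrapped in a
    # linear warmup; GradualWarmupScheduler comes from the third-party
    # warmup_scheduler package and its use here is an assumption, not part of
    # the original snippet.
    from warmup_scheduler import GradualWarmupScheduler
    scheduler = GradualWarmupScheduler(optimizer,
                                       multiplier=1,
                                       total_epoch=warmup_epochs,
                                       after_scheduler=scheduler_cosine)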
######### Example 3 ###########
args = parser.parse_args()

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

utils.mkdir(args.result_dir + 'matfile')
utils.mkdir(args.result_dir + 'png')

test_dataset = get_test_data(args.input_dir)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=args.bs,
                         shuffle=False,
                         num_workers=8,
                         drop_last=False)

model_restoration = MIRNet()

utils.load_checkpoint(model_restoration, args.weights)
print("===>Testing using weights: ", args.weights)

model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
    psnr_val_rgb = []
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        rgb_noisy = data_test[0].cuda()
        filenames = data_test[1]
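        # The loop body is cut off in the listing. A minimal, hedged sketch of
        # the usual continuation: run the model, clamp to [0, 1], and move the
        # result to NHWC on the CPU for saving.
        rgb_restored = torch.clamp(model_restoration(rgb_noisy), 0, 1)
        rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()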
######### Example 4 ###########
start_epoch = 1
mode = opt.MODEL.MODE
session = opt.MODEL.SESSION

result_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'results', session)
model_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'models', session)

utils.mkdir(result_dir)
utils.mkdir(model_dir)

train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
save_images = opt.TRAINING.SAVE_IMAGES

######### Model ###########
model_restoration = MIRNet()
model_restoration.cuda()

device_ids = [i for i in range(torch.cuda.device_count())]
if torch.cuda.device_count() > 1:
    print("\n\nLet's use", torch.cuda.device_count(), "GPUs!\n\n")

new_lr = opt.OPTIM.LR_INITIAL

optimizer = optim.Adam(model_restoration.parameters(),
                       lr=new_lr,
                       betas=(0.9, 0.999),
                       eps=1e-8,
                       weight_decay=1e-8)

######### Resume ###########
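# The listing ends at the Resume header. A minimal sketch of such a block,
# assuming a RESUME flag in the config, a 'model_latest.pth' filename, and a
# checkpoint dict with 'state_dict', 'optimizer' and 'epoch' keys (all of
# these are assumptions, not taken from the listing):
if opt.TRAINING.RESUME:
    checkpoint = torch.load(os.path.join(model_dir, 'model_latest.pth'))
    model_restoration.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    start_epoch = checkpoint['epoch'] + 1
    print('Resuming training from epoch', start_epoch)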
######### Example 5 ###########
mode = opt.MODEL.MODE
session = opt.MODEL.SESSION

result_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'results', session)
model_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'models', session)

utils.mkdir(result_dir)
utils.mkdir(model_dir)

train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
save_images = opt.TRAINING.SAVE_IMAGES

######### Model ###########
model_restoration = MIRNet(in_channels=3,
                           out_channels=3,
                           n_feat=64,
                           kernel_size=3,
                           stride=2,
                           n_RRG=3,
                           n_MSRB=2,
                           height=3,
                           width=2,
                           bias=False)
model_restoration.cuda()

device_ids = [i for i in range(torch.cuda.device_count())]
if torch.cuda.device_count() > 1:
    print("\n\nLet's use", torch.cuda.device_count(), "GPUs!\n\n")
    #model_restoration = nn.DataParallel(model_restoration)

new_lr = opt.OPTIM.LR_INITIAL

optimizer = optim.Adam(model_restoration.parameters(),
                       lr=new_lr,
                       betas=(0.9, 0.999),
                       eps=1e-8,
                       weight_decay=1e-8)

######### Evaluation ###########
# Separate inference snippet; `args` comes from an argparse parser not shown here.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

test_dataset = get_test_images('test')
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=args.bs,
                         shuffle=False,
                         num_workers=8,
                         drop_last=False)

#model_restoration = MIRNet(in_channels=3, out_channels=3, n_feat=64, kernel_size=3, stride=2, n_RRG=3, n_MSRB=2, height=3, width=2, bias=False)
model_restoration = MIRNet(in_channels=3,
                           out_channels=3,
                           n_feat=64,
                           kernel_size=3,
                           stride=2,
                           n_RRG=4,
                           n_MSRB=1,
                           height=2,
                           width=1,
                           bias=False)

utils.load_checkpoint(model_restoration, args.weights)
print("===>Testing using weights: ", args.weights)

model_restoration.cuda()

model_restoration = nn.DataParallel(model_restoration)

model_restoration.eval()

with torch.no_grad():
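    # The listing stops at the no_grad context. A hedged sketch of the usual
    # evaluation loop, mirroring the earlier test snippet in this listing; the
    # clamped forward pass is an assumption.
    for ii, data_test in enumerate(tqdm(test_loader), 0):
        rgb_noisy = data_test[0].cuda()
        filenames = data_test[1]
        rgb_restored = torch.clamp(model_restoration(rgb_noisy), 0, 1)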