Example #1
### Loss functions
MSE_loss = nn.MSELoss()
BCE_loss = nn.BCELoss()

print('---------- Generator architecture -------------')
utils.print_network(model)
print('---------- Discriminator architecture ---------')
utils.print_network(D)
print('-----------------------------------------------')

if opt.load_pretrained:
    model_name = os.path.join(opt.save_folder, opt.pretrained_sr)
    if os.path.exists(model_name):
        model.load_state_dict(
            torch.load(model_name, map_location=lambda storage, loc: storage))
        print('Pre-trained SR model is loaded.')

if opt.load_pretrained_D:
    D_name = os.path.join(opt.save_folder, opt.pretrained_D)
    if os.path.exists(D_name):
        D.load_state_dict(
            torch.load(D_name, map_location=lambda storage, loc: storage))
        print('Pre-trained Discriminator model is loaded.')

if cuda:
    model = model.cuda(gpus_list[0])
    D = D.cuda(gpus_list[0])
    feature_extractor = feature_extractor.cuda(gpus_list[0])
    MSE_loss = MSE_loss.cuda(gpus_list[0])
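Example #1 only sets up the losses, loads any pre-trained weights, and moves everything to the GPU. Below is a minimal, hypothetical sketch of how these pieces are usually combined in one SR-GAN style training step; optimizer_G, optimizer_D, adv_weight, and a discriminator that returns one sigmoid score per image are assumptions, not part of the snippet above.

import torch


def train_step(lr, hr, model, D, feature_extractor, MSE_loss, BCE_loss,
               optimizer_G, optimizer_D, adv_weight=1e-3):
    real = torch.ones(hr.size(0), 1, device=hr.device)
    fake = torch.zeros(hr.size(0), 1, device=hr.device)

    # Generator update: pixel loss + perceptual loss + adversarial loss.
    optimizer_G.zero_grad()
    sr = model(lr)
    g_loss = (MSE_loss(sr, hr)
              + MSE_loss(feature_extractor(sr), feature_extractor(hr).detach())
              + adv_weight * BCE_loss(D(sr), real))
    g_loss.backward()
    optimizer_G.step()

    # Discriminator update: real images labelled 1, generated images labelled 0.
    optimizer_D.zero_grad()
    d_loss = BCE_loss(D(hr), real) + BCE_loss(D(sr.detach()), fake)
    d_loss.backward()
    optimizer_D.step()
    return g_loss.item(), d_loss.item()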
Example #2
    model = DBPNITER(num_channels=3,
                     base_filter=64,
                     feat=256,
                     num_stages=3,
                     scale_factor=opt.upscale_factor)  ###D-DBPN
else:
    model = DBPN(num_channels=3,
                 base_filter=64,
                 feat=256,
                 num_stages=7,
                 scale_factor=opt.upscale_factor)  ###D-DBPN

if cuda:
    model = torch.nn.DataParallel(model, device_ids=gpus_list)

model.load_state_dict(
    torch.load(opt.model, map_location=lambda storage, loc: storage))
print('Pre-trained SR model is loaded.')

if cuda:
    model = model.cuda(gpus_list[0])


def eval():
    model.eval()
    for batch in testing_data_loader:
        # torch.autograd.Variable is a no-op on modern PyTorch; plain tensors suffice.
        with torch.no_grad():
            input, bicubic, name = batch[0], batch[1], batch[2]
        if cuda:
            input = input.cuda(gpus_list[0])
            bicubic = bicubic.cuda(gpus_list[0])
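The eval() loop in Example #2 is cut off after the inputs are moved to the GPU. A hedged sketch of how such a loop typically finishes is shown below: run the forward pass under no_grad, then write the prediction to disk. The Results folder and the use of torchvision.utils.save_image are assumptions, not part of the original script.

import os

import torch
from torchvision.utils import save_image


def save_prediction(model, input, name, output_dir='Results'):
    # Forward pass without building the autograd graph.
    with torch.no_grad():
        prediction = model(input)
    # Clamp to the valid image range and write one file per batch.
    prediction = prediction.cpu().clamp(0, 1)
    os.makedirs(output_dir, exist_ok=True)
    save_image(prediction, os.path.join(output_dir, name[0]))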
Example #3
                 feat=256,
                 num_stages=7,
                 scale_factor=opt.upscale_factor)  ###D-DBPN

if cuda:
    model = torch.nn.DataParallel(model, device_ids=gpus_list)

# Works up to this point, for now.
from collections import OrderedDict
new_state_dict = OrderedDict()
state_dict = torch.load(opt.model, map_location=lambda storage, loc: storage)
for k, v in state_dict.items():
    name = k[7:]  # remove `module.`
    new_state_dict[name] = v

model.load_state_dict(new_state_dict)

print('Pre-trained SR model is loaded.')

if cuda:
    model = model.cuda(gpus_list[0])


def eval():
    model.eval()
    for batch in testing_data_loader:
        # torch.autograd.Variable is a no-op on modern PyTorch; plain tensors suffice.
        with torch.no_grad():
            input, bicubic, name = batch[0], batch[1], batch[2]
        if cuda:
            input = input.cuda(gpus_list[0])
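Example #3 strips the DataParallel prefix with k[7:], which silently corrupts the keys if a checkpoint happens to be saved without the 'module.' prefix. A small, hypothetical helper that handles both cases is sketched below.

from collections import OrderedDict

import torch


def strip_module_prefix(state_dict):
    # Drop a leading 'module.' only where it is actually present, so checkpoints
    # saved with or without nn.DataParallel both load cleanly.
    return OrderedDict(
        (k[len('module.'):] if k.startswith('module.') else k, v)
        for k, v in state_dict.items())


# Usage:
# state_dict = torch.load(opt.model, map_location=lambda storage, loc: storage)
# model.load_state_dict(strip_module_prefix(state_dict))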