Example 1
import time

import torch
import torch.nn as nn
import torch.optim as optim

# ESPCN and the dataloaders dict are defined elsewhere in the project
dev = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

nepochs = 50
log_step = 100

# Define network (ESPCN with an upscale factor of 8)
net = ESPCN(8)
net.to(dev)
print(net)

# Define loss
criterion = nn.MSELoss()

# Define optimizer and LR scheduler
optimizer = optim.Adam(net.parameters(), lr=0.0001)
# ReduceLROnPlateau lowers the LR when the monitored metric plateaus;
# it expects scheduler.step(val_loss) after each validation pass
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)

pretrained = torch.load('models/unet_bn_20190912_040318.pth', map_location=dev)
net.load_state_dict(pretrained['model_state_dict'])
optimizer.load_state_dict(pretrained['optimizer_state_dict'])

net.eval()
with torch.no_grad():
    val_loss = 0.0
    start = time.time()
    for i, (inps, lbls) in enumerate(dataloaders['val']):
        inps = inps.to(dev)
        lbls = lbls.to(dev)

        outs = net(inps)
        # Accumulate MSE over the validation set
        loss = criterion(outs, lbls)
        val_loss += loss.item()

    val_loss /= len(dataloaders['val'])
    print('val loss: {:.6f} ({:.1f}s)'.format(val_loss, time.time() - start))
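ESPCN results are usually reported as PSNR rather than raw MSE, and the averaged validation loss above converts directly. A minimal sketch, assuming the tensors are normalized to [0, 1] so the peak signal value is 1.0:

import math

def mse_to_psnr(mse, peak=1.0):
    # PSNR = 10 * log10(peak^2 / MSE), in dB; higher is better
    return 10.0 * math.log10(peak ** 2 / mse)

print('val PSNR: {:.2f} dB'.format(mse_to_psnr(val_loss)))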
Example 2
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

# get_training_set / get_test_set and ESPCN are project-specific imports;
# args comes from the script's argparse parser
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dtype = torch.float32  # assumed; the original script defines dtype elsewhere

train_set = get_training_set(args.upscale_factor)
test_set = get_test_set(args.upscale_factor)
train_data_loader = DataLoader(dataset=train_set, num_workers=args.threads,
                               batch_size=args.train_batch_size, shuffle=True)
test_data_loader = DataLoader(dataset=test_set, num_workers=args.threads,
                              batch_size=args.test_batch_size, shuffle=False)

net = ESPCN(upscale_factor=args.upscale_factor).to(device)

# Uncomment below to load trained weights
# weights = torch.load('data/weights/weights_epoch_30.pth')
# net.load_state_dict(weights)

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)


def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(train_data_loader, 1):
        img_in = batch[0].to(device).type(dtype)
        target = batch[1].to(device).type(dtype)

        optimizer.zero_grad()
        loss = criterion(net(img_in), target)
        epoch_loss += loss.item()

        loss.backward()
        optimizer.step()

    print('Epoch {}: avg. loss {:.6f}'.format(epoch, epoch_loss / len(train_data_loader)))
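The snippet defines train() but not the loop that calls it. A minimal driver sketch, assuming an args.epochs argument and the checkpoint naming used by the commented-out loading code above:

import os

os.makedirs('data/weights', exist_ok=True)
for epoch in range(1, args.epochs + 1):
    train(epoch)
    # state_dict checkpoints match the net.load_state_dict(...) usage above
    torch.save(net.state_dict(),
               'data/weights/weights_epoch_{}.pth'.format(epoch))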
Example 3
    train_dataset = Dataset(path=args.path,
                            mode='train',  # train split mirrors the val split below (assumed)
                            upscale_factor=args.upscale_factor)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers)

    val_dataset = Dataset(path=args.path,
                          mode='val',
                          upscale_factor=args.upscale_factor)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.num_workers)

    print("==>Loading model")
    model = ESPCN(upscale_factor=args.upscale_factor).to(device)
    #data parallel
    if len(args.gpu_ids) > 1:
        model = nn.DataParallel(model, args.gpu_ids)

    print(model)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.MSELoss().to(device)
    psnr_best = 0
    for epoch in range(args.epochs):
        train(train_dataloader, model, epoch, criterion, optimizer, args)
        psnr = evaluate(val_dataloader, model, epoch, criterion, args)
        if psnr > psnr_best:
            psnr_best = psnr
            save_path_name = os.path.join(
                args.save_path,
                'x{}_best_model.pth'.format(args.upscale_factor))
            print('model is saved at', save_path_name)
            # Saves the entire module (not just a state_dict), so loading
            # later requires the ESPCN class to be importable
            torch.save(model, save_path_name)
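Since torch.save(model, ...) pickles the whole module, reloading the best checkpoint is a single torch.load call, but the ESPCN class must be importable at load time. A minimal sketch, assuming an x4 checkpoint and a low-resolution CHW tensor lr_image (both hypothetical); on recent PyTorch releases weights_only=False is needed to unpickle a full module:

import torch

model = torch.load('x4_best_model.pth', map_location='cpu', weights_only=False)
model.eval()
with torch.no_grad():
    sr = model(lr_image.unsqueeze(0))  # add a batch dimension before inference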