Example #1
0
def main():
    """Denoise the SIDD benchmark blocks with a pretrained RDN model.

    Loads the checkpoint named by ``opt``, runs every 256x256 noisy sRGB
    block through the network on the GPU, overwrites the blocks with the
    denoised output, saves the result as a .mat file, and reports the
    average per-megapixel inference time.
    """
    # Build model
    print('Loading model ...\n')

    net = RDN(64, 3)

    # Wrap in DataParallel so checkpoint keys ('module.' prefix) match.
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(torch.load(os.path.join(opt.logdir, opt.name, opt.which_model)))
    model.eval()

    # Load data info
    print('Loading data info ...\n')
    mat_file = scio.loadmat(opt.test_path)
    # Benchmark layout: 40 images x 32 blocks per image (uint8, HxWx3).
    data = mat_file['BenchmarkNoisyBlocksSrgb']

    # Process data
    ave_time = 0
    for p in range(40):
        for q in range(32):
            img = data[p, q, :, :, :]

            # ToTensor converts the uint8 HxWxC block to a float CxHxW
            # tensor scaled to [0, 1].
            inp = transforms.ToTensor()(img)
            inp = inp.unsqueeze(0).cuda()

            with torch.no_grad():  # this can save much memory
                # Synchronize around the forward pass so the timing
                # measures the CUDA kernel work, not just the launch.
                torch.cuda.synchronize()
                start = time.time()
                out = model(inp)
                torch.cuda.synchronize()
                ave_time += time.time() - start

                # Back to uint8 HxWxC for storage in the .mat array.
                out = torch.clamp(out, 0., 1.) * 255
                out_img = out.squeeze(0).cpu().numpy()
                out_img = out_img.astype('uint8')
                out_img = np.transpose(out_img, (1, 2, 0))

                data[p, q, :, :, :] = out_img

            # BUGFIX: was print('cnt : %d', p*32+q), which printed the
            # raw format string and the value as two separate arguments.
            print('cnt : %d' % (p * 32 + q))

    model_dir = os.path.join('data', 'Resultstest')
    print('create checkpoint directory %s...' % model_dir)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    mat_file['BenchmarkNoisyBlocksSrgb'] = data
    scio.savemat(os.path.join(model_dir, 'Resultstest.mat'), {'results': data})

    # Average over the 40*32 = 1280 blocks, then rescale the 256x256
    # block time to one megapixel (1000x1000) for reporting.
    ave_time = ave_time / 1280
    ave_time = ave_time * (1000 / 256) * (1000 / 256)
    # BUGFIX: %-format the value instead of passing it to print separately.
    print('average time : %4f' % ave_time)
Example #2
0
def main():
    """Train the RDN denoiser on the patch dataset.

    Builds the data loader and model, then runs ``opt.epochs`` epochs of
    L1 training with a per-epoch learning-rate schedule, periodic console
    logging, and checkpointing (a rolling 'latest' every 1000 steps plus
    one file per epoch) into ``opt.outf/opt.name``.
    """
    model_dir = os.path.join(opt.outf, opt.name)
    print('create checkpoint directory %s...' % model_dir)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))

    # Build model and report its size.
    net = RDN(64, 3)
    num_params = sum(parm.numel() for parm in net.parameters())
    print(net)
    print('[Network %s] Total number of parameters : %.3f M' % (opt.name, num_params / 1e6))

    # Move to GPU; DataParallel also keeps checkpoint keys consistent
    # with the inference scripts ('module.' prefix).
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    # Training
    step = 0
    for epoch in range(opt.epochs):
        # Set the learning rate for this epoch.
        current_lr = update_lr(opt.lr, epoch)
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)

        model.train()
        start_time = time.time()
        ave_loss = 0
        ave_psnr = 0
        ave_ssim = 0
        for i, data in enumerate(loader_train, 0):
            time1 = time.time()
            model.zero_grad()
            optimizer.zero_grad()

            # Each sample stacks noisy (first 3) and clean (last 3)
            # channels along dim 1.
            noise_img = data[:, :3, :, :]
            gt_img = data[:, 3:, :, :]
            noise_img, gt_img = noise_img.cuda(), gt_img.cuda()

            pred_res = model(noise_img)

            # L1 loss drives training; the SSIM term is computed only
            # for monitoring (the combined loss was disabled).
            loss1 = torch.mean(torch.abs(pred_res - gt_img))
            loss2 = torch.mean(SSIM(pred_res, gt_img))
            loss = loss1  # 0.75*loss1 + 0.25*loss2

            loss.backward()
            optimizer.step()

            # Evaluate PSNR on the clamped prediction.
            result = torch.clamp(pred_res, 0., 1.)
            psnr_train = batch_PSNR(result, gt_img, 1.)

            # Running averages over the epoch so far.
            ave_loss = (ave_loss*i + loss.item()) / (i+1)
            ave_psnr = (ave_psnr*i + psnr_train) / (i+1)
            # NOTE(review): this parses as ave_ssim*i + 1 - 2*loss2; if
            # the intent was (1 - loss2)*2 it needs parentheses —
            # confirm against the SSIM loss definition.
            ave_ssim = (ave_ssim*i + 1-loss2.item()*2) / (i+1)

            time2 = time.time()

            if i % 100 == 0:
                print("[epoch %d][%d/%d] time: %.3f t_time: %.3f loss: %.4f PSNR_train: %.4f SSIM_train: %.4f" %
                    (epoch+1, i, len(loader_train), (time2 - time1), (time2 - start_time), ave_loss, ave_psnr, ave_ssim))

            # Rolling "latest" checkpoint every 1000 optimizer steps.
            if step % 1000 == 0:
                torch.save(model.state_dict(), os.path.join(model_dir, 'latest_net.pth'))
            step += 1
        print('Time for the epoch is %f' % (time.time() - start_time))
        ## the end of each epoch

        # Save a per-epoch checkpoint.
        save_name = '%d_net.pth' % (epoch+1)
        torch.save(model.state_dict(), os.path.join(model_dir, save_name))
Example #3
0
if __name__ == '__main__':
    # CLI for single-image super-resolution inference with a trained RDN.
    # (This excerpt is truncated; processing continues past the last line.)
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights-file', type=str, required=True)
    parser.add_argument('--image-file', type=str, required=True)
    parser.add_argument('--num-features', type=int, default=64)
    parser.add_argument('--growth-rate', type=int, default=64)
    parser.add_argument('--num-blocks', type=int, default=16)
    parser.add_argument('--num-layers', type=int, default=8)
    parser.add_argument('--scale', type=int, default=4)
    args = parser.parse_args()

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # NOTE(review): the architecture flags parsed above (--num-features,
    # --growth-rate, --num-blocks, --num-layers, --scale) are NOT passed
    # to this constructor; colorChannel/scale are hard-coded — confirm
    # this matches the checkpoint being loaded.
    model = RDN(colorChannel=3, scale=4)
    model.to(device)

    # Copy checkpoint tensors into the live state dict in place (copy_
    # mutates the model's tensors, so no load_state_dict call follows);
    # any key in the file the model lacks raises KeyError.
    state_dict = model.state_dict()
    for n, p in torch.load(args.weights_file,
                           map_location=lambda storage, loc: storage).items():
        if n in state_dict.keys():
            state_dict[n].copy_(p)
        else:
            raise KeyError(n)

    model.eval()

    image = pil_image.open(args.image_file).convert('RGB')

    # Round the width down to a multiple of the scale factor.
    image_width = (image.width // args.scale) * args.scale
Example #4
0
    # Fragment of a single-image RDN inference script (enclosing scope is
    # outside this excerpt): parse the CLI, build the model, load weights.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights-file', type=str, required=True)
    parser.add_argument('--image-file', type=str, required=True)
    parser.add_argument('--num-features', type=int, default=64)
    parser.add_argument('--growth-rate', type=int, default=64)
    parser.add_argument('--num-blocks', type=int, default=16)
    parser.add_argument('--num-layers', type=int, default=8)
    parser.add_argument('--scale', type=int, default=4)
    args = parser.parse_args()

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Unlike some sibling scripts, this variant forwards every parsed
    # architecture flag to the constructor.
    model = RDN(scale_factor=args.scale,
                num_channels=3,
                num_features=args.num_features,
                growth_rate=args.growth_rate,
                num_blocks=args.num_blocks,
                num_layers=args.num_layers).to(device)

    # Copy checkpoint tensors into the live state dict in place; an
    # unexpected key in the weights file raises KeyError.
    state_dict = model.state_dict()
    for n, p in torch.load(args.weights_file,
                           map_location=lambda storage, loc: storage).items():
        if n in state_dict.keys():
            state_dict[n].copy_(p)
        else:
            raise KeyError(n)

    model.eval()

    image = pil_image.open(args.image_file).convert('RGB')
Example #5
0
def main():
    """Denoise the SIDD validation blocks with a pretrained RDN model.

    Loads the checkpoint named by ``opt``, normalizes and runs every
    256x256 noisy sRGB validation block through the network (optionally
    tiled into sub-blocks), writes the denoised output back into the
    array, saves it as a .mat file, and reports the average
    per-megapixel inference time.
    """
    # Build model
    print('Loading model ...\n')
    net = RDN(64, 3)

    # Wrap in DataParallel so checkpoint keys ('module.' prefix) match.
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(
        torch.load(os.path.join(opt.logdir, opt.name, opt.which_model)))
    model.eval()

    # Load data info
    print('Loading data info ...\n')
    mat_file = scio.loadmat(validate_path)
    # Validation layout: 40 images x 32 blocks per image.
    data = mat_file['ValidationNoisyBlocksSrgb']

    # Process data
    ave_time = 0
    for p in range(40):
        for q in range(32):
            img = data[p, q, :, :, :]
            img = normalize(np.float32(img))

            inp = transforms.ToTensor()(img)
            inp = inp.unsqueeze(0).cuda()

            out = torch.Tensor(inp.size()).cuda()
            with torch.no_grad():  # this can save much memory
                # Optional tiling: split the 256x256 block into a k x k
                # grid of sub-tiles (k == 1 keeps the block whole).
                k = 1
                n = int(256 / k)
                for i in range(k):
                    for j in range(k):
                        input1 = inp[:, :, i * n:n + i * n, j * n:n + j * n]
                        # Synchronize around the forward pass so the
                        # timing measures the CUDA work, not the launch.
                        torch.cuda.synchronize()
                        start = time.time()
                        out1 = model(input1)
                        torch.cuda.synchronize()
                        ave_time += time.time() - start
                        out[:, :, i * n:n + i * n, j * n:n + j * n] = out1

                # Back to uint8 HxWxC for storage in the .mat array.
                out = torch.clamp(out, 0., 1.) * 255
                out_img = out.squeeze(0).cpu().numpy()
                out_img = out_img.astype('uint8')
                out_img = np.transpose(out_img, (1, 2, 0))

                data[p, q, :, :, :] = out_img

            # BUGFIX: was print('cnt : %d', ...), which printed the raw
            # format string and the value as two separate arguments.
            print('cnt : %d' % (p * 32 + q))

    mat_file['ValidationNoisyBlocksSrgb'] = data
    scio.savemat('data/Resultsl116_full', {'results': data})

    # Average over the 40*32 = 1280 blocks, then rescale the 256x256
    # block time to one megapixel (1000x1000) for reporting.
    ave_time = ave_time / 1280
    ave_time = ave_time * (1000 / 256) * (1000 / 256)
    # BUGFIX: %-format the value instead of passing it to print separately.
    print('average time : %4f' % ave_time)
Example #6
0
    # Fragment of an RDN training script (enclosing scope is outside this
    # excerpt): finish CLI parsing, build the model, optionally warm-start.
    parser.add_argument('--seed', type=int, default=123)
    args = parser.parse_args()

    # Checkpoints go into a per-scale subdirectory, e.g. outputs/x4.
    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))

    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    torch.manual_seed(args.seed)

    model = RDN(scale_factor=args.scale,
                num_channels=3,
                num_features=args.num_features,
                growth_rate=args.growth_rate,
                num_blocks=args.num_blocks,
                num_layers=args.num_layers).to(device)

    # Optional warm start: copy checkpoint tensors into the live state
    # dict in place (copy_ mutates the model's tensors, so no explicit
    # load_state_dict follows). Unknown keys abort with KeyError.
    if args.weights_file is not None:
        state_dict = model.state_dict()
        for n, p in torch.load(args.weights_file, map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)

    criterion = nn.L1Loss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train_dataset = TrainDataset(args.train_file, patch_size=args.patch_size, scale=args.scale)
Example #7
0
    # Fragment of an RDN training script (enclosing scope is outside this
    # excerpt): finish CLI parsing, build the model, optionally warm-start.
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--seed', type=int, default=123)
    args = parser.parse_args()

    # Checkpoints go into a per-scale subdirectory, e.g. outputs/x4.
    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))

    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    torch.manual_seed(args.seed)

    # NOTE(review): scale is hard-coded to 4 here even though the output
    # directory above is derived from args.scale — confirm they agree.
    model = RDN(colorChannel=3,
                scale=4,
                blockDepth=args.num_blocks,
                convDepth=args.num_layers)
    model.to(device)

    # Optional warm start: copy checkpoint tensors into the live state
    # dict in place; unknown keys abort with KeyError.
    if args.weights_file is not None:
        state_dict = model.state_dict()
        for n, p in torch.load(
                args.weights_file,
                map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)

    criterion = nn.L1Loss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
Example #8
0
    # Fragment of a multi-GPU RDN test script (enclosing scope is outside
    # this excerpt; the final loop is truncated).
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--gpus', type=int, default=4, help='number of gpus')

    args = parser.parse_args()

    cudnn.benchmark = True
    gpus_list = range(args.gpus)

    test_dataset = TestDataset(args.test_path)
    test_dataloader = DataLoader(dataset=test_dataset,
                                 batch_size=args.batch_size)

    model = RDN(
        scale_factor=args.scale,
        num_channels=3,
        num_features=args.num_features,
        growth_rate=args.growth_rate,
        num_blocks=args.num_blocks,
        num_layers=args.num_layers,
    ).cuda(gpus_list[0])
    model = torch.nn.DataParallel(model, device_ids=list(gpus_list))

    # Copy checkpoint tensors into the live state dict in place; unknown
    # keys abort with KeyError.
    # NOTE(review): state_dict is taken AFTER the DataParallel wrap, so
    # its keys carry a 'module.' prefix — the checkpoint's keys must
    # match that prefix or every entry raises KeyError. Confirm against
    # how the weights file was saved.
    state_dict = model.state_dict()
    for n, p in torch.load(args.weights_file,
                           map_location=lambda storage, loc: storage).items():
        if n in state_dict.keys():
            state_dict[n].copy_(p)
        else:
            raise KeyError(n)

    # Inference loop (body continues beyond this excerpt).
    for (idx, input, input_path) in test_dataloader:
        input = input.cuda(gpus_list[0])
Example #9
0
    # Fragment of a multi-GPU RDN training script (enclosing scope is
    # outside this excerpt): finish CLI parsing, build and wrap the model,
    # optionally warm-start from a checkpoint.
    args = parser.parse_args()
    # Checkpoints go into a per-scale subdirectory, e.g. outputs/x4.
    args.outputs_path = os.path.join(args.outputs_path,
                                     'x{}'.format(args.scale))

    if not os.path.exists(args.outputs_path):
        os.makedirs(args.outputs_path)

    gpus_list = range(args.gpus)
    cudnn.benchmark = True

    torch.manual_seed(args.seed)

    model = RDN(
        scale_factor=args.scale,
        num_channels=3,
        num_features=args.num_features,
        growth_rate=args.growth_rate,
        num_blocks=args.num_blocks,
        num_layers=args.num_layers,
    ).cuda(gpus_list[0])

    model = torch.nn.DataParallel(model, device_ids=list(gpus_list))

    # Optional warm start: copy checkpoint tensors into the live state
    # dict in place; unknown keys abort with KeyError.
    # NOTE(review): state_dict is taken AFTER the DataParallel wrap, so
    # its keys carry a 'module.' prefix — the checkpoint must have been
    # saved with that prefix; verify against the saving script.
    if args.weights_file is not None:
        state_dict = model.state_dict()
        for n, p in torch.load(
                args.weights_file,
                map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)