Code example #1
def main():
    # Load data
    print("=> Load data...")
    data = STL10(opt.path_data, split='train', transform=transform_train, download=True)
    loader_train = torch.utils.data.DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=2)
    # Build model
    print("=> Build model...")
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    if opt.resume and os.path.exists(os.path.join(opt.outf, 'net.pth')):
        print("Resuming training.")
        net.load_state_dict(torch.load(os.path.join(opt.outf, 'net.pth')))
    else:
        print("Training from scratch.")
        net.apply(weights_init_kaiming)
    # Loss
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is the deprecated spelling
    # Optimizer
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)
    # Training
    step = 0
    print("=> Begin training...")
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 5.
        # Set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('Learning rate %f' % current_lr)
        # Train
        for i, (img_train, imgn_train) in enumerate(loader_train, 0):
            # training step
            net.train()
            net.zero_grad()
            optimizer.zero_grad()
            out_train = net(imgn_train.float())
            loss = criterion(out_train, img_train) / (imgn_train.size()[0]*2)
            loss.backward()
            optimizer.step()
            # Results
            net.eval()
            out_train = torch.clamp(out_train, 0., 1.)
            psnr_train = batch_PSNR(out_train, img_train, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            step += 1

        # Save model
        torch.save(net.state_dict(), os.path.join(opt.outf, 'net.pth'))
Code example #2
File: our_model.py  Project: c-vision-91/BIGPrior
    def __init__(self, args):
        super(full_model, self).__init__()

        if 'col' in args.experiment:
            self.model_channels = 1
        else:
            self.model_channels = 3

        if args.extend_input:
            self.model_channels += 3

        if args.backbone == 'D':
            print('** DnCNN backbone **')
            net = DnCNN(channels=self.model_channels,
                        num_of_layers=args.dncnn_layers)
            net.apply(weights_init_kaiming)
        elif args.backbone == 'M':
            print('** MemNet backbone **')
            net = MemNet(in_channels=self.model_channels,
                         channels=args.memnet_channels,
                         num_memblock=args.memnet_memblocks,
                         num_resblock=args.memnet_resblocks)
            net.apply(weights_init_kaiming)
        elif args.backbone == 'R':
            print('** RIDNet backbone **')
            net = RIDNET(in_channels=self.model_channels)
            net.apply(weights_init_kaiming)
        elif args.backbone == 'N':
            net = RNAN(n_colors=self.model_channels)
        else:
            raise ValueError('Unknown backbone: %s' % args.backbone)  # guard against an unbound `net`

        self.backbone = net
        self.args = args
        self.sigmoid = nn.Sigmoid()
Code example #3
def main():
    # Load dataset
    print('Loading dataset ...\n')
    use_cuda = torch.cuda.is_available()
    torch.manual_seed(1234)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    
    transform = transforms.Compose([transforms.Resize((opt.img_size, opt.img_size)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5),
                                                         (0.5, 0.5, 0.5))])

    if opt.dataset == 'celeba':
        dataset_train = MyCelebA('../data', split="train", target_type="bbox",
                                 download=False, transform=transform)
        dataset_val = MyCelebA('../data', split="valid", target_type="bbox",
                               transform=transform)
    elif opt.dataset == 'dtd':
        dataset_train = MyDTD('../data', split="train", download=False, transform=transform)
        dataset_val = MyDTD('../data', split="valid", transform=transform)
    elif opt.dataset == 'paris_streetview':
        dataset_train = MyParis_streetview('../data', split="train", download=False,
                                           transform=transform)
        dataset_val = MyParis_streetview('../data', split="valid", transform=transform)

    loader_train = torch.utils.data.DataLoader(dataset_train, batch_size=opt.batchSize, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_val, batch_size=opt.batchSize, shuffle=True)

    print("# of training samples: %d\n" % int(len(dataset_train)))

    # Build model
    net = DnCNN(channels=12, out_ch=6, num_of_layers=opt.num_of_layers) # channels was 6
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated; ToDo: add weighted MSE loss
    # Move to GPU
    model = net.cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    logdir = opt.outf + '/{}{}_{}'.format(prefix, opt.dataset, datetime_f)
    os.makedirs(logdir, exist_ok=True)
    
    writer = SummaryWriter(opt.outf + '/{}tb_{}_{}'.format(prefix, opt.dataset, datetime_f))
    step = 0
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data[0]
            
            
            if use_irregular:
                x_masked, x_fft, x_masked_fft, lims_list, idx_list, idx_list_m, all_masks, mask_fft = get_color_fft_images_irregular(img_train.numpy(), True)
            else:
                # Note: this branch does not produce mask_fft, so the
                # concatenation below assumes use_irregular is True.
                x_masked, x_fft, x_masked_fft, lims_list, idx_list, idx_list_m = get_color_fft_images(img_train.numpy(),
                                                                                                      dx=64, half=use_half)

            img_train = torch.from_numpy(x_fft).type(torch.FloatTensor)
            imgn_train = torch.from_numpy(x_masked_fft).type(torch.FloatTensor)
            mask_fft = torch.from_numpy(mask_fft).type(torch.FloatTensor)

            img_train, imgn_train = Variable(img_train.cuda()), Variable(imgn_train.cuda())
            mask_train_in = Variable(mask_fft.cuda())
            imgn_train_cat = torch.cat((mask_train_in, imgn_train), axis=1)
            
            out_train = model(imgn_train_cat)
            loss = criterion(out_train, img_train) / (imgn_train.size()[0]*2)
            loss.backward()
            optimizer.step()
            # results
            model.eval()
            # out_train = torch.clamp(imgn_train-model(imgn_train), 0., 1.)
            out_train = torch.clamp(model(imgn_train_cat), 0., 1.)
            
            img_back = get_color_images_back(img_train.cpu().numpy(), lims_list, idx_list)
            img_back_masked = get_color_images_back(imgn_train.cpu().numpy(), lims_list, idx_list_m)
            img_back_recon = get_color_images_back(out_train.detach().cpu().numpy(), lims_list, idx_list)
            #orig_im = (img_train + 1)/2
            img_back = (torch.from_numpy(img_back) + 1)/2
            img_back_masked = (torch.from_numpy(img_back_masked) + 1)/2
            img_back_recon = (torch.from_numpy(img_back_recon) + 1)/2
            
            psnr_train = batch_PSNR(img_back, img_back_recon, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
            # periodically log image grids (note: still inside the batch loop)
            model.eval()
            if step % 50 == 0:
                print('Saving images...')
                Img = utils.make_grid(img_back.data, nrow=8, normalize=True, scale_each=True)
                Imgn = utils.make_grid(img_back_masked.data, nrow=8, normalize=True, scale_each=True)
                Irecon = utils.make_grid(img_back_recon.data, nrow=8, normalize=True, scale_each=True)
                writer.add_image('clean image', Img, step//50)
                writer.add_image('noisy image', Imgn, step//50)
                writer.add_image('reconstructed image', Irecon, step//50)

                utils.save_image(img_back.data, logdir + '/clean_image_{}.png'.format(step//50))
                utils.save_image(img_back_masked.data, logdir + '/noisy_image_{}.png'.format(step//50))
                utils.save_image(img_back_recon.data, logdir + '/reconstructed_image_{}.png'.format(step//50))
                
        # save model
        torch.save(model.state_dict(), os.path.join(opt.outf, '{}{}_net.pth'.format(prefix, opt.dataset)))
Code example #4
File: train.py  Project: kwthj/DnCNN-PyTorch
def main():
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    dataset_val = Dataset(train=False)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    noiseL_B = [0, 55]  # ignored when opt.mode=='S'
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data
            if opt.mode == 'S':
                noise = torch.FloatTensor(img_train.size()).normal_(
                    mean=0, std=opt.noiseL / 255.)
            if opt.mode == 'B':
                noise = torch.zeros(img_train.size())
                stdN = np.random.uniform(noiseL_B[0],
                                         noiseL_B[1],
                                         size=noise.size()[0])
                for n in range(noise.size()[0]):
                    sizeN = noise[0, :, :, :].size()
                    noise[n, :, :, :] = torch.FloatTensor(sizeN).normal_(
                        mean=0, std=stdN[n] / 255.)
            imgn_train = img_train + noise
            img_train, imgn_train = Variable(img_train.cuda()), Variable(
                imgn_train.cuda())
            noise = Variable(noise.cuda())
            out_train = model(imgn_train)
            loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2)
            loss.backward()
            optimizer.step()
            # results
            model.eval()
            out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_train, 1.)
            print(
                "[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch + 1, i + 1, len(loader_train), loss.item(), psnr_train))
            # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
        ## the end of each epoch
        model.eval()
        # validate
        psnr_val = 0
        with torch.no_grad():  # `volatile=True` was removed from PyTorch; no_grad replaces it
            for k in range(len(dataset_val)):
                img_val = torch.unsqueeze(dataset_val[k], 0)
                noise = torch.FloatTensor(img_val.size()).normal_(
                    mean=0, std=opt.val_noiseL / 255.)
                imgn_val = img_val + noise
                img_val, imgn_val = img_val.cuda(), imgn_val.cuda()
                out_val = torch.clamp(imgn_val - model(imgn_val), 0., 1.)
                psnr_val += batch_PSNR(out_val, img_val, 1.)
        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch + 1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        # log the images
        out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.)
        Img = utils.make_grid(img_train.data,
                              nrow=8,
                              normalize=True,
                              scale_each=True)
        Imgn = utils.make_grid(imgn_train.data,
                               nrow=8,
                               normalize=True,
                               scale_each=True)
        Irecon = utils.make_grid(out_train.data,
                                 nrow=8,
                                 normalize=True,
                                 scale_each=True)
        writer.add_image('clean image', Img, epoch)
        writer.add_image('noisy image', Imgn, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        # save model
        torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))
Code example #5
File: train_ABX.py  Project: pandalalalala/DnCNN-Dual
def main():
    # Load dataset
    print('Loading dataset ...\n')
    start = time.time()
    dataset_train = Dataset(train=True,
                            data_path_A=opt.A,
                            data_path_B=opt.B,
                            data_path_val_A=opt.val_A,
                            data_path_val_B=opt.val_B,
                            patch_size_dn=30,
                            patch_size_sr=120,
                            stride=5,
                            aug_times=2,
                            if_reseize=True)
    dataset_val = Dataset(train=False,
                          data_path_A=opt.A,
                          data_path_B=opt.B,
                          data_path_val_A=opt.val_A,
                          data_path_val_B=opt.val_B,
                          patch_size_dn=30,
                          patch_size_sr=120,
                          stride=5,
                          aug_times=2,
                          if_reseize=True)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n\n" % int(len(dataset_train)))
    end = time.time()
    print(round(end - start, 7))

    # Build model
    net_dn = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net_dn.apply(weights_init_kaiming)
    criterion_dn = nn.MSELoss(reduction='sum')  # size_average=False is deprecated

    # Build model
    net_sr = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net_sr.apply(weights_init_kaiming)
    criterion_sr = nn.MSELoss(reduction='sum')

    # Move to GPU
    device_ids = [opt.device_ids]  # we will deal with this later
    model_dn = nn.DataParallel(net_dn, device_ids=device_ids).cuda()
    model_sr = nn.DataParallel(net_sr, device_ids=device_ids).cuda()

    criterion_dn.cuda()
    criterion_sr.cuda()
    # Optimizer
    optimizer_dn = optim.Adam(model_dn.parameters(), lr=opt.lr)
    optimizer_sr = optim.Adam(model_sr.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0

    Upsample_4x = nn.Upsample(scale_factor=4, mode='bilinear')
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer_dn.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)

        # set learning rate for the second model
        for param_group_s in optimizer_sr.param_groups:
            param_group_s["lr"] = current_lr

        # train
        for i, data in enumerate(loader_train, 0):
            #print(Variable(data).size())
            img_A_train, img_LB_data = Variable(data[0]), Variable(
                data[1], requires_grad=False)
            img_L_train, img_B_train = torch.split(img_LB_data, 1, dim=1)
            #print(img_A_train.size())
            difference_dn = img_B_train - img_L_train
            img_A_train, img_L_train, img_B_train = Variable(
                img_A_train.cuda()), Variable(img_L_train.cuda()), Variable(
                    img_B_train.cuda())
            difference_dn = Variable(difference_dn.cuda())
            # training step
            model_dn.train()
            model_sr.train()

            # Update super-resolution network
            model_sr.zero_grad()
            optimizer_sr.zero_grad()

            out_train_dn = model_dn(img_B_train)
            loss_dn = criterion_dn(out_train_dn,
                                   difference_dn) / (img_B_train.size()[0] * 2)
            in_train_sr = Variable(img_B_train.cuda() - out_train_dn.cuda())
            in_train_sr = Upsample_4x(in_train_sr)
            difference_sr = in_train_sr - img_A_train

            out_train_sr = model_sr(in_train_sr.detach())
            loss_sr = criterion_sr(out_train_sr,
                                   difference_sr) / (img_A_train.size()[0] * 2)
            # NOTE: the super-resolution update is disabled in this excerpt;
            # loss_sr is computed for logging only.
            #loss_sr.backward()
            #optimizer_sr.step()
            #model_sr.eval()

            # Update denoiser network
            model_dn.zero_grad()
            optimizer_dn.zero_grad()
            out_train_dn = model_dn(img_B_train)
            loss_dn2 = criterion_dn(
                out_train_dn, difference_dn) / (img_B_train.size()[0] * 2)
            loss_dn2.backward()
            optimizer_dn.step()
            model_dn.eval()

            # results
            out_train_dn = torch.clamp(img_B_train - out_train_dn, 0., 1.)
            out_train_sr = torch.clamp(in_train_sr - out_train_sr, 0., 1.)

            psnr_train = batch_PSNR(out_train_sr, img_A_train, 1.)
            print(
                "[epoch %d][%d/%d] loss_dn: %.4f loss_sr: %.4f PSNR_train: %.4f"
                % (epoch + 1, i + 1, len(loader_train), loss_dn2.item(),
                   loss_sr.item(), psnr_train))

            # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss_sr', loss_sr.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
            # note: checkpoints are written every iteration here; moving these
            # saves outside the batch loop would write once per epoch
            torch.save(
                model_dn.state_dict(),
                os.path.join(opt.outf, "epoch_%d_net_dn.pth" % (epoch + 1)))
            torch.save(
                model_sr.state_dict(),
                os.path.join(opt.outf, "epoch_%d_net_sr.pth" % (epoch + 1)))

            img_A_save = torch.clamp(difference_sr, 0., 1.)
            img_A_save = img_A_save[0, :, :].cpu()
            img_A_save = img_A_save[0].detach().numpy().astype(
                np.float32) * 255
            #print(np.amax(img_A_save))
            cv2.imwrite(os.path.join(opt.outf, "%#04dA.png" % (step)),
                        img_A_save)

            img_B_save = torch.clamp(out_train_dn, 0., 1.)
            img_B_save = img_B_save[0, :, :].cpu()
            img_B_save = img_B_save[0].detach().numpy().astype(
                np.float32) * 255
            #print(np.amax(img_A_save))
            cv2.imwrite(os.path.join(opt.outf, "%#04dB.png" % (step)),
                        img_B_save)

        ## the end of each epoch
        model_dn.eval()
        model_sr.eval()
        # validate
        psnr_val = 0
        for k in range(len(dataset_val)):
            img_val_A = torch.unsqueeze(dataset_val[k][0], 0)
            img_val_B = torch.unsqueeze(dataset_val[k][1], 0)
            img_val_A, img_val_B = Variable(img_val_A.cuda()), Variable(
                img_val_B.cuda())

            out_val_dn = model_dn(img_val_B)
            in_val_sr = Variable(img_val_B.cuda() - out_val_dn.cuda())
            in_val_sr = Upsample_4x(in_val_sr)

            out_val_dn = torch.clamp(out_val_dn, 0., 1.)
            out_val_sr = model_sr(in_val_sr)
            out_val_sr = torch.clamp(in_val_sr - out_val_sr, 0., 1.)
            psnr_val += batch_PSNR(out_val_sr, img_val_A, 1.)

        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch + 1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)

        # log the images
        out_train_dn = model_dn(img_B_train)
        out_train_dn = torch.clamp(img_B_train - out_train_dn, 0., 1.)

        in_train_sr = Variable(out_train_dn.cuda(), requires_grad=False)
        in_train_sr.resize_(img_val_A.size())
        out_train_sr = model_sr(in_train_sr)

        Img_A = utils.make_grid(img_A_train.data,
                                nrow=8,
                                normalize=True,
                                scale_each=True)
        Img_B = utils.make_grid(img_B_train.data,
                                nrow=8,
                                normalize=True,
                                scale_each=True)
        Irecon = utils.make_grid(out_train_dn.data,
                                 nrow=8,
                                 normalize=True,
                                 scale_each=True)
        writer.add_image('clean image', Img_A, epoch)
        writer.add_image('input image', Img_B, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)

        # save model
        torch.save(model_dn.state_dict(), os.path.join(opt.outf, 'net_dn.pth'))
        torch.save(model_sr.state_dict(), os.path.join(opt.outf, 'net_sr.pth'))
Code example #6
def main():
    ## Load dataset
    print('Loading dataset ...\n')
    dataset_train = sio.loadmat(r'E:/data/class2/traindata_wan.mat')
    #dataset_train = sio.loadmat(r'E:/DnCNN-PyTorch-master/200data.mat')
    train3dimen = dataset_train['B']
    train3dimen = train3dimen.astype(np.float32)
    #traindata1 = train3dimen.reshape(20480, 100)
    #trainsize = train3dimen.shape[1]*train3dimen.shape[2]
    #for i in range(0,100):
    traindata = np.expand_dims(train3dimen[:, :, :].copy(), 1)
    #traindata = traindata.reshape(1, 20480, 100)
    dataset_noise = sio.loadmat(r'E:/data/class2/trainnoise_wan.mat')
    #dataset_noise = sio.loadmat(r'E:/DnCNN-PyTorch-master/200noise.mat')
    noise3dimen = dataset_noise['Bnoise']
    noise3dimen = noise3dimen.astype(np.float32)
    #noisedata1 = noise3dimen.reshape(20480, 100)
    noisedata = np.expand_dims(noise3dimen[:, :, :].copy(), 1)
    #for j in range(0, 100):
    #train2dimen = train3dimen[i]
    #loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    #print("# of training samples: %d\n" % int(len(dataset_train)))
    ## Build model
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum')  # returns a scalar (the summed loss); size_average=False is deprecated
    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    #noiseL_B=[0,55] # ignored when opt.mode=='S'
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        #print('learning rate %f' % current_lr)
        # train
        #for j in range(0, 100):
        # training step (note: the entire training set is fed as one batch;
        # there is no DataLoader in this example)
        model.train()
        model.zero_grad()
        optimizer.zero_grad()
        img_train = traindata[:, :, :, :]
        noise = noisedata[:, :, :, :]
        imgn_train = img_train + noise
        '''
        np.save("imgn_train.npy",imgn_train)
        mat = np.load("imgn_train.npy")
        sio.savemat('imgn_train.mat', {'imgn_train': mat})
        '''
        img_train = torch.Tensor(img_train)
        img_train = Variable(img_train)
        imgn_train = torch.Tensor(imgn_train)
        imgn_train = Variable(imgn_train)
        img_train = img_train.cuda()
        imgn_train = imgn_train.cuda()
        '''
        imgn_train1 = imgn_train1.cpu()
        imgn_train1 = imgn_train1.data.numpy()
        np.save("imgn_train1.npy",imgn_train1)
        mat1 = np.load("imgn_train1.npy")
        sio.savemat('imgn_train1.mat', {'imgn_train1': mat1})
        '''
        #print (imgn_train.shape)
        noise = torch.Tensor(noise)
        noise = Variable(noise)
        noise = noise.cuda()
        '''
        noise = noise.cpu()
        noise = noise.data.numpy()
        np.save("out_noise.npy",noise)
        mat = np.load("out_noise.npy")
        sio.savemat('outnoise.mat', {'outnoise': mat})
        '''
        out_train = model(imgn_train)
        #print (out_train)
        loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2)
        print(loss)
        loss.backward()
        optimizer.step()
        # results
        model.eval()
        imgn_train = imgn_train.cpu()
        out_train = out_train.cpu()
        out_effective = torch.clamp(imgn_train - out_train, 0., 1.)
        out_effective = out_effective.cpu()
        out_effective = out_effective.data.numpy()
        np.save("gra_effective.npy", out_effective)
        mat1 = np.load("gra_effective.npy")
        sio.savemat(r'E:/data/class2/xs_gra_effective_518.mat',
                    {'gra_effective': mat1})
        #out_train = out_train.cpu()
        out_train = out_train.data.numpy()
        np.save("gra_train.npy", out_train)
        mat = np.load("gra_train.npy")
        sio.savemat(r'E:/data/class2/xs_gra_train_518.mat', {'gra_train': mat})
    '''
    noise = noise.cpu()
    noise = noise.data.numpy()
    np.save("out_noise.npy",noise)
    mat = np.load("out_noise.npy")
    sio.savemat('outnoise.mat', {'outnoise': mat})
    '''
    #out_train = torch.clamp(imgn_train-model(imgn_train), 0., 1.)
    #psnr_train = batch_PSNR(out_train, img_train, 5)
    #print("[epoch %d][%d/100] loss: %.4f PSNR_train: %.4f" %
    #(epoch+1, i+1, loss.item(), psnr_train))
    # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
    #if step % 10 == 0:
    # Log the scalar values
    #writer.add_scalar('loss', loss.item(), step)
    #writer.add_scalar('PSNR on training data', psnr_train, step)
    #step += 1
    ## the end of each epoch
    #model.eval()
    #val
    '''
    dataset_val = sio.loadmat(r'E:/data/matlab/val_datawithnoise.mat')
    imgn_val = dataset_val['DX']
    imgn_val = np.expand_dims(imgn_val[:,:,:].copy(), 1)
    imgn_val = torch.Tensor(imgn_val)
    imgn_val = Variable(imgn_val)
    imgn_val = imgn_val.cuda()
    out_val = torch.clamp(imgn_val-model(imgn_val), 0., 1.)
    #print (out_val)
    out_val = out_val.cpu()
    out_val = out_val.data.numpy()
    np.save("out_val.npy",out_val)
    mat3 = np.load("out_val.npy")
    sio.savemat('out_val.mat', {'out_val': mat3})
    '''
Code example #7
File: USAID_train.py  Project: zeyuxiao1997/USAID
def main():
    # Load dataset
    print('Loading dataset ...\n')
    # VOC dataset loading
    dataset_train = MultiDataSet(cropSize=opt.cropSize,
                                 testFlag=False,
                                 Scale=False)
    dataset_val = MultiDataSet(cropSize=opt.cropSize,
                               testFlag=True,
                               Scale=False)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    loader_val = DataLoader(dataset=dataset_val,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    print("# of validation samples: %d\n" % int(len(dataset_val)))

    # Denoiser
    net = DnCNN(channels=3, num_of_layers=opt.num_of_layers)
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum').cuda()  # size_average=False is deprecated
    model = nn.DataParallel(net).cuda()

    seg = fpn(opt.num_of_SegClass)
    seg_criterion = FocalLoss(gamma=2).cuda()
    seg = nn.DataParallel(seg).cuda()

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[10, 40, 80, 120, 140], gamma=0.1)

    # training
    writer = SummaryWriter(save_dir)
    step = 0
    noiseL_B = [0, 55]  # ignored when opt.mode=='S'
    for epoch in range(opt.epochs):

        scheduler.step()  # note: since PyTorch 1.1 the scheduler is meant to step after optimizer.step(), i.e. at the end of the epoch
        for param_group in optimizer.param_groups:
            current_lr = param_group["lr"]
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            img_train = data

            model.train()
            seg.train()
            model.zero_grad()
            seg.zero_grad()
            optimizer.zero_grad()

            # training step
            if opt.mode == 'S':
                noise = torch.FloatTensor(img_train.size()).normal_(
                    mean=0, std=opt.noiseL / 255.)
            if opt.mode == 'B':
                noise = torch.zeros(img_train.size())
                stdN = np.random.uniform(noiseL_B[0],
                                         noiseL_B[1],
                                         size=noise.size()[0])
                for n in range(noise.size()[0]):
                    sizeN = noise[0, :, :, :].size()
                    noise[n, :, :, :] = torch.FloatTensor(sizeN).normal_(
                        mean=0, std=stdN[n] / 255.)
            imgn_train = img_train + noise
            img_train, imgn_train = Variable(img_train.cuda()), Variable(
                imgn_train.cuda())
            noise = Variable(noise.cuda())
            out_train = model(imgn_train)
            loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2)

            out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_train, 1.)

            # demean segmentation inputs
            seg_input = out_train.data.cpu().numpy()
            for n in range(out_train.size()[0]):
                seg_input[n, :, :, :] = rgb_demean(seg_input[n, :, :, :])
            seg_input = Variable(torch.from_numpy(seg_input).cuda())

            seg_output = seg(seg_input)

            target = (get_NoGT_target(seg_output)).data.cpu()

            target_ = resize_target(target, seg_output.size(2))
            target_ = torch.from_numpy(target_).long()
            target_ = target_.cuda()
            seg_loss = seg_criterion(seg_output, target_)

            for param in seg.parameters():
                param.requires_grad = False

            totalLoss = opt.coef_MSE * loss + (1 - opt.coef_MSE) * seg_loss
            totalLoss.backward()
            optimizer.step()

            if (i + 1) % 1000 == 0:
                print(
                    "[epoch %d][%d/%d]  [SegClass: %d]  loss: %.4f  PSNR_train: %.4f"
                    % (epoch + 1, i + 1, len(loader_train),
                       opt.num_of_SegClass, loss.item(), psnr_train))
                # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1

        ## at the end of every 20th epoch, run validation
        if (epoch + 1) % 20 == 0:
            model.eval()
            psnr_val = 0
            niqe_val = 0
            ssim_val = 0
            with torch.no_grad():
                for i, data in enumerate(loader_val, 0):

                    img_val = data
                    noise = torch.FloatTensor(img_val.size()).normal_(
                        mean=0, std=opt.noiseL / 255.)
                    imgn_val = img_val + noise
                    img_val, imgn_val = Variable(img_val.cuda()), Variable(
                        imgn_val.cuda())

                    out_val = torch.clamp(imgn_val - model(imgn_val), 0., 1.)
                    psnr_val += batch_PSNR(out_val, img_val, 1.)
                    ssim_val += batch_SSIM(out_val, img_val, 1.)

                    if epoch == opt.epochs - 1:
                        niqe_val += batch_NIQE(out_val)

                psnr_val /= len(loader_val)
                ssim_val /= len(loader_val)
                writer.add_scalar('PSNR on validation data', psnr_val, epoch)
                torch.save(
                    model.state_dict(),
                    os.path.join(
                        save_dir,
                        str(opt.num_of_SegClass) + '_USAID_epoch' +
                        str(epoch + 1) + '_' + str(psnr_val) + '.pth'))
                print(
                    "\n[epoch %d] [SegClass: %d] PSNR_val: %.2f SSIM_val: %.4f"
                    % (epoch + 1, opt.num_of_SegClass, psnr_val, ssim_val))
                print(
                    "**********************************************************************"
                )

                if epoch == opt.epochs - 1:
                    niqe_val /= len(loader_val)

                    torch.save(
                        model.state_dict(),
                        os.path.join(
                            save_dir,
                            str(opt.num_of_SegClass) + '_USAID_final.pth'))
                    print(
                        "\n[epoch %d] [SegClass: %d] PSNR_val: %.2f SSIM_val: %.4f NIQE_val: %.4f"
                        % (epoch + 1, opt.num_of_SegClass, psnr_val, ssim_val,
                           niqe_val))
                    print("\n==========  END  ===========")

        # log the images
        out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.)
        Img = utils.make_grid(img_train.data,
                              nrow=8,
                              normalize=True,
                              scale_each=True)
        Imgn = utils.make_grid(imgn_train.data,
                               nrow=8,
                               normalize=True,
                               scale_each=True)
        Irecon = utils.make_grid(out_train.data,
                                 nrow=8,
                                 normalize=True,
                                 scale_each=True)
        writer.add_image('clean image', Img, epoch)
        writer.add_image('noisy image', Imgn, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        # save model
        torch.save(
            model.state_dict(),
            os.path.join(save_dir,
                         str(opt.num_of_SegClass) + '_USAID_lastest.pth'))
Code example #8
def main():

    # creat_readme()
    # choose cpu or gpu
    if torch.cuda.is_available():
        args.device = torch.device('cuda')
    else:
        args.device = torch.device('cpu')

    print('Loading Dataset--')
    dataset_train = RootDataset(root_file=args.trainfile, sigma=args.sigma)
    loader_train = DataLoader(dataset=dataset_train, batch_size=args.batchSize)
    dataset_val = RootDataset(root_file=args.valfile, sigma=args.sigma)
    val_train = DataLoader(dataset=dataset_val, batch_size=args.batchSize)

    # Build model
    model = DnCNN(channels=1,
                  num_of_layers=args.num_of_layers,
                  ker_size=args.kernelSize,
                  o_k_size=args.outKerSize).to(device=args.device)
    if args.model is None:
        model.apply(init_weights)
        print("Creating new model")
    else:
        print("Loading model from file" + args.model)
        model.load_state_dict(torch.load(args.model))
        model.eval()

    # Loss function
    criterion = PatchLoss()
    criterion.to(device=args.device)

    #Optimizer
    MyOptim = optim.Adam(model.parameters(), lr=args.lr)
    MyScheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=MyOptim,
                                                       factor=0.1,
                                                       patience=10,
                                                       verbose=True)

    # training and validation
    step = 0
    training_losses = np.zeros(args.epochs)
    validation_losses = np.zeros(args.epochs)
    for epoch in range(args.epochs):
        print("Epoch #" + str(epoch))
        # training
        train_loss = 0
        for i, data in enumerate(loader_train, 0):
            model.train()
            model.zero_grad()
            MyOptim.zero_grad()
            truth, noise = data
            noise = noise.unsqueeze(1)
            output = model(noise.float().to(args.device), args.outKerSize)
            batch_loss = criterion(
                output.squeeze(1).to(args.device), truth.to(args.device),
                args.patchSize).to(args.device)
            batch_loss.backward()
            MyOptim.step()
            model.eval()
            train_loss += batch_loss.item()
        training_losses[epoch] = train_loss
        print("Training Loss: " + str(train_loss))

        val_loss = 0
        for i, data in enumerate(val_train, 0):
            val_truth, val_noise = data
            val_output = model(
                val_noise.unsqueeze(1).float().to(args.device),
                args.outKerSize)
            output_loss = criterion(
                val_output.squeeze(1).to(args.device),
                val_truth.to(args.device), args.patchSize).to(args.device)
            val_loss += output_loss.item()
        MyScheduler.step(torch.tensor([val_loss]))
        validation_losses[epoch] = val_loss
        print("Validation Loss: " + str(val_loss))
        # save the model
        model.eval()
        torch.save(model.state_dict(), os.path.join(args.outf, 'net.pth'))
    plt.plot(training_losses, label='Training')
    plt.plot(validation_losses, label='Validation')
    plt.legend()
    plt.savefig(args.outf + "/lossplt.png")

    branch = get_all_histograms("./test.root")
    model.to('cpu')
    for image in range(10):

        data = get_bin_weights(branch, image).copy()
        np.savetxt(args.outf + '/truth#' + str(image) + '.txt', data)

        means = np.mean(data)
        stdevs = np.std(data)

        noisy = add_noise(data, args.sigma).copy()
        np.savetxt(args.outf + '/noisy#' + str(image) + '.txt', noisy)

        data_norm = (data - means) / stdevs
        np.savetxt(args.outf + '/truth_norm#' + str(image) + '.txt', data_norm)
        noisy_norm = (noisy - means) / stdevs
        np.savetxt(args.outf + '/noisy_norm#' + str(image) + '.txt',
                   noisy_norm)

        data_norm = torch.from_numpy(data_norm)
        noisy_norm = torch.from_numpy(noisy_norm)
        noisy_norm = noisy_norm.unsqueeze(0)
        noisy_norm = noisy_norm.unsqueeze(1)
        output_norm = model(
            noisy_norm.float(),
            args.outKerSize).squeeze(0).squeeze(0).detach().numpy()
        np.savetxt(args.outf + '/output_norm#' + str(image) + '.txt',
                   output_norm)
        output = (output_norm * stdevs) + means
        np.savetxt(args.outf + '/output#' + str(image) + '.txt', output)
        # data and noisy are already NumPy arrays; no .numpy() conversion needed
        diff = output - data
        noisy_diff = noisy - data
        np.savetxt(args.outf + '/diff#' + str(image) + '.txt', diff)
    model.to(args.device)  # restore the training device rather than assuming CUDA
Code example #9
def main():

    # Load dataset
    print('Loading dataset ...\n')
    dataset = Dataset(img_avg=opt.img_avg,
                      patch_size=opt.patch_size,
                      stride=opt.stride)
    loader = DataLoader(dataset=dataset,
                        num_workers=4,
                        batch_size=opt.batch_size,
                        shuffle=True)
    print(f'{len(dataset)} training sample pairs loaded.')

    # Build model
    print(f'** Creating {opt.net} network **\n')
    model_channels = 1

    if opt.net == 'D':
        net = DnCNN(channels=model_channels, num_of_layers=17)
#     elif opt.net == 'DF':
#         net = DnCNN_BUIFD(channels=model_channels, num_of_layers=17)
    elif opt.net == 'M':
        net = MemNet(in_channels=model_channels)
#     elif opt.net == 'MF':
#         net = MemNet_BUIFD(in_channels=model_channels)
    elif opt.net == 'R':
        net = RIDNET(in_channels=model_channels)
    else:
        raise NotImplementedError('Network model not implemented.')
    net.apply(weights_init_kaiming)

    # Loss metric
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated

    # Move to GPU
    model = nn.DataParallel(net).cuda()
    criterion.cuda()
    print('Trainable parameters: ',
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    # Training
    loss_log = np.zeros(opt.epochs)
    loss_batch_log = []

    for epoch in range(opt.epochs):
        start_time = timer()

        # Learning rate
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / (10.)
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('\nLearning rate = %f' % current_lr)

        # Train
        for idx, (noisy, target) in enumerate(loader):

            model.train()
            model.zero_grad()
            optimizer.zero_grad()

            # Training step
            noise = noisy - target
            target, noisy = Variable(target.cuda()), Variable(noisy.cuda())
            noise = Variable(noise.cuda())

            #             if opt.net[-1] != 'F':
            if opt.net == 'R':
                predicted_noise = noisy - model(noisy)
            else:
                predicted_noise = model(noisy)
            loss_noise = criterion(predicted_noise,
                                   noise) / (noisy.size()[0] * 2)
            loss = loss_noise

            #             else:
            #                 out_train, out_noise_level_train = model(imgn_train)

            #                 loss_img = criterion(out_train, noise) / (imgn_train.size()[0]*2)
            #                 loss_noise_level = criterion(out_noise_level_train, noise_level_train) / (imgn_train.size()[0]*2)
            #                 loss = loss_img + loss_noise_level

            loss.backward()
            optimizer.step()

            loss_batch_log.append(loss.item())
            #             loss_image_log[epoch] += loss_img.item()
            #             loss_noise_level_log[epoch] += loss_noise_level.item()
            loss_log[epoch] += loss.item()

        # Average out over all batches in the epoch
        # loss_image_log[epoch] = loss_image_log[epoch] / len(loader_train)
        # loss_noise_level_log[epoch] = loss_noise_level_log[epoch] / len(loader_train)
        loss_log[epoch] = loss_log[epoch] / len(loader)

        # Save model
        model_name = f'{opt.net}_{opt.img_avg}'
        model_dir = os.path.join('../../net_data/trained_denoisers/',
                                 model_name)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        torch.save(model.state_dict(),
                   os.path.join(model_dir, f'epoch_{epoch}.pth'))

        # Save logs and settings
        if ((epoch + 1) % 10) == 0:
            log_dict = {
                'loss_log': loss_log,
                #'loss_image_log': loss_image_log,
                #'loss_noise_level_log': loss_noise_level_log,
                'loss_batch_log': np.asarray(loss_batch_log)
            }
            fname = os.path.join(model_dir, 'log_dict.pkl')
            with open(fname, 'wb') as f:
                pickle.dump(log_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
            print('wrote', fname)

            settings_dict = {'opt': opt}
            fname = os.path.join(model_dir, 'settings_dict.pkl')
            with open(fname, 'wb') as f:
                pickle.dump(settings_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
            print('wrote', fname)

        # Ending-epoch message
        end_time = timer()
        print(
            f'Epoch {epoch} ({(end_time - start_time)/60.0:.1f} min):    loss={loss_log[epoch]:.4f}'
        )

    print(f'Training {opt.net} complete for all epochs.')
Code example #10
def main():
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    dataset_val = Dataset(train=False)
    loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_A_train = data[:, 0]
            img_B_train = data[:, 1]

            difference = img_B_train - img_A_train

            img_A_train, img_B_train = Variable(img_A_train.cuda()), Variable(img_B_train.cuda())
            difference = Variable(difference.cuda())
            out_train = model(img_B_train)
            loss = criterion(out_train, difference) / (img_B_train.size()[0]*2)
            loss.backward()
            optimizer.step()
            # results
            model.eval()
            out_train = torch.clamp(img_B_train-model(img_B_train), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_A_train, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
            torch.save(model.state_dict(), os.path.join(opt.outf,"epoch_%d_net.pth" %(epoch+1)))
        ## the end of each epoch
        model.eval()
        # validate
        psnr_val = 0
        
        for k in range(len(dataset_val)):
            img_val_A = torch.unsqueeze(dataset_val[k][0], 0)
            imgn_val_B = torch.unsqueeze(dataset_val[k][1], 0)
            difference = imgn_val_B - img_val_A
            img_val_A, imgn_val_B = Variable(img_val_A.cuda()), Variable(imgn_val_B.cuda())
            out_val = torch.clamp(imgn_val_B - model(imgn_val_B), 0., 1.)  # reconstruct by subtracting the predicted residual, as in training
            psnr_val += batch_PSNR(out_val, img_val_A, 1.)
        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch+1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        
        # log the images
        out_train = torch.clamp(img_B_train-model(img_B_train), 0., 1.)
        Img_A = utils.make_grid(img_A_train.data, nrow=8, normalize=True, scale_each=True)
        Img_B = utils.make_grid(img_B_train.data, nrow=8, normalize=True, scale_each=True)
        Irecon = utils.make_grid(out_train.data, nrow=8, normalize=True, scale_each=True)
        writer.add_image('clean image', Img_A, epoch)
        writer.add_image('input image', Img_B, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        # save model
        torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))
Code example #11
File: train.py  Project: Cyn199801/pytorch-learning
def main():
    # Load the training set
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))

    # Build the model
    net = DnCNN(channels=1, num_of_layers=17)
    net.apply(weights_init_kaiming)  # weight initialization

    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    #     criterion.cuda()

    # Define the loss and optimizer
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    # Visualize training curves and metrics with tensorboardX
    time_now = datetime.now().isoformat()
    if not os.path.exists(opt.log_dir):
        os.mkdir(opt.log_dir)
    writer = SummaryWriter(log_dir=os.path.join(opt.log_dir, time_now))

    step = 0
    for epoch in range(opt.epochs):

        # Set the learning rate
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            #             current_lr = opt.lr / 10.
            current_lr = opt.lr
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)

        # Start training
        total_loss = 0
        psnr_train = 0
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data

            noise = torch.FloatTensor(img_train.size()).normal_(
                mean=0, std=opt.noiseL / 255.)
            imgn_train = img_train + noise
            #             print(imgn_train.shape)
            img_train, imgn_train = Variable(img_train.cuda()), Variable(
                imgn_train.cuda())
            noise = Variable(noise.cuda())
            out_train = model(imgn_train)
            loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2)
            loss.backward()
            optimizer.step()

            # Accumulate the loss, compute PSNR, and report
            out_train = torch.clamp(imgn_train - out_train, 0., 1.)
            psnr_train += batch_PSNR(out_train, img_train, 1.)
            total_loss += loss.item()
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                  (epoch + 1, i + 1, len(loader_train), total_loss /
                   (i + 1), psnr_train / (i + 1)))
            writer.add_scalar('loss', total_loss / (i + 1), step)
            writer.add_scalar('PSNR on training data', psnr_train / (i + 1),
                              step)

            # Save training images and the model
            step += 1
            if step % 500 == 0:
                if not os.path.exists(opt.image_path):
                    os.mkdir(opt.image_path)
                cv2.imwrite(opt.image_path + '/' + "{}_pred.jpg".format(step),
                            save_image(out_train))
                cv2.imwrite(opt.image_path + '/' + "{}_input.jpg".format(step),
                            save_image(imgn_train))
                cv2.imwrite(opt.image_path + '/' + "{}_gt.jpg".format(step),
                            save_image(img_train))
        if not os.path.exists(opt.save_model):
            os.makedirs(opt.save_model)
        torch.save(model.state_dict(), os.path.join(opt.save_model, 'net.pth'))