# NOTE(review): recovered from a collapsed source line — original loop nesting is
# lost. The statements up to the progress print appear to run once per batch,
# while the timing/logging/checkpoint statements appear to run once per epoch;
# confirm against the full script before relying on this layout.

# lowfreq3.axis('off')

# Periodically visualize the current high/low-frequency decomposition.
if n_count % 800 == 0:
    highfreq1.imshow(High_noise[0, 0].cpu(), cmap='jet')
    highfreq2.imshow(High_origin[0, 0].cpu(), cmap='jet')
    highfreq3.imshow(decompose_output[0, 0].cpu().detach().numpy(), cmap='jet')
    lowfreq1.imshow(y_[0, 0].cpu(), cmap='jet')
    lowfreq2.imshow(x_[0, 0].cpu(), cmap='jet')
    # lowfreq3.imshow(decompose_output[0, 1].cpu().detach().numpy(), cmap='jet')
    plt.show()

# Accumulate the scalar batch loss for the epoch average.
epoch_loss += decompose_loss.item()

if n_count % 10 == 0:
    # BUG?(review): both printed columns use decompose_loss, yet the second is
    # labelled 'loss_hf' — the intended high-frequency loss variable is not
    # visible in this fragment; confirm and substitute it here.
    print('%4d %4d / %4d loss = %2.8f loss_hf = %2.8f ' %
          (epoch + 1, n_count, xs.size(0) // batch_size,
           decompose_loss.item() / batch_size,
           decompose_loss.item() / batch_size))

# --- per-epoch bookkeeping ---
elapsed_time = time.time() - start_time
# Fixed typo in the log message: 'epcoh' -> 'epoch'.
log('epoch = %4d , loss = %4.4f , time = %4.2f s' % (epoch + 1, epoch_loss / n_count, elapsed_time))
# NOTE(review): this rewrites train_result.txt every epoch, so only the last
# epoch's row survives — confirm whether appending was intended.
np.savetxt('train_result.txt', np.hstack((epoch + 1, epoch_loss / n_count, elapsed_time)), fmt='%2.4f')
# torch.save(compose_model.state_dict(), os.path.join(save_dir, 'com_model_%03d.pth' % (epoch+1)))
torch.save(decompose_model.state_dict(), os.path.join(save_dir, 'model_%03d.pth' % (epoch + 1)))
# torch.save(model, os.path.join(save_dir, 'model_%03d.pth' % (epoch + 1)))
# Recovered from a collapsed source line: CLI setup and pretrained-model loading
# for single-image DnCNN inference.
parser.add_argument('--gaussian_noise_level', type=int)
parser.add_argument('--jpeg_quality', type=int)
parser.add_argument('--downsampling_factor', type=int)
opt = parser.parse_args()

if not os.path.exists(opt.outputs_dir):
    os.makedirs(opt.outputs_dir)

# Select the network depth per architecture variant (S: 17 layers; the blind
# B and multi-task 3 variants: 20 layers, matching the DnCNN paper).
if opt.arch == 'DnCNN-S':
    model = DnCNN(num_layers=17)
elif opt.arch == 'DnCNN-B':
    model = DnCNN(num_layers=20)
elif opt.arch == 'DnCNN-3':
    model = DnCNN(num_layers=20)
else:
    # Fail fast with a clear message instead of a later NameError on 'model'.
    raise ValueError('unknown architecture: %s' % opt.arch)

# Copy pretrained weights in place; any key not present in the model is a
# hard error rather than being silently skipped.
state_dict = model.state_dict()
for n, p in torch.load(opt.weights_path, map_location=lambda storage, loc: storage).items():
    if n in state_dict.keys():
        state_dict[n].copy_(p)
    else:
        raise KeyError(n)

model = model.to(device)
model.eval()

# Fixed: split('.')[0] truncated names containing dots (e.g. 'img.v2.png' -> 'img');
# os.path.splitext strips only the final extension.
filename = os.path.splitext(os.path.basename(opt.image_path))[0]
descriptions = ''

# NOTE: 'input' shadows the builtin; kept because downstream (out-of-view) code
# likely references this name.
input = pil_image.open(opt.image_path).convert('RGB')
if __name__ == "__main__": args = get_args() # device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = DnCNN().to(device) optimizer = optim.SGD(model.parameters(), lr=args.lr) criterion = nn.MSELoss() training_data_loader, testing_data_loader = dataloader( args.train_dir, args.test_dir, args.crop_size, args.batch_size) mean = args.noise_mean stddev = args.noise_std num_epochs = args.num_epochs for epoch in range(1, num_epochs + 1): train(epoch, model, optimizer, training_data_loader, mean, stddev, criterion) if epoch % args.eval_interval == 0: validate(model, testing_data_loader, mean, stddev, criterion) if epoch % args.save_interval == 0: save_checkpoint({ 'epoch': epoch + 1, 'arch': model, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), })
# NOTE(review): recovered from a collapsed source line — original loop nesting is
# lost. Per-batch statements (preview, backward, step, progress print) precede
# per-epoch bookkeeping (timing, log, savetxt, checkpoint); confirm against the
# full script.
original2 = fig.add_subplot(gs[1, 0])

# Hide axis decorations on every preview panel.
original.axis('off')
noised.axis('off')
out.axis('off')
final.axis('off')
original2.axis('off')

# Qualitative preview on the first batch of the epoch: clean input, noised
# input, network output, output + low-frequency component, and reference.
if n_count == 0:
    original.imshow(batch_x[0].cpu().detach().numpy().squeeze(0), cmap='rainbow')
    noised.imshow(batch_y[0].cpu().detach().numpy().squeeze(0), cmap='rainbow')
    out.imshow(output[0].cpu().detach().numpy().squeeze(0), cmap='rainbow')
    final.imshow((output + low_)[0].cpu().detach().numpy().squeeze(0), cmap='rainbow')
    original2.imshow(temp[0].cpu().detach().numpy().squeeze(0), cmap='rainbow')
    plt.show()

epoch_loss += loss.item()
# NOTE(review): optimizer.zero_grad() is not visible in this fragment —
# presumably it is called earlier in the batch loop; verify.
loss.backward()
optimizer.step()

if n_count % 10 == 0:
    print('%4d %4d / %4d loss = %2.8f final_loss = %2.8f edge_loss = %2.8f' % (
        epoch + 1, n_count, xs.size(0) // batch_size,
        loss.item() / batch_size, loss2.item() / batch_size, loss1.item() / batch_size))

# --- per-epoch bookkeeping ---
elapsed_time = time.time() - start_time
# Fixed typo in the log message: 'epcoh' -> 'epoch'.
log('epoch = %4d , loss = %4.4f , time = %4.2f s' % (epoch + 1, epoch_loss / n_count, elapsed_time))
# NOTE(review): rewrites train_result.txt each epoch (only last row survives).
np.savetxt('train_result.txt', np.hstack((epoch + 1, epoch_loss / n_count, elapsed_time)), fmt='%2.4f')
# torch.save(model.state_dict(), os.path.join(save_dir, 'model_%03d.pth' % (epoch+1)))
torch.save(model.state_dict(), os.path.join(save_dir, 'model_%03d.pth' % (epoch + 1)))
# NOTE(review): recovered from a collapsed source line — original loop nesting is
# lost. Per-batch statements precede per-epoch bookkeeping; confirm against the
# full script.
lowfreq3.axis('off')

# Every 100 batches, visualize both channels (index 0: high-frequency,
# index 1: low-frequency — presumably; verify channel semantics) of the
# target, input, and network output.
if n_count % 100 == 0:
    highfreq1.imshow(batch_y[0, 0].cpu(), cmap='rainbow')
    highfreq2.imshow(batch_x[0, 0].cpu(), cmap='rainbow')
    highfreq3.imshow(output[0, 0].cpu().detach().numpy(), cmap='rainbow')
    lowfreq1.imshow(batch_y[0, 1].cpu(), cmap='rainbow')
    lowfreq2.imshow(batch_x[0, 1].cpu(), cmap='rainbow')
    lowfreq3.imshow(output[0, 1].cpu().detach().numpy(), cmap='rainbow')
    plt.show()

epoch_loss += loss.item()
# NOTE(review): optimizer.zero_grad() is not visible in this fragment —
# presumably it is called earlier in the batch loop; verify.
loss.backward()
optimizer.step()

if n_count % 10 == 0:
    print('%4d %4d / %4d loss = %2.8f' %
          (epoch + 1, n_count, xs.size(0) // batch_size, loss.item() / batch_size))

# --- per-epoch bookkeeping ---
elapsed_time = time.time() - start_time
# Fixed typo in the log message: 'epcoh' -> 'epoch'.
log('epoch = %4d , loss = %4.4f , time = %4.2f s' % (epoch + 1, epoch_loss / n_count, elapsed_time))
# NOTE(review): rewrites train_result.txt each epoch (only last row survives).
np.savetxt('train_result.txt', np.hstack((epoch + 1, epoch_loss / n_count, elapsed_time)), fmt='%2.4f')
torch.save(model.state_dict(), os.path.join(save_dir, 'model_%03d.pth' % (epoch + 1)))
# torch.save(model, os.path.join(save_dir, 'model_%03d.pth' % (epoch + 1)))