def test(net, loader_test, max_psnr, max_ssim, step):
    """Evaluate `net` over `loader_test` and return (mean SSIM, mean PSNR).

    Also dumps per-batch debug images and, for the first batch that beats
    the running best PSNR or SSIM, saves an input/target/prediction grid
    under samples/<model_name>/.
    """
    net.eval()
    torch.cuda.empty_cache()
    ssim_scores = []
    psnr_scores = []
    grid_pending = True
    for inputs, targets in loader_test:
        inputs = inputs.to(opt.device)
        targets = targets.to(opt.device)
        pred = net(inputs)
        # debug dumps of the current batch
        tfs.ToPILImage()(torch.squeeze(targets.cpu())).save('111.png')
        vutils.save_image(targets.cpu(), 'target.png')
        vutils.save_image(pred.cpu(), 'pred.png')
        ssim1 = ssim(pred, targets).item()
        psnr1 = psnr(pred, targets)
        ssim_scores.append(ssim1)
        psnr_scores.append(psnr1)
        if grid_pending and (psnr1 > max_psnr or ssim1 > max_ssim):
            grid = vutils.make_grid([
                torch.squeeze(inputs.cpu()),
                torch.squeeze(targets.cpu()),
                torch.squeeze(pred.clamp(0, 1).cpu())
            ])
            vutils.save_image(
                grid, f'samples/{model_name}/{step}_{psnr1:.4}_{ssim1:.4}.png')
            grid_pending = False
    return np.mean(ssim_scores), np.mean(psnr_scores)
def evaluate(model_name, dataset, quadratic=False): print(f'[{time.ctime()}] Start evaluating {model_name} on {dataset}') # quadratic = 'quad' in model_name if model_name == 'sepconv': import utilities model = utilities.get_sepconv(weights='l1').cuda() elif model_name == 'qvi-lin' or model_name =='qvi-quad': from code.quadratic.interpolate import interpolate as model elif model_name == 'dain': from code.DAIN.interpolate import interpolate_efficient as model elif model_name == 'sepconv2': checkpoint= torch.load('models/checkpoint_1593886534_seed_0_optimizer=adamax_input_size=4_lr=0.001_lr2=0.0001_weights=None_kernel_size=45_loss=l1_pretrain=1_kernel_size_d=31_kernel_size_scale=4_kernel_size_qd=25_kernel_size_qd_scale=4') model = checkpoint['last_model'].cuda().eval() else: raise NotImplementedError() torch.manual_seed(42) np.random.seed(42) results = defaultdict(list) if dataset == 'lmd': ds = dataloader.large_motion_dataset2(quadratic=quadratic, fold='test', cropped=False) elif dataset == 'adobe240': ds = dataloader.adobe240_dataset(quadratic=quadratic, fold='test') elif dataset == 'gopro': ds = dataloader.gopro_dataset(quadratic=quadratic, fold='test') elif dataset == 'vimeo90k': ds = dataloader.vimeo90k_dataset(quadratic=quadratic, fold='test') else: raise NotImplementedError() ds = dataloader.TransformedDataset(ds, normalize=True, random_crop=False, flip_probs=0) _, _, test = dataloader.split_data(ds, [0, 0, 1]) data_loader = torch.utils.data.DataLoader(test, batch_size=1) with torch.no_grad(): for X,y in tqdm(data_loader, total=len(data_loader)): X = X.cuda() y = y.cuda() y_hat = model(X).clamp(0,1) y.mul_(255) y_hat.mul_(255) results['psnr'].extend(metrics.psnr(y_hat, y)) results['ie'].extend(metrics.interpolation_error(y_hat, y)) results['ssim'].extend(metrics.ssim(y_hat, y)) # store in dataframe results = pd.DataFrame(results) results['model'] = model_name results['dataset'] = dataset return results
def get_images_difference(true_image, pred_image, metric='psnr'):
    """Compute a difference metric between a ground-truth and predicted image.

    Args:
        true_image: ground-truth image (passed to `get_image_data`).
        pred_image: predicted image (passed to `get_image_data`).
        metric: metric name; only 'psnr' is currently supported.

    Returns:
        The metric value as computed by the `metrics` module.

    Raises:
        ValueError: if `metric` is not a supported metric name.
    """
    if metric == 'psnr':
        true_image_data = get_image_data(true_image)
        pred_image_data = get_image_data(pred_image)
        return metrics.psnr(true_image_data, pred_image_data)
    # Raise instead of print + exit(): library code must not terminate the
    # interpreter, and callers can now catch/report the error.
    raise ValueError('Unknown metric ' + str(metric))
def test_epoch(epoch, experiment):
    """Run one evaluation epoch over every test dataloader of `experiment`.

    For each test set: accumulates loss/PSNR/SSIM into running stats, saves
    each predicted image mirroring the source dataset layout under
    ./n3net-results, and writes per-dataset epoch summaries.
    """
    testloaders, testsets = experiment.create_test_dataloaders()
    use_cuda = experiment.use_cuda
    net = experiment.net
    summaries = experiment.summaries
    criterion = experiment.criterion
    net.eval()
    utils.set_random_seeds(1234)  # deterministic evaluation across runs
    with torch.no_grad():
        for i, (testloader, testname) in enumerate(testloaders):
            stats = get_stats()
            print("Testing on {}".format(testname))
            for batch_idx, input_set in enumerate(testloader):
                # Fractional "step" so test summaries interleave with the
                # training step axis in the summary writer.
                experiment.step = epoch * len(experiment.trainloader) + int(
                    batch_idx / len(testloader) * len(experiment.trainloader))
                experiment.iter = batch_idx
                torch.cuda.empty_cache()
                inputs, targets = input_set
                if use_cuda:
                    inputs = inputs.cuda()
                    targets = targets.cuda()
                # inputs, targets = experiment.data_preprocessing(inputs)
                # inputs, targets = Variable(inputs, requires_grad=False), Variable(targets, requires_grad=False)
                pred = torch.clamp(net(inputs), 0.0, 1.0)
                batch_loss = criterion(pred, targets)
                loss = batch_loss.mean()
                stats["loss"].update(loss.data)
                psnr_iter = metrics.psnr(pred, targets, maxval=1).mean().data
                ssim_iter = metrics.ssim(pred, targets)
                stats["psnr"].update(psnr_iter, pred.size(0))
                stats["ssim"].update(ssim_iter.data, pred.size(0))
                progress_bar(
                    batch_idx, len(testloader),
                    'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                    (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))
                # Save the first channel of the first batch element as an
                # 8-bit grayscale image.
                learned_img = Image.fromarray(
                    (255 * pred[0, 0].cpu().data.numpy()).astype(np.uint8))
                # Mirror the dataset's directory layout under ./n3net-results.
                # NOTE(review): the hard-coded absolute prefix ties this code
                # to one specific machine — consider making it configurable.
                filename = os.path.join(
                    './n3net-results',
                    testsets[0][i].at(batch_idx).split(
                        '/home/pacole2/Projects/datasets/DeepLesionTestPreprocessed/miniStudies/'
                    )[1])
                directory = os.path.dirname(filename)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                learned_img.save(os.path.join(filename))
                del pred, inputs, targets
            add_summary(experiment, summaries, testname + "/epoch", epoch)
            for k, stat in stats.items():
                add_summary(experiment, summaries, testname + "/" + k, stat.avg)
def evaluate_metrics(model_path):
    """Print mean PSNR and SSIM of the deblurring model against ground truth,
    alongside the same metrics for the raw blurred images as a baseline.

    Prints four values in order: model PSNR, blurred-baseline PSNR,
    model SSIM, blurred-baseline SSIM.
    """
    model = Deblurrer()
    state = torch.load(model_path, map_location=torch.device('cpu'))
    model.load_state_dict(state)
    model.eval()

    dataset = LFWC(["../data/test/faces_blurred"], "../data/test/faces")
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)

    psnr_model, psnr_baseline = [], []
    ssim_model, ssim_baseline = [], []
    for batch in loader:
        blurred_t = Variable(batch['blurred'])
        sharp_t = Variable(batch['nonblurred'])

        to_pil = transforms.ToPILImage()
        deblurred = to_pil(model(blurred_t)[0])
        sharp = to_pil(sharp_t[0])
        blurred = to_pil(blurred_t[0])

        psnr_model.append(psnr(deblurred, sharp))
        psnr_baseline.append(psnr(blurred, sharp))
        ssim_model.append(ssim1(deblurred, sharp))
        ssim_baseline.append(ssim1(blurred, sharp))

    print(sum(psnr_model) / len(psnr_model))
    print(sum(psnr_baseline) / len(psnr_baseline))
    print(sum(ssim_model) / len(ssim_model))
    print(sum(ssim_baseline) / len(ssim_baseline))
def train(params=None):
    """Train HDRPointwiseNN with MSE loss.

    Resumes from the latest checkpoint in params['ckpt_path'] when one
    exists, logs loss/PSNR every params['log_interval'] steps, and saves
    (plus test-runs) a checkpoint every params['ckpt_interval'] steps.
    """
    os.makedirs(params['ckpt_path'], exist_ok=True)
    device = torch.device("cuda")

    dataset = HDRDataset(params['dataset'], params=params,
                         suffix=params['dataset_suffix'])
    loader = DataLoader(dataset, batch_size=params['batch_size'], shuffle=True)

    model = HDRPointwiseNN(params=params)
    latest = get_latest_ckpt(params['ckpt_path'])
    if latest:
        print('Loading previous state:', latest)
        state_dict = torch.load(latest)
        state_dict, _ = load_params(state_dict)
        model.load_state_dict(state_dict)
    model.to(device)

    criterion = torch.nn.MSELoss()
    optimizer = Adam(model.parameters(), params['lr'])

    step = 0
    for epoch in range(params['epochs']):
        model.train()
        for lowres, fullres, target in loader:
            optimizer.zero_grad()
            lowres = lowres.to(device)
            fullres = fullres.to(device)
            gt = target.to(device)

            output = model(lowres, fullres)
            total_loss = criterion(output, gt)
            total_loss.backward()

            if (step + 1) % params['log_interval'] == 0:
                print(epoch, step, total_loss.item(), psnr(output, gt).item())

            optimizer.step()

            if (step + 1) % params['ckpt_interval'] == 0:
                print('@@ MIN:', torch.min(output), 'MAX:', torch.max(output))
                # checkpointing runs on CPU in eval mode, then training resumes
                model.eval().cpu()
                ckpt_model_filename = "ckpt_" + str(epoch) + '_' + str(step) + ".pth"
                ckpt_model_path = os.path.join(params['ckpt_path'],
                                               ckpt_model_filename)
                torch.save(save_params(model.state_dict(), params),
                           ckpt_model_path)
                test(ckpt_model_path)
                model.to(device).train()

            step += 1
def test_epoch(epoch, experiment):
    """Run one denoising test epoch over all test dataloaders of `experiment`.

    Noise is added to the clean inputs by `experiment.data_preprocessing`;
    loss/PSNR/SSIM of the network output are accumulated and written to the
    experiment summaries per dataset.
    """
    testloaders = experiment.create_test_dataloaders()
    use_cuda = experiment.use_cuda
    net = experiment.net
    summaries = experiment.summaries
    criterion = experiment.criterion
    net.eval()
    utils.set_random_seeds(1234)  # same noise draws on every evaluation
    with torch.no_grad():
        for testloader, testname in testloaders:
            stats = get_stats()
            print("Testing on {}".format(testname))
            for batch_idx, inputs in enumerate(testloader):
                # Fractional "step" interleaves test points with train steps
                # on the summary writer's x-axis.
                experiment.step = epoch * len(experiment.trainloader) + int(
                    batch_idx / len(testloader) * len(experiment.trainloader))
                experiment.iter = batch_idx
                torch.cuda.empty_cache()
                if use_cuda:
                    inputs = inputs.cuda()
                # Preprocessing produces the (noisy input, clean target) pair.
                inputs, targets = experiment.data_preprocessing(inputs)
                # CLAMP values to [0,1] after adding noise
                inputs = torch.clamp(inputs, min=0, max=1)
                inputs, targets = Variable(
                    inputs, requires_grad=False), Variable(targets,
                                                           requires_grad=False)
                pred = net(inputs)
                batch_loss = criterion(pred, targets)
                loss = batch_loss.mean()
                stats["loss"].update(loss.data)
                psnr_iter = metrics.psnr(pred, targets, maxval=1).mean().data
                ssim_iter = metrics.ssim(pred, targets)
                stats["psnr"].update(psnr_iter, pred.size(0))
                stats["ssim"].update(ssim_iter.data, pred.size(0))
                progress_bar(
                    batch_idx, len(testloader),
                    'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                    (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))
                del pred, inputs, targets
            add_summary(experiment, summaries, testname + "/epoch", epoch)
            for k, stat in stats.items():
                add_summary(experiment, summaries, testname + "/" + k, stat.avg)
def val(args, epoch, G, criterion_l1, dataloader, device, writer):
    """Validate generator `G`: reconstruct each video from its first frame,
    a random z and the class label; log mean L1/SSIM/PSNR to TensorBoard.
    """
    with torch.no_grad():
        G.eval()
        l1_scores, ssim_scores, psnr_scores = [], [], []
        for vid, cls in dataloader:
            vid = vid.to(device)
            cls = cls.to(device)
            first_frame = vid[:, :, 0, :, :]
            noise = torch.randn(vid.size(0), args.dim_z).to(device)
            vid_recon = G(first_frame, noise, cls)

            # l1 reconstruction loss on the raw tensors
            l1_scores.append(criterion_l1(vid_recon, vid))

            # Flatten time into the batch dimension as 3x64x64 frames and
            # map pixel range [-1, 1] -> [0, 1] before SSIM/PSNR.
            frames_real = vid.transpose(2, 1).contiguous().view(-1, 3, 64, 64)
            frames_fake = vid_recon.transpose(2, 1).contiguous().view(
                -1, 3, 64, 64)
            frames_real = (frames_real + 1) / 2  # [0, 1]
            frames_fake = (frames_fake + 1) / 2  # [0, 1]

            # ssim
            err_ssim = ssim(frames_real, frames_fake, data_range=1,
                            size_average=False)
            ssim_scores.append(err_ssim.mean().item())
            # psnr
            err_psnr = psnr(frames_real, frames_fake)
            psnr_scores.append(err_psnr.mean().item())

        l1_avg = sum(l1_scores) / len(l1_scores)
        ssim_avg = sum(ssim_scores) / len(ssim_scores)
        psnr_avg = sum(psnr_scores) / len(psnr_scores)
        writer.add_scalar('val/l1_recon', l1_avg, epoch)
        writer.add_scalar('val/ssim', ssim_avg, epoch)
        writer.add_scalar('val/psnr', psnr_avg, epoch)
        writer.flush()
        print("[Epoch %d/%d] [l1: %f] [ssim: %f] [psnr: %f]" %
              (epoch, args.max_epoch, l1_avg, ssim_avg, psnr_avg))
def blend_sweep_blur():
    """Sweep the spectral-blending wedge blur and record NMSE/PSNR/SSIM.

    Blends the coronal and sagittal reconstructions in the 2D Fourier
    domain with a wedge mask of varying blur, scores each blend against
    the ground truth, and writes the sweep to spectral_blur_sweep.csv.

    Bug fix: the CSV header row concatenated a Python list with a NumPy
    array (`['blur'] + all_blurs`), which raises instead of concatenating;
    the array is now converted with list() first.
    """
    with open('train_valid.json', 'r') as json_file:
        json_dict = json.load(json_file)
    test_files = json_dict['test_files']

    gt = load_nib(pjoin(DATA_DIRS['datasets'], test_files[0]))
    gt = reduce_zdim(gt).transpose()

    reco_coronal = load_nib('testing/inv_sp3_coronal.nii.gz')
    reco_sagittal = load_nib('testing/inv_sp3_sagittal.nii.gz')
    # non-negativity constraint
    reco_coronal[reco_coronal < 0] = 0
    reco_sagittal[reco_sagittal < 0] = 0
    # [x, y, z] -> [z, y, x]
    reco_coronal = reco_coronal.transpose()
    reco_sagittal = reco_sagittal.transpose()

    # The two spectra do not depend on the blur value — compute them once
    # instead of on every sweep iteration.
    reco_coronal_fft = np.fft.fftshift(np.fft.fft2(reco_coronal))
    reco_sagittal_fft = np.fft.fftshift(np.fft.fft2(reco_sagittal))

    all_blurs = np.linspace(1, 90, 10)
    all_metrics = {'nmse': [], 'psnr': [], 'ssim': []}
    for blur in all_blurs:
        wedge = create_wedge((512, 512), blur)[None, ...]
        # spectral blending: wedge selects sagittal frequencies, its
        # complement selects coronal frequencies
        blended_fft = wedge * reco_sagittal_fft + (1 - wedge) * reco_coronal_fft
        blended = np.real(np.fft.ifft2(np.fft.fftshift(blended_fft)))

        nmse_value = nmse(blended, gt)
        psnr_value = psnr(blended, gt)
        ssim_value = ssim(blended, gt)
        print(f'{blur} {nmse_value} {psnr_value} {ssim_value}')
        all_metrics['nmse'].append(nmse_value)
        all_metrics['psnr'].append(psnr_value)
        all_metrics['ssim'].append(ssim_value)

    with open('spectral_blur_sweep.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        # list(all_blurs): list + ndarray is not concatenation in NumPy.
        writer.writerow(['blur'] + list(all_blurs))
        writer.writerow(['nmse'] + all_metrics['nmse'])
        writer.writerow(['psnr'] + all_metrics['psnr'])
        writer.writerow(['ssim'] + all_metrics['ssim'])
def do_epoch(dataloader, fold, epoch, train=False):
    """Run one epoch (train or eval) of the L1 + perceptual-loss generator.

    Uses `G`, `optimizer`, `L1_loss`, `Perc_loss`, `results`, `verbose`,
    `n_epochs` and `FOLDS` from the enclosing scope.

    Args:
        dataloader: yields (input, target) tensor pairs.
        fold: split name; must be a member of FOLDS.
        epoch: current epoch index (for logging/storage).
        train: when True, backprop and step the optimizer.
    """
    assert fold in FOLDS
    if verbose:
        pb = tqdm(desc=f'{fold} {epoch+1}/{n_epochs}',
                  total=len(dataloader), leave=True, position=0)
    for i, (X, y) in enumerate(dataloader):
        X = X.cuda()
        y = y.cuda()
        y_hat = G(X)
        l1_loss = L1_loss(y_hat, y)
        feature_loss = Perc_loss(y_hat, y)
        lf_loss = l1_loss + feature_loss
        if train:
            optimizer.zero_grad()
            lf_loss.backward()
            optimizer.step()
        # compute metrics in the [0, 255] image range
        y_hat = (y_hat * 255).clamp(0, 255)
        y = (y * 255).clamp(0, 255)
        psnr = metrics.psnr(y_hat, y)
        ssim = metrics.ssim(y_hat, y)
        ie = metrics.interpolation_error(y_hat, y)
        results.store(
            fold, epoch, {
                'L1_loss': l1_loss.item(),
                'psnr': psnr,
                'ssim': ssim,
                'ie': ie,
                'lf': lf_loss.item()
            })
        if verbose:
            pb.update()
    # update tensorboard
    results.write_tensorboard(fold, epoch)
    sys.stdout.flush()
def train_epoch(experiment):
    """Train `experiment.net` for one epoch with a decayed learning rate.

    Returns:
        bool: True when the decayed learning rate has reached 0, which the
        caller can use as a stop signal.
    """
    use_cuda = experiment.use_cuda
    net = experiment.net
    optimizer = experiment.optimizer
    summaries = experiment.summaries
    criterion = experiment.criterion
    epoch = experiment.epoch

    # Apply the epoch-dependent decay to every parameter group.
    lr = experiment.base_lr * experiment.learning_rate_decay(epoch)
    for group in experiment.optimizer.param_groups:
        group['lr'] = lr
    print('\nEpoch: %d, Learning rate: %f, Expdir %s' %
          (epoch, lr, experiment.expname))

    net.train()
    stats = get_stats()
    trainloader = experiment.trainloader
    for batch_idx, inputs in enumerate(trainloader):
        experiment.epoch_frac = float(batch_idx) / len(trainloader)
        experiment.step = epoch * len(trainloader) + batch_idx
        experiment.iter = batch_idx
        if use_cuda:
            inputs = inputs.cuda()
        optimizer.zero_grad()
        # Blind-SFM preprocessing produces the (input, target) pair.
        inputs, targets = experiment.data_preprocessing_Blind_SFM(inputs)
        inputs, targets = Variable(inputs, requires_grad=False), Variable(
            targets, requires_grad=False)
        pred = net(inputs)
        batch_loss = criterion(pred, targets)
        loss = batch_loss.mean()
        psnr_iter = metrics.psnr(pred, targets, maxval=1).mean().data
        ssim_iter = metrics.ssim(pred, targets)
        stats["loss"].update(loss.data, pred.size(0))
        stats["psnr"].update(psnr_iter, pred.size(0))
        stats["ssim"].update(ssim_iter.data, pred.size(0))
        loss.backward()
        del (loss)  # drop the graph reference before the optimizer step
        optimizer.step()
        # Print running averages roughly 10 times per epoch; the progress
        # bar shows EMA values during the epoch.
        if batch_idx % (len(trainloader) // 10) == 0:
            progress_bar(batch_idx, len(trainloader), "")
            print("Batch {:05d}, ".format(batch_idx), end='')
            for k, stat in stats.items():
                print("{}: {:.4f}, ".format(stat.name, stat.avg), end='')
            print("")
            progress_bar(
                batch_idx, len(trainloader),
                'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                (stats["loss"].ema, stats["psnr"].ema, stats["ssim"].ema))
    stop = (lr == 0)  # schedule exhausted -> signal the caller to stop
    progress_bar(
        batch_idx, len(trainloader),
        'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
        (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))
    add_summary(experiment, summaries, "train/epoch", epoch)
    for k, stat in stats.items():
        add_summary(experiment, summaries, "train/" + k, stat.avg)
    print("")
    return stop
# Corner-crop evaluation loop: each input frame pair is split into corner
# crops, every crop is interpolated, and the results are stitched back
# ("decrop") into a full frame that is scored against the middle frame `ii`.
# Relies on `cropper`, `interpolate`, `results`, `method`, `dataset`,
# `is_quadratic`, `N_TEST` and the counter `k` defined earlier in the script.
gen = dataloader.get_datagenerator(dataset, quadratic=is_quadratic)
for inputs, ii in tqdm(gen, total=N_TEST):
    cornercrops1 = cropper.crop(inputs[0].numpy())
    cornercrops2 = cropper.crop(inputs[1].numpy())
    cropped_results = []
    for corner1, corner2 in zip(cornercrops1, cornercrops2):
        corner1 = torch.Tensor(np.array(corner1))
        corner2 = torch.Tensor(np.array(corner2))
        result = interpolate([corner1, corner2])
        cropped_results.append(result)
    # Reassemble the full-resolution frame from the interpolated crops.
    result = torch.Tensor(cropper.decrop(*cropped_results)).int()
    result = result.unsqueeze(0)  # add a batch dimension for the metrics
    ii = ii.unsqueeze(0)
    # compute metrics
    ssim = metrics.ssim(result, ii).item()
    psnr = metrics.psnr(result, ii).item()
    ie = metrics.interpolation_error(result, ii).item()
    results.store(method=method, dataset=dataset, values=[ssim, psnr, ie])
    k += 1
results.save()
# Per-view NeRF rendering loop: run batched inference for every sample in
# the test dataset, save the predicted RGB frame (and optionally depth as
# .pfm), accumulate PSNR when ground truth is present, then write the GIF.
# Relies on `dataset`, `models`, `embeddings`, `args`, `h`, `w`, `imgs`,
# `psnrs` and `dir_name` defined earlier in the script.
for i in tqdm(range(len(dataset))):
    sample = dataset[i]
    rays = sample['rays'].cuda()
    results = batched_inference(models, embeddings, rays, args.N_samples,
                                args.N_importance, args.use_disp, args.chunk,
                                dataset.white_back)

    img_pred = results['rgb_fine'].view(h, w, 3).cpu().numpy()

    if args.save_depth:
        depth_pred = results['depth_fine'].view(h, w).cpu().numpy()
        depth_pred = np.nan_to_num(depth_pred)  # rendered depth may contain NaNs
        save_pfm(os.path.join(dir_name, f'depth_{i:03d}.pfm'), depth_pred)

    img_pred_ = (img_pred * 255).astype(np.uint8)
    imgs += [img_pred_]
    imageio.imwrite(os.path.join(dir_name, f'{i:03d}.png'), img_pred_)

    if 'rgbs' in sample:  # ground truth available -> accumulate PSNR
        rgbs = sample['rgbs']
        img_gt = rgbs.view(h, w, 3)
        psnrs += [metrics.psnr(img_gt, img_pred).item()]

imageio.mimsave(os.path.join(dir_name, f'{args.scene_name}.gif'), imgs, fps=30)

if psnrs:
    mean_psnr = np.mean(psnrs)
    print(f'Mean PSNR : {mean_psnr:.2f}')
def train_model(self, num_epochs=25, resume=False):
    """Train `self.model` for `num_epochs` epochs and checkpoint each epoch.

    Loss and PSNR are recorded every 10th batch; histories, the model and
    the optimizer state are saved under `self.output_name`, and loss/PSNR
    curves are plotted at the end.

    Args:
        num_epochs: number of training epochs.
        resume: when True, reload previously saved loss/PSNR histories so
            the saved curves continue from the previous run.
    """
    if resume:
        train_losses = list(
            np.load("{}/train_losses.npy".format(self.output_name)))
        train_psnr = list(
            np.load("{}/train_psnr.npy".format(self.output_name)))
        #val_losses = list(np.load("{}/val_losses.npy".format(self.output_name)))
    else:
        train_losses = []
        train_psnr = []
        #val_losses = []
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for batch_idx, (lr, hr, target) in tqdm(enumerate(self.train_loader)):
            self.model.train()
            lr = lr.to(self.device)
            hr = hr.to(self.device)
            target = target.to(self.device)
            self.optimizer.zero_grad()
            preds = self.model(lr, hr)
            # NOTE: MAKING THIS 1 - self.criterion FOR SSIM SPECIFICALLY!!!!
            #loss = 1 - self.criterion(preds, target)
            loss = self.criterion(preds, target)
            loss.backward()
            self.optimizer.step()
            # Record and print metrics every 10th batch only.
            if (batch_idx + 1) % 10 == 0:
                model_psnr = psnr(target, preds).item()
                print(
                    'Train Epoch: {} [{}/{} ({:.0f}%)]Perceptual Loss: {:.6f}\t PSNR: {:.6f}\n'
                    .format(epoch, batch_idx * len(lr),
                            len(self.train_loader.dataset),
                            100. * batch_idx / len(self.train_loader),
                            loss.item(), model_psnr))
                train_psnr.append(model_psnr)
                train_losses.append(loss.item())
        # Checkpoint model/optimizer and metric histories once per epoch.
        torch.save(self.model.state_dict(),
                   '{}/model.pth'.format(self.output_name))
        torch.save(self.optimizer.state_dict(),
                   '{}/optimizer.pth'.format(self.output_name))
        #val_loss = self.validate_model()
        #val_losses.append(val_loss)
        np.save('{}/train_losses.npy'.format(self.output_name), train_losses)
        np.save('{}/train_psnr.npy'.format(self.output_name), train_psnr)
    # NOTE(review): the histories hold one entry per 10 batches, so the
    # x-axis is logging steps rather than epochs as `xlabel` suggests —
    # confirm this is intended.
    plot_epoch(num_epochs, train_losses,
               xlabel='Epochs',
               legend=['Training Loss'],
               save_name="{}/history.png".format(self.output_name))
    plot_epoch(num_epochs, train_psnr,
               xlabel='Epochs',
               legend=['Train PSNR'],
               save_name="{}/psnr.png".format(self.output_name))
classic_time = time() - t0 #Fuzzy approach print('> Run fuzzy approach') t0 = time() fuzzy_img, fcms = fuzzy_clusterize(img, n_clusters) fuzzy_time = time() - t0 print('> Evaluating') mse_incremental = mse(img, incremental_img) mse_classic = mse(img, classic_img) mse_fuzzy = mse(img, fuzzy_img) print('> MSE Incremental: %.4f'%(mse_incremental)) print('> MSE Classic: %.4f'%(mse_classic)) print('> MSE Fuzzy: %.4f'%(mse_fuzzy)) psnr_incremental = psnr(img, incremental_img) psnr_classic = psnr(img, classic_img) psnr_fuzzy = psnr(img, fuzzy_img) print('> PSNR Incremental: %.4f'%(psnr_incremental)) print('> PSNR Classic: %.4f'%(psnr_classic)) print('> PSNR Fuzzy: %.4f'%(psnr_fuzzy)) print('> Elapsed time') print('> Incremental: %.4f'%(incremental_time)) print('> Classic: %.4f'%(classic_time)) print('> Fuzzy: %.4f'%(fuzzy_time)) print('> Saving images') cv2.imwrite('%s/incremental_r%dk%d%s'%(dest_folder, n_regions, n_clusters, filename), incremental_img) cv2.imwrite('%s/classick%d%s'%(dest_folder, n_clusters, filename), classic_img)
def train(self):
    """Adversarial training loop for the mask-to-echo GAN.

    For every batch: one discriminator step (real vs. detached fake), then
    one generator step (adversarial + weighted pixel loss), followed by
    console/wandb logging, periodic sample-image dumps and per-epoch
    checkpointing.

    Bug fix: the generator's adversarial loss previously targeted
    `patch_fake`, which trains the generator to be *detected* as fake; the
    standard objective targets the real label so the generator learns to
    fool the discriminator.
    """
    prev_time = time.time()
    save_model_interval = self.save_model_interval
    for epoch in range(self.loaded_epoch, self.epochs):
        self.epoch = epoch
        for i, batch in enumerate(self.train_loader):
            image, mask, full_mask, weight_map, segment_mask, quality, heart_state, view = batch
            mask = mask.to(self.device)
            image = image.to(self.device)
            full_mask = full_mask.to(self.device)
            weight_map = weight_map.to(self.device)
            segment_mask = segment_mask.to(self.device)

            # Adversarial ground truths for discriminator losses
            patch_real = torch.tensor(np.ones((mask.size(0), *self.patch)),
                                      dtype=torch.float32, device=self.device)
            patch_fake = torch.tensor(np.zeros((mask.size(0), *self.patch)),
                                      dtype=torch.float32, device=self.device)

            # ---------------- Train Discriminator ----------------
            self.generator.eval()
            self.discriminator.train()
            self.optimizer_D.zero_grad()
            fake_echo = self.generator(full_mask)  # * segment_mask  # mask
            # Real loss
            pred_real = self.discriminator(image, mask)
            loss_real = self.criterion_GAN(pred_real, patch_real)
            # Fake loss (detach: no generator gradients in the D step)
            pred_fake = self.discriminator(fake_echo.detach(), mask)
            loss_fake = self.criterion_GAN(pred_fake, patch_fake)
            # Total loss
            loss_D = 0.5 * (loss_real + loss_fake)
            with amp.scale_loss(loss_D, self.optimizer_D) as scaled_loss:
                scaled_loss.backward()
            self.optimizer_D.step()

            # ---------------- Train Generator ----------------
            self.generator.train()
            self.discriminator.eval()
            self.optimizer_G.zero_grad()
            fake_echo = self.generator(full_mask)
            pred_fake = self.discriminator(fake_echo, mask)
            # BUGFIX: target the *real* label so the generator is rewarded
            # when the discriminator believes its output (was patch_fake).
            loss_GAN = self.criterion_GAN(pred_fake, patch_real)
            # Pixel-wise loss, spatially weighted by weight_map
            loss_pixel = torch.mean(
                self.criterion_pixelwise(fake_echo, image) * weight_map)  # * segment_mask
            # Total loss
            loss_G = self.loss_weight_d * loss_GAN + self.loss_weight_g * loss_pixel  # 1 100
            with amp.scale_loss(loss_G, self.optimizer_G) as scaled_loss:
                scaled_loss.backward()
            self.optimizer_G.step()

            # ---------------- Log Progress ----------------
            # Determine approximate time left
            batches_done = self.epoch * len(self.train_loader) + i
            batches_left = self.epochs * len(self.train_loader) - batches_done
            time_left = datetime.timedelta(
                seconds=batches_left * (time.time() - prev_time))
            prev_time = time.time()

            # metrics
            # NOTE(review): PSNR/SSIM compare the generated echo against the
            # *mask*, not the target `image` — confirm this is intended.
            psnr = metrics.psnr(mask, fake_echo)  # * segment_mask
            ssim = metrics.ssim(mask, fake_echo, window_size=11,
                                size_average=True)  # * segment_mask

            # print log
            sys.stdout.write(
                "\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f patch_fake: %f real: %f] [G loss: %f, pixel: %f, adv: %f] PSNR: %f SSIM: %f ETA: %s"
                % (
                    self.epoch,
                    self.epochs,
                    i,
                    len(self.train_loader),
                    loss_D.item(),
                    loss_fake.item(),
                    loss_real.item(),
                    loss_G.item(),
                    loss_pixel.item(),
                    loss_GAN.item(),
                    psnr,
                    ssim,
                    time_left,
                )
            )

            # save images
            if batches_done % self.log_interval == 0:
                self.generator.eval()
                self.discriminator.eval()
                self.sample_images(batches_done)
                self.sample_images2(batches_done)

            # log wandb
            self.step += 1
            if self.use_wandb:
                import wandb
                wandb.log({'loss_D': loss_D,
                           'loss_real_D': loss_real,
                           'loss_fake_D': loss_fake,
                           'loss_G': loss_G,
                           'loss_pixel': loss_pixel,
                           'loss_GAN': loss_GAN,
                           'PSNR': psnr,
                           'SSIM': ssim}, step=self.step)

        # save models (once per epoch)
        if (epoch + 1) % save_model_interval == 0:
            self.save(f'{self.base_dir}/generator_last_checkpoint.bin',
                      model='generator')
            self.save(f'{self.base_dir}/discriminator_last_checkpoint.bin',
                      model='discriminator')
def eval(args):
    """Render every view of the selected NeRF test split.

    Loads coarse+fine NeRF models from `args.eval_ckpt_path`, renders RGB
    and depth for each dataset sample, writes per-view PNGs plus RGB/depth
    GIFs under results/<dataset>/<scene>, and prints mean PSNR when ground
    truth is available.

    Returns:
        (imgs, depths): numpy stacks of rendered RGB frames (CHW) and
        depth visualizations.
    """
    w, h = args.img_wh
    kwargs = {
        'root_dir': args.root_dir,
        'split': args.split,
        'img_wh': tuple(args.img_wh)
    }
    if args.dataset_name == 'llff':
        kwargs['spheric_poses'] = args.spheric_poses
    dataset = dataset_dict[args.dataset_name](**kwargs)

    # Positional encodings: 10 frequency bands for xyz, 4 for view direction.
    embedding_xyz = Embedding(3, 10)
    embedding_dir = Embedding(3, 4)
    nerf_coarse = NeRF()
    nerf_fine = NeRF()
    load_ckpt(nerf_coarse, args.eval_ckpt_path, model_name='nerf_coarse')
    load_ckpt(nerf_fine, args.eval_ckpt_path, model_name='nerf_fine')
    nerf_coarse.cuda().eval()
    nerf_fine.cuda().eval()

    models = [nerf_coarse, nerf_fine]
    embeddings = [embedding_xyz, embedding_dir]

    imgs_gif = []
    imgs = []
    depths = []
    psnrs = []
    dir_name = f'results/{args.dataset_name}/{args.scene_name}'
    os.makedirs(dir_name, exist_ok=True)
    print("output:" + dir_name)

    for i in tqdm(range(len(dataset))):
        sample = dataset[i]
        rays = sample['rays'].cuda()
        results = batched_inference(models, embeddings, rays, args.N_samples,
                                    args.N_importance, args.use_disp,
                                    args.chunk, dataset.white_back)

        #* depth
        # typ = 'fine' if 'rgb_fine' in results else 'coarse'
        depth = visualize_depth(results['depth_fine'].view(h, w)).view(
            3, h, w).cpu().numpy()  # (3, H, W)
        depth = (depth * 255).astype(np.uint8)
        depths.append(depth)
        # imageio.imwrite(os.path.join(dir_name, f'{i:03d}_depth.png'), depth)

        img_pred = results['rgb_fine'].view(h, w, 3).cpu().numpy()

        if args.save_depth:
            depth_pred = results['depth_fine'].view(h, w).cpu().numpy()
            depth_pred = np.nan_to_num(depth_pred)  # depth can contain NaNs
            if args.depth_format == 'pfm':
                save_pfm(os.path.join(dir_name, f'depth_{i:03d}.pfm'),
                         depth_pred)
            else:
                # NOTE(review): the raw-bytes dump goes to the current working
                # directory, not dir_name like the pfm branch — confirm.
                with open(f'depth_{i:03d}', 'wb') as f:
                    f.write(depth_pred.tobytes())

        img_pred_ = (img_pred * 255).astype(np.uint8)
        imgs_gif.append(img_pred_)
        imgs.append(img_pred_.transpose(2, 0, 1))  # HWC -> CHW for the return stack
        imageio.imwrite(os.path.join(dir_name, f'{i:03d}.png'), img_pred_)

        if 'rgbs' in sample:  # ground truth available -> accumulate PSNR
            rgbs = sample['rgbs']
            img_gt = rgbs.view(h, w, 3)
            psnrs += [metrics.psnr(img_gt, img_pred).item()]
        # NOTE(review): debug leftover — rendering stops after two views;
        # remove this break for a full evaluation.
        if i == 1:
            break  # 

    imageio.mimsave(os.path.join(dir_name, f'{args.scene_name}_depth.gif'),
                    depths, fps=30)
    imageio.mimsave(os.path.join(dir_name, f'{args.scene_name}.gif'),
                    imgs_gif, fps=30)

    if psnrs:
        mean_psnr = np.mean(psnrs)
        print(f'Mean PSNR : {mean_psnr:.2f}')

    return np.array(imgs), np.array(depths)
def main(args, model_params, data_params):
    """Build and run the hdrnet TF1 training graph with periodic evaluation.

    Sets up the train (and optional eval) data pipelines, the shared
    inference graph, an Adam optimizer, TensorBoard summaries and a
    tf.train.Supervisor loop; saves a final checkpoint on exit.

    Bug fixes vs. the previous revision:
    - the eval graph consumed `train_data_pipeline.samples`, so evaluation
      silently ran on training data;
    - eval PSNR was computed on the training `prediction` instead of
      `eval_prediction`.
    """
    procname = os.path.basename(args.checkpoint_dir)
    setproctitle.setproctitle('hdrnet_{}'.format(procname))

    log.info('Preparing summary and checkpoint directory {}'.format(
        args.checkpoint_dir))
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    tf.set_random_seed(1234)  # Make experiments repeatable

    # Select an architecture
    mdl = getattr(models, args.model_name)

    # Add model parameters to the graph (so they are saved to disk at checkpoint)
    for p in model_params:
        p_ = tf.convert_to_tensor(model_params[p], name=p)
        tf.add_to_collection('model_params', p_)

    # --- Train/Test datasets ---------------------------------------------------
    data_pipe = getattr(dp, args.data_pipeline)
    with tf.variable_scope('train_data'):
        train_data_pipeline = data_pipe(
            args.data_dir,
            shuffle=True,
            batch_size=args.batch_size,
            nthreads=args.data_threads,
            fliplr=args.fliplr,
            flipud=args.flipud,
            rotate=args.rotate,
            random_crop=args.random_crop,
            params=data_params,
            output_resolution=args.output_resolution)
        train_samples = train_data_pipeline.samples

    if args.eval_data_dir is not None:
        with tf.variable_scope('eval_data'):
            eval_data_pipeline = data_pipe(
                args.eval_data_dir,
                shuffle=False,
                batch_size=1,
                nthreads=1,
                fliplr=False,
                flipud=False,
                rotate=False,
                random_crop=False,
                params=data_params,
                output_resolution=args.output_resolution)
            # BUGFIX: was train_data_pipeline.samples.
            eval_samples = eval_data_pipeline.samples
    # ---------------------------------------------------------------------------

    # Training graph
    with tf.name_scope('train'):
        with tf.variable_scope('inference'):
            prediction = mdl.inference(train_samples['lowres_input'],
                                       train_samples['image_input'],
                                       model_params,
                                       is_training=True)
        loss = metrics.l2_loss(train_samples['image_output'], prediction)
        psnr = metrics.psnr(train_samples['image_output'], prediction)

    # Evaluation graph (reuses the inference variables)
    if args.eval_data_dir is not None:
        with tf.name_scope('eval'):
            with tf.variable_scope('inference', reuse=True):
                eval_prediction = mdl.inference(eval_samples['lowres_input'],
                                                eval_samples['image_input'],
                                                model_params,
                                                is_training=False)
            # BUGFIX: was scored against the training `prediction`.
            eval_psnr = metrics.psnr(eval_samples['image_output'],
                                     eval_prediction)

    # Optimizer
    global_step = tf.contrib.framework.get_or_create_global_step()
    with tf.name_scope('optimizer'):
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        updates = tf.group(*update_ops, name='update_ops')
        log.info("Adding {} update ops".format(len(update_ops)))
        with tf.control_dependencies([updates]):
            opt = tf.train.AdamOptimizer(args.learning_rate)
            minimize = opt.minimize(loss,
                                    name='optimizer',
                                    global_step=global_step)

    # Training stepper operation
    train_op = tf.group(minimize)

    # Save a few graphs to tensorboard
    summaries = [
        tf.summary.scalar('loss', loss),
        tf.summary.scalar('psnr', psnr),
        tf.summary.scalar('learning_rate', args.learning_rate),
        tf.summary.scalar('batch_size', args.batch_size),
    ]

    log_fetches = {"step": global_step, "loss": loss, "psnr": psnr}

    # Train config
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Do not canibalize the entire GPU
    sv = tf.train.Supervisor(logdir=args.checkpoint_dir,
                             save_summaries_secs=args.summary_interval,
                             save_model_secs=args.checkpoint_interval)

    # Train loop
    with sv.managed_session(config=config) as sess:
        sv.loop(args.log_interval, log_hook, (sess, log_fetches))
        last_eval = time.time()
        while True:
            if sv.should_stop():
                log.info("stopping supervisor")
                break
            try:
                step, _ = sess.run([global_step, train_op])
                since_eval = time.time() - last_eval
                if args.eval_data_dir is not None and since_eval > args.eval_interval:
                    log.info("Evaluating on {} images at step {}".format(
                        eval_data_pipeline.nsamples, step))
                    # Average eval PSNR over the whole eval set.
                    p_ = 0
                    for it in range(eval_data_pipeline.nsamples):
                        p_ += sess.run(eval_psnr)
                    p_ /= eval_data_pipeline.nsamples

                    sv.summary_writer.add_summary(
                        tf.Summary(value=[
                            tf.Summary.Value(tag="psnr/eval", simple_value=p_)
                        ]),
                        global_step=step)
                    log.info(" Evaluation PSNR = {:.1f} dB".format(p_))
                    last_eval = time.time()
            except tf.errors.AbortedError:
                log.error("Aborted")
                break
            except KeyboardInterrupt:
                break

        chkpt_path = os.path.join(args.checkpoint_dir, 'on_stop.ckpt')
        log.info("Training complete, saving chkpt {}".format(chkpt_path))
        sv.saver.save(sess, chkpt_path)
        sv.request_stop()
def train(self, config):
    """Train or test the SRCNN model, depending on `config.is_train`.

    Training: plain SGD over h5-packed sub-images with periodic logging
    and checkpointing. Testing: run inference over the prepared grid of
    sub-images, stitch and save the result as a 16-bit TIFF, and print
    PSNR/SSIM for both the SRCNN output and the bicubic baseline.
    """
    if config.is_train:
        input_setup(self.sess, config)
    else:
        # test mode also needs the sub-image grid (nx, ny) to stitch results
        nx, ny, img_name = input_setup(self.sess, config)

    if config.is_train:
        data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "train.h5")
    else:
        data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "test.h5")

    train_data, train_label = read_data(data_dir)

    if self.load(self.checkpoint_dir):
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")

    if config.is_train:
        print("Training...")
        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)
        tf.initialize_all_variables().run()
        counter = 0
        start_time = time.time()
        for ep in xrange(config.epoch):
            # Run by batch images
            batch_idxs = len(train_data) // config.batch_size
            for idx in xrange(0, batch_idxs):
                batch_images = train_data[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = train_label[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels
                                       })
                if counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                        % ((ep+1), counter, time.time()-start_time, err))
                if counter % 500 == 0:
                    self.save(config.checkpoint_dir, counter)
    else:
        print("Testing...")
        print("shape:", train_data.shape)
        result = self.pred.eval({
            self.images: train_data,
            self.labels: train_label
        })
        #print("res shape:", result.shape)
        # Stitch the predicted sub-images back into one image.
        result = merge(result, [nx, ny])
        result = result.squeeze()
        result = (result * 65535.).astype(
            np.uint16
        )  #added for writing tiff image and restore back the original color range
        #print("res is:", result[0:5,0:5])
        output_path = os.path.join(os.getcwd(), config.sample_dir)
        image_path = os.path.join(output_path,
                                  "test_srcnn_" + img_name + ".tiff")  #changed from png
        imsave(result, image_path)
        # this part added for directly comparing the PSNR
        label_path = os.path.join(output_path, "test_ori_" + img_name + ".tiff")
        bicubic_path = os.path.join(output_path,
                                    "test_bicubic_" + img_name + ".tiff")
        bicubic_img = imread(bicubic_path, is_grayscale=True)
        label_img = imread(label_path, is_grayscale=True)
        output_img = imread(image_path, is_grayscale=True)
        #compute psnr
        bicubic_psnr = psnr(label_img, bicubic_img)
        srcnn_psnr = psnr(label_img, output_img)
        #bicubic_img = bicubic_img.astype(np.float)
        #output_img = output_img.astype(np.float)
        #label_img = label_img.astype(np.float)
        #compute ssim
        bicubic_ssim = ssim(label_img, bicubic_img)
        srcnn_ssim = ssim(label_img, output_img)
        print("bicubic PSNR for " + img_name + ": [{}]".format(bicubic_psnr))
        print("SRCNN PSNR for " + img_name + ": [{}]".format(srcnn_psnr))
        print("bicubic SSIM for " + img_name + ": [{}]".format(bicubic_ssim))
        print("SRCNN SSIM for" + img_name + ": [{}]".format(srcnn_ssim))
def evaluate(model,data_handler,criterion):
    """Evaluate a video-prediction model over all batches from ``data_handler``.

    Returns the per-batch averages ``(mae, mse, psnr)``.  When ``args.test``
    is set, additionally dumps the first ~21 sequences as JPEG frames under
    ``args.test/pred_imgs`` and ``args.test/gt_imgs``.

    ``criterion`` is a pair ``(l1_loss, l2_loss)``.  Relies on the
    module-level ``args`` namespace for sequence lengths and batch size.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_mse = 0.
    total_mae = 0.
    total_psnr = 0.
    l1_loss,l2_loss = criterion
    # Number of predicted frames per sequence.
    output_length = args.seq_length-args.input_length
    if args.test:
        i = 0  # running sequence index used to name output folders
        pred_dir = os.path.join(args.test,'pred_imgs')
        gt_dir = os.path.join(args.test,'gt_imgs')
        if not Path(pred_dir).exists():
            os.mkdir(pred_dir)
        if not Path(gt_dir).exists():
            os.mkdir(gt_dir)
    data_handler.begin(do_shuffle = False)
    with torch.no_grad():
        while not data_handler.no_batch_left():
            inputs = data_handler.get_batch()
            data_handler.next()
            # Ground truth is the tail of the sequence the model must predict.
            targets = inputs[:,args.input_length:args.seq_length,:,:,:]
            model.zero_grad()
            outputs = model(inputs)
            # Losses are normalized per frame and per sample.
            mae = l1_loss(outputs, targets)/(args.batch_size* output_length)
            mse = l2_loss(outputs, targets)/(args.batch_size* output_length)
            psnr_now = psnr(outputs, targets)/(args.batch_size* output_length)
            total_mae += mae.item()
            total_mse += mse.item()
            total_psnr += psnr_now.item()
            print('outputs max pixel:'+str(torch.max(outputs)))
            print('targets max pixel:'+str(torch.max(targets)))
            # Rescale predictions to [0, 1] for saving; note this happens
            # AFTER the metrics above, so metrics see the raw outputs.
            outputs = outputs /torch.max(outputs)
            if args.test and i <= 20:
                targets = targets.cpu()
                outputs = outputs.cpu()
                for k in range(args.batch_size):
                    # One folder per sequence; NOTE(review): '{:5d}' pads with
                    # spaces, producing space-prefixed directory/file names —
                    # presumably '{:05d}' was intended; confirm before changing.
                    img_pred_dir = os.path.join(pred_dir,'{:5d}'.format(i))
                    img_gt_dir = os.path.join(gt_dir,'{:5d}'.format(i))
                    if not Path(img_pred_dir).exists():
                        os.mkdir(img_pred_dir)
                    if not Path(img_gt_dir).exists():
                        os.mkdir(img_gt_dir)
                    i += 1
                    for j in range(output_length):
                        # Predicted frame j.  Assumes 64x64 single-channel
                        # frames — the reshape to (64, 64) hard-codes this.
                        img_pred = outputs[k,j,:,:,:].numpy()
                        img_pred = np.transpose(img_pred,(1,2,0))
                        img_pred = np.reshape(img_pred,(64,64))
                        img_pred_name = os.path.join(img_pred_dir,'{:5d}.jpg'.format(j))
                        imsave(img_pred_name,img_pred)
                        # Ground-truth frame; offset by 10 so it lines up
                        # after the input frames in the same folder.
                        img_gt = targets[k,j,:,:,:].numpy()
                        img_gt = np.transpose(img_gt,(1,2,0))
                        img_gt = np.reshape(img_gt,(64,64))
                        img_gt_name = os.path.join(img_gt_dir,'{:5d}.jpg'.format(j+10))
                        imsave(img_gt_name,img_gt)
                        # Input (context) frame j, saved alongside ground truth.
                        img_input = inputs[k,j,:,:,:].cpu().numpy()
                        img_input = np.transpose(img_input,(1,2,0))
                        img_input = np.reshape(img_input,(64,64))
                        img_input_name = os.path.join(img_gt_dir,'{:5d}.jpg'.format(j))
                        imsave(img_input_name,img_input)
    # Average the accumulated totals over the number of batches.
    n = data_handler.total_batches()
    return total_mae/n, total_mse/n, total_psnr/n
def test_model(model, test_loader, args):
    """Evaluate a trained model on ``test_loader`` and write SSIM/PSNR metrics.

    For ``args.modeltype == "flow"`` the model is sampled at several
    temperatures (eps in {0, 0.5, 0.7, 0.8, 1}); for ``"dlogistic"`` the
    model's mean and one drawn sample are evaluated.  Metric averages are
    written to ``runs/<exp>/metric_eval_<testset>_<modelname>.txt``; with
    ``args.visual`` the inputs/targets/predictions are saved as PNGs.
    """
    print("Metric evaluation on {}...".format(args.testset))
    savedir = "runs/{}/snapshots/test_images/{}/".format(
        args.exp_name, args.testset)

    # Per-image metric accumulators.  The flow branch fills the eps-indexed
    # lists; the dlogistic branch fills the mu (index 0) and sample lists.
    ssim_sample = []
    ssim_0 = []
    ssim_05 = []
    ssim_07 = []
    ssim_08 = []
    ssim_1 = []
    psnr_sample = []
    psnr_0 = []
    psnr_05 = []
    psnr_07 = []
    psnr_08 = []
    psnr_1 = []

    model.eval()
    with torch.no_grad():
        for idx, item in enumerate(test_loader):
            y = item[0]
            x = item[1]
            orig_shape = item[2]  # (w, h) of the un-padded image
            w, h = orig_shape

            # Push tensors to GPU
            y = y.to("cuda")
            x = x.to("cuda")

            if args.modeltype == "flow":
                # Sample reconstructions at increasing temperature eps.
                mu0 = model._sample(x=x, eps=0)
                mu05 = model._sample(x=x, eps=0.5)
                mu07 = model._sample(x=x, eps=0.7)
                mu08 = model._sample(x=x, eps=0.8)
                mu1 = model._sample(x=x, eps=1)

                ssim_0.append(metrics.ssim(y, mu0, orig_shape))
                ssim_05.append(metrics.ssim(y, mu05, orig_shape))
                ssim_07.append(metrics.ssim(y, mu07, orig_shape))
                ssim_08.append(metrics.ssim(y, mu08, orig_shape))
                ssim_1.append(metrics.ssim(y, mu1, orig_shape))
                psnr_0.append(metrics.psnr(y, mu0, orig_shape))
                psnr_05.append(metrics.psnr(y, mu05, orig_shape))
                psnr_07.append(metrics.psnr(y, mu07, orig_shape))
                psnr_08.append(metrics.psnr(y, mu08, orig_shape))
                psnr_1.append(metrics.psnr(y, mu1, orig_shape))

                # ---------------------- Visualize Samples---------------------
                if args.visual:
                    os.makedirs(savedir, exist_ok=True)
                    w, h = orig_shape
                    torchvision.utils.save_image(
                        x,
                        savedir + "{}_x.png".format(idx),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        y[:, :, :h, :w],
                        savedir + "{}_y.png".format(idx),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        mu0[:, :, :h, :w],
                        savedir + "{}_eps{}.png".format(idx, 0),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        mu05[:, :, :h, :w],
                        savedir + "{}_eps{}.png".format(idx, 0.5),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        mu07[:, :, :h, :w],
                        savedir + "{}_eps{}.png".format(idx, 0.7),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        mu08[:, :, :h, :w],
                        savedir + "{}_eps{}.png".format(idx, 0.8),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        mu1[:, :, :h, :w],
                        savedir + "{}_eps{}.png".format(idx, 1),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )

            elif args.modeltype == "dlogistic":
                # sample from model
                sample, means = model._sample(x=x)
                ssim_0.append(metrics.ssim(y, means, orig_shape))
                psnr_0.append(metrics.psnr(y, means, orig_shape))
                # Fixed: the sample metrics previously re-used `means`, which
                # just duplicated the mu metrics; evaluate the drawn sample.
                ssim_sample.append(metrics.ssim(y, sample, orig_shape))
                psnr_sample.append(metrics.psnr(y, sample, orig_shape))

                # ---------------------- Visualize Samples-------------
                if args.visual:
                    # only for testing, delete snippet later
                    torchvision.utils.save_image(
                        x[:, :, :h, :w],
                        "{}_x.png".format(idx),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        y[:, :, :h, :w],
                        "{}_y.png".format(idx),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        means[:, :, :h, :w],
                        "{}_mu.png".format(idx),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )
                    torchvision.utils.save_image(
                        sample[:, :, :h, :w],
                        "{}_sample.png".format(idx),
                        nrow=1,
                        padding=2,
                        normalize=False,
                    )

    # Store metrics.  Context manager guarantees the file is closed even on
    # error (the original left an unclosed `file`, shadowing the builtin).
    # NOTE(review): in dlogistic mode the eps-indexed lists are empty, so
    # np.mean([]) yields nan in the report, matching the original behavior.
    metrics_path = "runs/{}/metric_eval_{}_{}.txt".format(args.exp_name,
                                                          args.testset,
                                                          args.modelname)
    with open(metrics_path, "w") as f:
        f.write("ssim mu: {} \n".format(np.mean(ssim_0)))
        # f.write('ssim sample (dlog): {} \n'.format(np.mean(ssim_sample)))
        f.write("ssim mu+05:{} \n".format(np.mean(ssim_05)))
        f.write("ssim mu+07:{} \n".format(np.mean(ssim_07)))
        f.write("ssim mu+08:{} \n".format(np.mean(ssim_08)))
        f.write("ssim mu+1:{} \n".format(np.mean(ssim_1)))
        f.write("psnr mu: {} \n".format(np.mean(psnr_0)))
        # f.write('psnr sample (dlog): {} \n'.format(np.mean(psnr_sample)))
        f.write("psnr mu+05: {} \n".format(np.mean(psnr_05)))
        f.write("psnr mu+07:{} \n".format(np.mean(psnr_07)))
        f.write("psnr mu+08: {} \n".format(np.mean(psnr_08)))
        f.write("psnr mu+1: {} \n".format(np.mean(psnr_1)))
    print("Done testing {} model {} on {} !".format(
        args.modeltype, args.modelname, args.testset))
def main(args, data_params):
    """Train the hdrnet/Resnet enhancement model with adversarial losses.

    Builds the train (and optional eval) input pipelines, the inference and
    loss graphs, three optimizers (generator + two discriminators), moving
    averages and summaries, then runs the TF1 Supervisor training loop,
    periodically evaluating PSNR and checkpointing to ``args.checkpoint_dir``.
    """
    procname = os.path.basename(args.checkpoint_dir)
    #setproctitle.setproctitle('hdrnet_{}'.format(procname))
    log.info('Preparing summary and checkpoint directory {}'.format(
        args.checkpoint_dir))
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    tf.set_random_seed(1234)  # Make experiments repeatable

    # --- Train/Test datasets ---------------------------------------------------
    data_pipe = getattr(dp, args.data_pipeline)
    with tf.variable_scope('train_data'):
        train_data_pipeline = data_pipe(
            args.data_dir,
            shuffle=True,
            batch_size=args.batch_size,
            nthreads=args.data_threads,
            fliplr=args.fliplr,
            flipud=args.flipud,
            rotate=args.rotate,
            random_crop=args.random_crop,
            params=data_params,
            output_resolution=args.output_resolution, scale=args.scale)
        train_samples = train_data_pipeline.samples
        # Derived inputs: high-frequency component and a blurred, bicubic-
        # downscaled low-resolution branch.
        train_samples['high_input'] = Getfilter(5, train_samples['image_input'])
        train_samples['lowres_input1'] = blur(5, train_samples['lowres_input'])
        # // keeps the resize dims integral (plain / yields floats on Py3,
        # which tf.image.resize_images rejects).
        train_samples['low_input'] = tf.image.resize_images(
            train_samples['lowres_input1'],
            [args.output_resolution[0] // args.scale,
             args.output_resolution[1] // args.scale],
            method=tf.image.ResizeMethod.BICUBIC)

    if args.eval_data_dir is not None:
        with tf.variable_scope('eval_data'):
            eval_data_pipeline = data_pipe(
                args.eval_data_dir,
                shuffle=False,
                batch_size=1,
                nthreads=1,
                fliplr=False,
                flipud=False,
                rotate=False,
                random_crop=False,
                params=data_params,
                output_resolution=args.output_resolution, scale=args.scale)
            # Fixed: previously `eval_samples = train_data_pipeline.samples`
            # (copy-paste), so evaluation silently ran on TRAINING batches.
            # Use the eval pipeline and derive the same auxiliary inputs.
            eval_samples = eval_data_pipeline.samples
            eval_samples['high_input'] = Getfilter(5, eval_samples['image_input'])
            eval_samples['lowres_input1'] = blur(5, eval_samples['lowres_input'])
            eval_samples['low_input'] = tf.image.resize_images(
                eval_samples['lowres_input1'],
                [args.output_resolution[0] // args.scale,
                 args.output_resolution[1] // args.scale],
                method=tf.image.ResizeMethod.BICUBIC)
    # ---------------------------------------------------------------------------

    # Random 0/1 swap vectors consumed by the adversarial losses.
    swaps = np.reshape(np.random.randint(0, 2, args.batch_size), [args.batch_size, 1])
    swaps = tf.convert_to_tensor(swaps)
    swaps = tf.cast(swaps, tf.float32)
    swaps1 = np.reshape(np.random.randint(0, 2, args.batch_size), [args.batch_size, 1])
    swaps1 = tf.convert_to_tensor(swaps1)
    swaps1 = tf.cast(swaps1, tf.float32)

    # Training graph
    with tf.variable_scope('inference'):
        prediction = models.Resnet(train_samples['low_input'],
                                   train_samples['high_input'],
                                   train_samples['image_input'])
        loss, loss_content, loss_color, loss_filter, loss_texture, loss_tv, discim_accuracy, discim_accuracy1 = \
            metrics.l2_loss(train_samples['image_output'], prediction, swaps, swaps1, args.batch_size)
        psnr = metrics.psnr(train_samples['image_output'], prediction)
        loss_ssim = MultiScaleSSIM(train_samples['image_output'], prediction)

    # Evaluation graph (shares weights with the training graph).
    if args.eval_data_dir is not None:
        with tf.name_scope('eval'):
            with tf.variable_scope('inference', reuse=True):
                eval_prediction = models.Resnet(
                    eval_samples['low_input'], eval_samples['high_input'], eval_samples['image_input'])
                eval_psnr = metrics.psnr(eval_samples['image_output'], eval_prediction)

    # Optimizer variable partitions.
    # Fixed: the original `not A or B` re-included discriminator1 variables in
    # the generator's list (so the generator optimizer updated them).  A single
    # prefix test excludes both discriminators, since "…discriminator1" names
    # also start with "…discriminator".
    model_vars = [v for v in tf.global_variables()
                  if not v.name.startswith("inference/l2_loss/discriminator")]
    discriminator_vars = [v for v in tf.global_variables()
                         if v.name.startswith("inference/l2_loss/discriminator")]
    discriminator_vars1 = [v for v in tf.global_variables()
                          if v.name.startswith("inference/l2_loss/discriminator1")]
    global_step = tf.contrib.framework.get_or_create_global_step()

    with tf.name_scope('optimizer'):
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        updates = tf.group(*update_ops, name='update_ops')
        log.info("Adding {} update ops".format(len(update_ops)))
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if reg_losses and args.weight_decay is not None and args.weight_decay > 0:
            print("Regularization losses:")
            for rl in reg_losses:
                print(" ", rl.name)
            opt_loss = loss + args.weight_decay*sum(reg_losses)
        else:
            print("No regularization.")
            opt_loss = loss
        with tf.control_dependencies([updates]):
            opt = tf.train.AdamOptimizer(args.learning_rate)
            # Generator minimizes the combined loss; each discriminator
            # maximizes its own loss term (hence the negation).
            minimize = opt.minimize(opt_loss, name='optimizer',
                                    global_step=global_step, var_list=model_vars)
            minimize1 = opt.minimize(-loss_filter, name='optimizer1',
                                     global_step=global_step, var_list=discriminator_vars)
            minimize2 = opt.minimize(-loss_texture, name='optimizer2',
                                     global_step=global_step, var_list=discriminator_vars1)

    # Average loss and psnr for display
    with tf.name_scope("moving_averages"):
        ema = tf.train.ExponentialMovingAverage(decay=0.99)
        update_ma = ema.apply([loss, loss_content, loss_color, loss_filter,
                               loss_texture, loss_tv, discim_accuracy,
                               discim_accuracy1, psnr, loss_ssim])
        loss = ema.average(loss)
        loss_content = ema.average(loss_content)
        loss_color = ema.average(loss_color)
        loss_filter = ema.average(loss_filter)
        loss_texture = ema.average(loss_texture)
        loss_tv = ema.average(loss_tv)
        discim_accuracy = ema.average(discim_accuracy)
        discim_accuracy1 = ema.average(discim_accuracy1)
        psnr = ema.average(psnr)
        loss_ssim = ema.average(loss_ssim)

    # Training stepper operation
    train_op = tf.group(minimize, minimize1, minimize2, update_ma)

    # Save a few graphs to tensorboard
    summaries = [
        tf.summary.scalar('loss', loss),
        tf.summary.scalar('loss_content', loss_content),
        tf.summary.scalar('loss_color', loss_color),
        tf.summary.scalar('loss_filter', loss_filter),
        tf.summary.scalar('loss_texture', loss_texture),
        tf.summary.scalar('loss_tv', loss_tv),
        tf.summary.scalar('discim_accuracy', discim_accuracy),
        tf.summary.scalar('discim_accuracy1', discim_accuracy1),
        tf.summary.scalar('psnr', psnr),
        tf.summary.scalar('ssim', loss_ssim),
        tf.summary.scalar('learning_rate', args.learning_rate),
        tf.summary.scalar('batch_size', args.batch_size),
    ]

    log_fetches = {
        "loss_content": loss_content,
        "loss_color": loss_color,
        "loss_filter": loss_filter,
        "loss_texture": loss_texture,
        "loss_tv": loss_tv,
        "discim_accuracy": discim_accuracy,
        "discim_accuracy1": discim_accuracy1,
        "step": global_step,
        "loss": loss,
        "psnr": psnr,
        "loss_ssim": loss_ssim}

    # Re-derive the variable partitions for the Saver / Supervisor.
    # Fixed: the original used startswith("a" or "b"), which evaluates to
    # startswith("a") only — str.startswith needs a tuple for alternatives;
    # the single shared prefix covers both discriminators here.
    model_vars = [v for v in tf.global_variables()
                  if not v.name.startswith("inference/l2_loss/discriminator")]
    discriminator_vars = [v for v in tf.global_variables()
                         if v.name.startswith("inference/l2_loss/discriminator")]
    discriminator_vars1 = [v for v in tf.global_variables()
                          if v.name.startswith("inference/l2_loss/discriminator1")]

    # Train config
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Do not canibalize the entire GPU
    sv = tf.train.Supervisor(
        local_init_op=tf.initialize_variables(discriminator_vars),
        saver=tf.train.Saver(var_list=model_vars, max_to_keep=100),
        logdir=args.checkpoint_dir,
        save_summaries_secs=args.summary_interval,
        save_model_secs=args.checkpoint_interval)

    # Train loop
    with sv.managed_session(config=config) as sess:
        sv.loop(args.log_interval, log_hook, (sess, log_fetches))
        last_eval = time.time()
        while True:
            if sv.should_stop():
                log.info("stopping supervisor")
                break
            try:
                step, _ = sess.run([global_step, train_op])
                since_eval = time.time()-last_eval
                if args.eval_data_dir is not None and since_eval > args.eval_interval:
                    log.info("Evaluating on {} images at step {}".format(
                        eval_data_pipeline.nsamples, step))
                    p_ = 0
                    # NOTE(review): debug override — caps evaluation at 3
                    # samples (and contradicts the log line above); confirm
                    # whether this should be removed.
                    eval_data_pipeline.nsamples = 3
                    for it in range(eval_data_pipeline.nsamples):
                        p_ += sess.run(eval_psnr)
                    p_ /= eval_data_pipeline.nsamples
                    sv.summary_writer.add_summary(tf.Summary(value=[
                        tf.Summary.Value(tag="psnr/eval", simple_value=p_)]),
                        global_step=step)
                    log.info(" Evaluation PSNR = {:.1f} dB".format(p_))
                    last_eval = time.time()
            except tf.errors.AbortedError:
                log.error("Aborted")
                break
            except KeyboardInterrupt:
                break
        chkpt_path = os.path.join(args.checkpoint_dir, 'on_stop.ckpt')
        log.info("Training complete, saving chkpt {}".format(chkpt_path))
        sv.saver.save(sess, chkpt_path)
        sv.request_stop()
def train_epoch(experiment):
    """Run one training epoch for ``experiment``.

    Applies the epoch's learning-rate schedule, iterates the train loader
    accumulating loss/PSNR/SSIM stats, logs to TensorBoard every 10 batches,
    and dumps input/target/prediction tensors to disk ~20 times per epoch.

    Returns:
        bool: True when the LR schedule has decayed to zero, signalling the
        caller to stop training.
    """
    use_cuda = experiment.use_cuda
    net = experiment.net
    optimizer = experiment.optimizer
    criterion = experiment.criterion
    epoch = experiment.epoch

    # Per-epoch learning-rate schedule.
    lr = experiment.base_lr * experiment.learning_rate_decay(epoch)
    for group in experiment.optimizer.param_groups:
        group['lr'] = lr
    print('\nEpoch: %d, Learning rate: %f, Expdir %s' %
          (epoch, lr, experiment.expname))

    net.train()
    stats = get_stats()
    trainloader = experiment.trainloader
    # Fixed: when len(trainloader) < 20 the original interval was 0 and the
    # modulo below raised ZeroDivisionError.
    dump_interval = max(1, len(trainloader) // 20)
    # Hoisted: lr is fixed for the epoch, and this also keeps `stop` defined
    # when the loader is empty.
    stop = (lr == 0)

    for batch_idx, inputs in enumerate(trainloader):
        experiment.epoch_frac = float(batch_idx) / len(trainloader)
        experiment.step = epoch * len(trainloader) + batch_idx
        experiment.iter = batch_idx

        if use_cuda:
            inputs = inputs.cuda()
        optimizer.zero_grad()
        inputs, targets = experiment.data_preprocessing(inputs)
        inputs, targets = Variable(inputs, requires_grad=False), Variable(
            targets, requires_grad=False)

        pred = net(inputs)
        batch_loss = criterion(pred, targets)
        loss = batch_loss.mean()

        # Per-iteration quality metrics (PSNR uses the batch max as peak).
        psnr_iter = metrics.psnr(pred, targets,
                                 maxval=torch.max(targets)).mean().data
        ssim_iter = metrics.ssim(pred, targets)
        stats["loss"].update(loss.data, pred.size(0))
        stats["psnr"].update(psnr_iter, pred.size(0))
        stats["ssim"].update(ssim_iter.data, pred.size(0))

        loss.backward()
        del loss
        optimizer.step()

        if batch_idx % 10 == 0:
            experiment.writer.add_scalars('train/psnr', {
                'psnr': stats["psnr"].ema,
                'loss': stats["loss"].ema
            }, epoch * len(trainloader) + batch_idx)
            progress_bar(
                batch_idx, len(trainloader),
                'Batch: %05d | Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
                (batch_idx, stats["loss"].ema, stats["psnr"].ema,
                 stats["ssim"].ema))

        if batch_idx % dump_interval == 0:
            # NOTE(review): hard-coded debug dump path; writes tensors to disk
            # ~20 times per epoch — confirm this is still wanted.
            dump_dir = '/mnt/Lab-Kellman/RawData/MachinLearning_Labelled_data/denoising/perf_training_record'
            fname = 'inputs_epoch_%d__batch_%d.npy' % (epoch, batch_idx)
            np.save(os.path.join(dump_dir, fname),
                    inputs.detach().cpu().numpy())
            fname = 'targets_epoch_%d__batch_%d.npy' % (epoch, batch_idx)
            np.save(os.path.join(dump_dir, fname),
                    targets.detach().cpu().numpy())
            fname = 'pred_epoch_%d__batch_%d.npy' % (epoch, batch_idx)
            np.save(os.path.join(dump_dir, fname),
                    pred.detach().cpu().numpy())

    # Final epoch summary line; guarded so an empty loader cannot raise
    # NameError on batch_idx.
    if len(trainloader):
        progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.5f | PSNR: %.2f | SSIM: %.3f' %
            (stats["loss"].avg, stats["psnr"].avg, stats["ssim"].avg))
    print("")
    return stop
def main(args):
    """Restore a trained hdrnet/Resnet checkpoint and report average
    SSIM/PSNR over up to 1001 input/target image pairs.

    Inputs come from ``args.input``; the matching target path is derived
    from ``args.target`` plus the input file name.  Images are read with
    OpenCV, converted BGR->RGB and to float, then fed through the graph.
    """
    setproctitle.setproctitle('hdrnet_run')
    inputs = get_input_list(args.input)

    # -------- Load params ----------------------------------------------------
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Fixed: latest_checkpoint needs no session; the original opened (and
    # leaked) an outer tf.Session that also wrapped reset_default_graph().
    checkpoint_path = tf.train.latest_checkpoint(args.checkpoint_dir)
    if checkpoint_path is None:
        log.error('Could not find a checkpoint in {}'.format(args.checkpoint_dir))
        return

    # -------- Setup graph ----------------------------------------------------
    tf.reset_default_graph()
    # NOTE(review): `width` and `heighth` are not defined in this function —
    # presumably module-level globals; confirm before reuse.
    t_fullres_input = tf.placeholder(tf.float32, (1, width, heighth, 3))
    target = tf.placeholder(tf.float32, (1, width, heighth, 3))
    t_lowres_input = utils.blur(5, t_fullres_input)
    # // keeps the resize dims integral (plain / yields floats on Py3).
    img_low = tf.image.resize_images(
        t_lowres_input, [width // args.scale, heighth // args.scale],
        method=tf.image.ResizeMethod.BICUBIC)
    img_high = utils.Getfilter(5, t_fullres_input)
    with tf.variable_scope('inference'):
        prediction = models.Resnet(img_low, img_high, t_fullres_input)
    ssim = MultiScaleSSIM(target, prediction)
    psnr = metrics.psnr(target, prediction)
    saver = tf.train.Saver()

    # NOTE(review): time.clock() is deprecated and removed in Python 3.8;
    # kept for compatibility with this codebase's Python version — switch to
    # time.perf_counter() when moving to Python 3.
    start = time.clock()
    with tf.Session(config=config) as sess:
        log.info('Restoring weights from {}'.format(checkpoint_path))
        saver.restore(sess, checkpoint_path)
        SSIM = 0
        PSNR = 0
        count = 0  # number of image pairs actually evaluated
        for idx, input_path in enumerate(inputs):
            target_path = args.target + input_path.split('/')[2]
            # Fixed: the format string had one placeholder but two args, so
            # target_path was silently dropped from the log.
            log.info("Processing {} -> {}".format(input_path, target_path))

            im_input = cv2.imread(input_path, -1)  # -1 means read as is, no conversions.
            im_target = cv2.imread(target_path, -1)
            if im_input.shape[2] == 4:
                log.info("Input {} has 4 channels, dropping alpha".format(input_path))
                im_input = im_input[:, :, :3]
                im_target = im_target[:, :, :3]
            im_input = np.flip(im_input, 2)  # OpenCV reads BGR, convert back to RGB.
            im_target = np.flip(im_target, 2)
            im_input = skimage.img_as_float(im_input)
            im_target = skimage.img_as_float(im_target)
            im_input = im_input[np.newaxis, :, :, :]
            im_target = im_target[np.newaxis, :, :, :]

            feed_dict = {
                t_fullres_input: im_input,
                target: im_target
            }
            ssim1, psnr1 = sess.run([ssim, psnr], feed_dict=feed_dict)
            SSIM = SSIM + ssim1
            PSNR = PSNR + psnr1
            count += 1
            if idx >= 1000:
                break
        # Fixed: averages previously divided by a hard-coded 1000 even when
        # fewer (or 1001) images were processed.
        if count:
            print("SSIM:%s,PSNR:%s" % (SSIM / count, PSNR / count))
    end = time.clock()
    print("耗时%s秒" % str(end - start))