Example #1
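A GAN training module constructor: it pairs a Generator and Discriminator with Adam optimizers and can resume all four state dicts (both networks and both optimizers) from a single checkpoint file passed as from_pretrained.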
    def __init__(self,
                 args,
                 from_pretrained="",
                 generator_path=None,
                 discriminator_path=None,
                 generator_optimizer_path=None,
                 discriminator_optimizer_path=None):
        super().__init__()
        self.args = args
        self.relu = nn.ReLU()
        self.l1_loss = nn.L1Loss()
        self.ssim = SSIM()
        self.generator = Generator()
        self.discriminator = Discriminator()

        self.discriminator_optimizer = torch.optim.Adam(
            self.discriminator.parameters(),
            lr=args.lr,
            betas=(args.b1, args.b2))
        self.generator_optimizer = torch.optim.Adam(
            self.generator.parameters(), lr=args.lr, betas=(args.b1, args.b2))
        if from_pretrained:
            if generator_path is None or discriminator_path is None or \
                generator_optimizer_path is None or discriminator_optimizer_path is None:
                raise ValueError("To train from pretrain provide all paths.")
            checkpoint = torch.load(from_pretrained)
            self.generator.load_state_dict(checkpoint['generator'])
            self.discriminator.load_state_dict(checkpoint['discriminator'])
            self.generator_optimizer.load_state_dict(
                checkpoint['generator_optimizer'])
            self.discriminator_optimizer.load_state_dict(
                checkpoint['discriminator_optimizer'])
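A hedged companion sketch (not part of the original example): the constructor above loads four state dicts from one checkpoint file, so a matching checkpoint could be written as below. The key names come straight from the load calls above; the variable module and the file name are illustrative assumptions.

# module is assumed to be an instance of the class whose __init__ is shown above
checkpoint = {
    'generator': module.generator.state_dict(),
    'discriminator': module.discriminator.state_dict(),
    'generator_optimizer': module.generator_optimizer.state_dict(),
    'discriminator_optimizer': module.discriminator_optimizer.state_dict(),
}
torch.save(checkpoint, "checkpoint.pth")  # pass this path as from_pretrained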
Example #2
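An evaluation fragment for anomaly segmentation: each test image is reconstructed by the model, a per-pixel anomaly map is derived from 1 - SSIM between input and reconstruction, and ROC AUC is scored against the ground-truth mask.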
                gt_path = input_path.replace("test", "ground_truth").replace(
                    ".png", "_mask.png")
                # input = cv2.imread(input_path)
                gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)

                # cv2.namedWindow('image')
                # cv2.imshow('image',input)
                filename = os.path.basename(os.path.normpath(input_path))
                filename, ext = filename.split(".")

                input = Image.open(input_path)
                input = torchvision.transforms.ToTensor()(input).unsqueeze(
                    0).to(device)
                output = model(input)
                diff_avg = 1 - SSIM(input, output)[1]
                diff_avg = torch.mean(diff_avg, dim=1, keepdim=True)
                diff_avg = channelwised_normalize(diff_avg).detach().cpu()

                gt = (gt / 255.0).reshape(-1)
                diff_avg = diff_avg.numpy().squeeze(0).squeeze(0).reshape(-1)
                auc = roc_auc_score(gt, diff_avg)
                print(f"file {input_path}, {defect}: {auc}")
                log.write(f"file {input_path}, {defect}: {auc}\n")
Example #3
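Evaluation on a mixed test set: anomaly maps (1 - SSIM between input and reconstruction) are binarized, scored against the ground truth, and a grid of annotated qualitative results is saved to disk and logged to TensorBoard.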
def test_on_mixed_samples(model,
                          test_loader,
                          loss_op,
                          writer,
                          results_folder,
                          n_saved_results=5,
                          epoch=0):
    """
        Perform evaluation on the test set
        Returns: average MSE error on the whole test set
    """
    print("Testing on mixed set...")
    model.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test_epoch_loss = 0
    test_images = None
    if len(test_loader) > n_saved_results:
        chosen_sample_i = torch.multinomial(torch.Tensor(
            range(len(test_loader))),
                                            num_samples=n_saved_results,
                                            replacement=False)
    else:
        chosen_sample_i = range(len(test_loader))
    n_output_channels = 3
    with torch.no_grad():
        for index, (img, gt) in enumerate(tqdm(test_loader)):
            img = img.to(device)
            n_output_channels = img.shape[1]
            gt = gt.to(device)

            output = model(img)

            # grayscale anomaly map: 1 - SSIM, averaged over channels
            diff_avg = 1 - SSIM(img, output)[1]
            diff_avg = torch.mean(diff_avg, dim=1, keepdim=True)
            diff_avg = channelwised_normalize(diff_avg)
            th_diff, gth_diff, otsu_diff = binarize(diff_avg, n_output_channels)

            loss = 1 - loss_op(diff_avg, gt)
            test_epoch_loss += loss.item()

            # Save the results if requested
            if index in chosen_sample_i:
                io_pair = torch.cat((img, output), dim=3)
                gt_pair = torch.cat((gt, diff_avg), dim=3)
                gt_pair = gt_pair.squeeze(0)
                gt_pair = transforms.ToPILImage()(gt_pair.cpu())
                draw = ImageDraw.Draw(gt_pair)
                font = ImageFont.truetype(font="BebasNeue-Regular.ttf",
                                          size=150)
                # font = ImageFont.truetype("sans-serif.ttf", 16)

                draw.text((0, 0), f"{loss.item():.3f}", (0), font=font)
                draw.text((0, 25), f"{loss.item():.3f}", (255), font=font)
                gt_pair = transforms.ToTensor()(gt_pair).unsqueeze(0).expand(
                    -1, n_output_channels, -1, -1).to(device)
                image = torch.cat((io_pair.to(device), gt_pair.to(device),
                                   th_diff.to(device), gth_diff.to(device),
                                   otsu_diff.to(device)), 0)
                if test_images is None:
                    test_images = image
                else:
                    test_images = torch.cat((test_images, image), dim=0)

        test_epoch_loss = test_epoch_loss / len(test_loader)
        test_images = torchvision.utils.make_grid(test_images, nrow=5)
        test_images = test_images.unsqueeze(0)
        test_images = F.interpolate(test_images, scale_factor=0.1)
        result_image = os.path.join(results_folder, f"val_{epoch}.png")
        torchvision.utils.save_image(test_images, result_image)
        print(f"Test images saved at {results_folder}")

        # write to tensorboard
        if writer:
            test_images = test_images.squeeze(0)
            writer.add_image('Test images', test_images, global_step=epoch)
            writer.add_scalar(f"{loss_op.__class__.__name__}/Test",
                              test_epoch_loss,
                              global_step=epoch)

    return test_epoch_loss
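Example #4
An SRGAN-style training routine: the generator is first pretrained with an MSE content loss, then generator and discriminator are trained adversarially with BCE losses on soft labels plus a feature-space content term from feat_extractor, logging losses and PSNR/SSIM to TensorBoard.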
def train():
    # makedirs with exist_ok also covers the case where the checkpoint root
    # exists but the subdirectories do not
    os.makedirs(os.path.join(Config['checkpoint_path'], 'generators'),
                exist_ok=True)
    os.makedirs(os.path.join(Config['checkpoint_path'], 'discriminators'),
                exist_ok=True)

    device = torch.device('cuda:0' if torch.cuda.is_available()
                          and Config['use_cuda'] else 'cpu')

    transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(
            (Config['img_size'][0] * Config['scale'],
             Config['img_size'][1] * Config['scale'])),
        torchvision.transforms.ToTensor()
    ])

    normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                 std=[0.229, 0.224, 0.225])
    scale = torchvision.transforms.Compose([
        torchvision.transforms.ToPILImage(),
        torchvision.transforms.Resize(Config['img_size']),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    ])
    dataset = torchvision.datasets.ImageFolder(root=Config['train_set_path'],
                                               transform=transform)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=Config['batch_size'],
        shuffle=True,
        num_workers=Config['num_workers'])

    generator = Generator(Config)
    if Config['generator_checkpoint']:
        generator.load_state_dict(torch.load(Config['generator_checkpoint']))

    discriminator = Discriminator(Config)
    if Config['discriminator_checkpoint']:
        discriminator.load_state_dict(
            torch.load(Config['discriminator_checkpoint']))

    feat_extractor = get_feat_extractor()

    content_loss = nn.MSELoss()
    adversarial_loss = nn.BCELoss()

    ones_const = torch.autograd.Variable(torch.ones(Config['batch_size'], 1))

    generator.to(device)
    discriminator.to(device)
    feat_extractor.to('cpu')
    ones_const = ones_const.to(device)  # .to() is not in-place; keep the result

    opt_generator = torch.optim.Adam(generator.parameters(),
                                     lr=Config['generator_lr'])
    opt_discriminator = torch.optim.Adam(discriminator.parameters(),
                                         lr=Config['discriminator_lr'])

    if Config['tensorboard_log']:
        writer_pretrain = SummaryWriter(
            os.path.join(Config['checkpoint_path'], 'pretrain'))
        writer = SummaryWriter(Config['checkpoint_path'])

    low_res = torch.FloatTensor(Config['batch_size'], 3, Config['img_size'][0],
                                Config['img_size'][1])

    for epoch in range(int(Config['batch_size'] * 0.3)):

        mean_generator_content_loss = 0.0

        for i, data in enumerate(data_loader):
            high_res_real, _ = data

            if high_res_real.shape[0] != Config['batch_size']: continue
            for j in range(Config['batch_size']):
                low_res[j] = scale(high_res_real[j])
                high_res_real[j] = normalize(high_res_real[j])

            high_res_real = torch.autograd.Variable(high_res_real.to(device))
            high_res_fake = generator(
                torch.autograd.Variable(low_res).to(device))

            generator.zero_grad()
            generator_content_loss = content_loss(high_res_fake, high_res_real)
            mean_generator_content_loss += generator_content_loss.data

            generator_content_loss.backward()
            opt_generator.step()
            print(
                f'Epoch {epoch} Iter {i}/{len(data_loader)}: MSE Loss {generator_content_loss.data}'
            )
            writer.add_figure('Pretrain SR Generator',
                              visualize(low_res,
                                        high_res_real.cpu().data,
                                        high_res_fake.cpu().data),
                              global_step=epoch)
        writer.add_scalar('generator_mse_loss',
                          mean_generator_content_loss / len(data_loader),
                          epoch)
    torch.save(
        generator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'generators/generator_pretrain.pth'))

    opt_generator = torch.optim.Adam(generator.parameters(),
                                     lr=Config['generator_lr'] * 0.1)
    opt_discriminator = torch.optim.Adam(discriminator.parameters(),
                                         lr=Config['discriminator_lr'] * 0.1)

    for epoch in range(Config['epochs']):
        mean_generator_content_loss = 0.0
        mean_generator_adversarial_loss = 0.0
        mean_generator_total_loss = 0.0
        mean_discriminator_loss = 0.0

        for i, data in enumerate(data_loader):
            high_res_real, _ = data
            psnrs = []
            ssims = []
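            # note: psnrs/ssims are re-created every batch, so the epoch-level
            # PSNR/SSIM written to TensorBoard below reflect only the last batch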

            for j in range(Config['batch_size']):
                low_res[j] = scale(high_res_real[j])
                high_res_real[j] = normalize(high_res_real[j])

            high_res_real = torch.autograd.Variable(high_res_real.to(device))
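            # noisy one-sided labels: real targets in [0.7, 1.2], fake targets
            # in [0.0, 0.3] (a common GAN label-smoothing trick)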
            target_real = torch.autograd.Variable(
                torch.rand(Config['batch_size'], 1) * 0.5 + 0.7).to(device)
            target_fake = torch.autograd.Variable(
                torch.rand(Config['batch_size'], 1) * 0.3).to(device)
            high_res_fake = generator(
                torch.autograd.Variable(low_res).to(device))

            discriminator.zero_grad()
            discriminator_loss = adversarial_loss(
                discriminator(high_res_real), target_real) + adversarial_loss(
                    discriminator(high_res_fake), target_fake)

            mean_discriminator_loss += discriminator_loss.data

            discriminator_loss.backward(retain_graph=True)
            opt_discriminator.step()

            generator.zero_grad()

            real_features = torch.autograd.Variable(
                feat_extractor(high_res_real.to('cpu')).to(device).data)
            fake_features = feat_extractor(high_res_fake.to('cpu')).to(device)

            generator_content_loss = content_loss(
                high_res_fake, high_res_real) + 0.006 * content_loss(
                    fake_features, real_features)
            mean_generator_content_loss += generator_content_loss.data

            generator_adversarial_loss = adversarial_loss(
                discriminator(high_res_fake), ones_const.to(device))
            mean_generator_adversarial_loss += generator_adversarial_loss.data

            generator_total_loss = generator_content_loss + 1e-3 * generator_adversarial_loss
            mean_generator_total_loss += generator_total_loss.data

            generator_total_loss.backward()
            opt_generator.step()

            for j in range(Config['batch_size']):
                psnrs.append(PSNR()(high_res_real[j], high_res_fake[j]))
                ssims.append(SSIM()(high_res_real[j], high_res_fake[j]))

            print(
                f'Epoch {epoch} Iter {i}/{len(data_loader)}: Discriminator Loss {discriminator_loss.data}, Generator Loss: {generator_content_loss.data}/{generator_adversarial_loss.data}/{generator_total_loss.data}'
            )
            writer.add_figure('Training Super Resolution',
                              visualize(low_res,
                                        high_res_real.cpu().data,
                                        high_res_fake.cpu().data),
                              global_step=epoch)
        psnr = np.array(psnrs).mean()
        ssim = np.array(ssims).mean()
        writer.add_scalar('discriminator_loss',
                          mean_discriminator_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('generator_content_loss',
                          mean_generator_content_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('generator_adversarial_loss',
                          mean_generator_adversarial_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('generator_total_loss',
                          mean_generator_total_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('PSNR', psnr, global_step=epoch)
        writer.add_scalar('SSIM', ssim, global_step=epoch)
    torch.save(
        generator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'generators/generator_final.pth'))
    torch.save(
        discriminator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'discriminators/discriminator_final.pth'))
Example #5
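Wavelet-domain super-resolution inference: the network predicts the four DWT sub-bands, pywt.idwt2 reconstructs the spatial result, and this is added to the bicubic upscale x_bic before computing border-cropped PSNR and SSIM.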
    # predict by pretrained model
    result = model.predict(input_data, batch_size=1, verbose=1)
    result = np.squeeze(result)
    # inverse Wavelet transform
    rA, rH, rV, rD = (result[:, :, 0], result[:, :, 1],
                      result[:, :, 2], result[:, :, 3])
    x_sr = pywt.idwt2((rA, (rH, rV, rD)), 'haar')

    # compute metrics, remove border first
    psnr_val = PSNR(
        x[scale:-scale, scale:-scale],
        x_sr[scale:-scale, scale:-scale] + x_bic[scale:-scale, scale:-scale])
    ssim_val = SSIM(
        x[scale:-scale, scale:-scale],
        x_sr[scale:-scale, scale:-scale] + x_bic[scale:-scale, scale:-scale])
    psnr_list.append(psnr_val)
    ssim_list.append(ssim_val)
    print('%s\tPSNR: %.4f\tSSIM: %.4f' % (image_name, psnr_val, ssim_val))

    # display images
    if option.display:
        plt.figure()
        plt.subplot(221)
        plt.imshow(x_bic)
        plt.title('bicubic'), plt.axis('off')
        plt.subplot(222)
        plt.imshow(x_sr)
        plt.title('SR'), plt.axis('off')
        plt.subplot(223)
Example #6
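A TensorLayer SRGAN-style trainer and validator: mode 1 runs MSE-only generator initialization followed by adversarial training with a VGG feature loss; mode 2 loads a checkpoint, super-resolves a validation set, and compares the model's PSNR/SSIM against bicubic upsampling.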
	def __init__(self, CONFIG):

		if CONFIG.mode == 1:
		
			gen_model = get_model('G', CONFIG.gen_model)
			dis_model = get_model('D', CONFIG.dis_model)
			VGG = tl.models.vgg19(pretrained=True, end_with='pool4', mode='static')

			lr_init = 1e-4
			lr_v = tf1.Variable(lr_init)
			beta1 = 0.9
			n_epoch_init = CONFIG.init_epoch  # e.g. 20
			n_epoch = CONFIG.total_epoch  # e.g. 100
			batch_size = CONFIG.batch_size  # e.g. 8
			decay_every = int(n_epoch / 2)
			lr_decay = 0.1
			resume_epoch = 0
			 
			if CONFIG.load_weights:
				resume_epoch = CONFIG.model_epoch
				if CONFIG.gan_init:
					gen_model.load_weights('Checkpoints/GAN_INIT_{}_EPID_{}.h5'.format(CONFIG.gen_model, CONFIG.model_epoch))
					resume_epoch = 0
				else:
					gen_model.load_weights('Checkpoints/GAN_{}_EPID_{}.h5'.format(CONFIG.gen_model, CONFIG.model_epoch))
					dis_model.load_weights('Checkpoints/DIS_{}_GAN_{}_EPID_{}.h5'.format(CONFIG.dis_model, CONFIG.gen_model, CONFIG.model_epoch))
		
			g_optimizer_init = tf2.optimizers.Adam(lr_v, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
			g_optimizer = tf2.optimizers.Adam(lr_v, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
			d_optimizer = tf2.optimizers.Adam(lr_v, beta_1=0.9, beta_2=0.999, epsilon=1e-07)

			gen_model.train()
			dis_model.train()
			VGG.train()

			train_ds = get_train_dataset(CONFIG)

			if not CONFIG.load_weights or CONFIG.gan_init:
			
				print('##	initial learning (G)')
				
				for epoch in range(n_epoch_init):
					for step, (lr_patchs, hr_patchs) in enumerate(train_ds):
						if lr_patchs.shape[0] != batch_size:  
							break
						step_time = time.time()
						with tf1.GradientTape() as tape:
							out_bicu = generate_bicubic_samples(lr_patchs.numpy(), CONFIG)
							fake_patchs = gen_model([lr_patchs, out_bicu])
							mse_loss = tl.cost.mean_squared_error(fake_patchs, hr_patchs, is_mean=True)
						grad = tape.gradient(mse_loss, gen_model.trainable_weights)
						g_optimizer_init.apply_gradients(zip(grad, gen_model.trainable_weights))
						print("Epoch: [{}/{}] step: [{}/{}] time: {:.3f}s, mse: {:.6f} ".format(
						epoch+1+resume_epoch, resume_epoch+n_epoch_init, step+1, CONFIG.no_of_batches, time.time() - step_time, mse_loss))
					
					path = 'Training_outputs/gan_init_{}_train_{}.png'.format(CONFIG.gen_model, epoch+1+resume_epoch)
					tl.vis.save_images(fake_patchs.numpy(), [2, CONFIG.batch_size//2], path)
					
					if ((epoch+1+resume_epoch) % CONFIG.save_interval) == 0:
						gen_model.save_weights('Checkpoints/GAN_INIT_{}_EPID_{}.h5'.format(CONFIG.gen_model, epoch+1+resume_epoch))
				
				gen_model.save_weights('Checkpoints/GAN_INIT_{}_EPID_{}.h5'.format(CONFIG.gen_model, n_epoch_init + resume_epoch))
			
			
			
			print('##	adversarial learning (G, D)')
				
			for epoch in range(n_epoch):

				for step, (lr_patchs, hr_patchs) in enumerate(train_ds):
					if lr_patchs.shape[0] != batch_size: # if the remaining data in this epoch < batch_size
						break
					step_time = time.time()
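					# persistent=True: this tape is queried twice below, once for the
					# generator gradients (g_loss) and once for the discriminator (d_loss)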
					with tf1.GradientTape(persistent=True) as tape:
						out_bicu = generate_bicubic_samples(lr_patchs.numpy(), CONFIG)
						fake_patchs = gen_model([lr_patchs, out_bicu])

						logits_fake = dis_model(fake_patchs)
						logits_real = dis_model(hr_patchs)

						feature_fake = VGG((fake_patchs+1)/2.) # the pre-trained VGG uses the input range of [0, 1]
						feature_real = VGG((hr_patchs+1)/2.)
		      
						d_loss1 = tl.cost.sigmoid_cross_entropy(logits_real, tf1.ones_like(logits_real))
						d_loss2 = tl.cost.sigmoid_cross_entropy(logits_fake, tf1.zeros_like(logits_fake))
						d_loss = d_loss1 + d_loss2
			      
						g_gan_loss = 1e-3 * tl.cost.sigmoid_cross_entropy(logits_fake, tf1.ones_like(logits_fake))
						mse_loss = tl.cost.mean_squared_error(fake_patchs, hr_patchs, is_mean=True)

						vgg_loss = 2e-6 * tl.cost.mean_squared_error(feature_fake, feature_real, is_mean=True)
						g_loss = mse_loss + vgg_loss + g_gan_loss

					grad = tape.gradient(g_loss, gen_model.trainable_weights)
					g_optimizer.apply_gradients(zip(grad, gen_model.trainable_weights))
					grad = tape.gradient(d_loss, dis_model.trainable_weights)
					d_optimizer.apply_gradients(zip(grad, dis_model.trainable_weights))
					print("Epoch: [{}/{}] step: [{}/{}] time: {:.3f}s, g_loss(mse:{:.6f}, vgg:{:.6f}, adv:{:.6f}) d_loss: {:.6f}".format(
					epoch+1+resume_epoch, resume_epoch + n_epoch, step+1, CONFIG.no_of_batches, time.time() - step_time, mse_loss, vgg_loss, g_gan_loss, d_loss))
					
				# update the learning rate
				'''if (epoch+resume_epoch) % decay_every == 0:
					new_lr_decay = lr_decay**((epoch+resume_epoch)// decay_every)
					lr_v.assign(lr_init * new_lr_decay)
					log = " ** new learning rate: %f (for GAN)" % (lr_init * new_lr_decay)
					print(log)
				'''
				if (epoch+1+resume_epoch) % CONFIG.save_interval == 0:
					gen_model.save_weights('Checkpoints/GAN_{}_EPID_{}.h5'.format(CONFIG.gen_model, epoch+1+resume_epoch))
					dis_model.save_weights('Checkpoints/DIS_{}_GAN_{}_EPID_{}.h5'.format(CONFIG.dis_model, CONFIG.gen_model, epoch+1+resume_epoch))
					print("Save time: {}content".format(time.asctime( time.localtime(time.time()))))
					for i in range(CONFIG.batch_size):
						if CONFIG.gen_model==1:
							lrimg = np.squeeze(lr_patchs[i], axis =-1)
							lrimg = np.pad(lrimg, ((64, 64), (64, 64)), constant_values=(255.0))
							#opimg = cast_uint8(fake_patchs[i].numpy())
							opimg = fake_patchs[i].numpy()
							combine_imgs = np.concatenate((lrimg[:,:,np.newaxis], out_bicu[i], opimg, hr_patchs[i]), axis = 1)
						else:
							lrimg = np.pad(lr_patchs[i], ((192, 192), (192, 192), (0, 0)), constant_values=(255.0))
							#opimg = cast_uint8(fake_patchs[i].numpy())
							opimg = fake_patchs[i].numpy()
							combine_imgs = np.concatenate((lrimg, out_bicu[i], opimg, hr_patchs[i]), axis = 1)
						path = 'Training_outputs/id_{}_gan_{}_train_{}.png'.format(i+1, CONFIG.gen_model, epoch+1+resume_epoch)
						tl.vis.save_image(combine_imgs,path)

			gen_model.save_weights('Checkpoints/GAN_{}_FINAL.h5'.format(CONFIG.gen_model))
			dis_model.save_weights('Checkpoints/DIS_{}_GAN_{}_FINAL.h5'.format(CONFIG.dis_model, CONFIG.gen_model))
					

				  

		elif CONFIG.mode == 2:  ## Validation

			model = get_model('G', CONFIG.gen_model)
			model.load_weights('Checkpoints/GAN_{}_EPID_{}.h5'.format(CONFIG.gen_model, CONFIG.model_epoch))
			model.eval()  ## disable dropout, batch norm moving avg ...

			save_time = time.time()
			
			## Reading Validation dataset
			lrimg_file_list = tl.files.load_file_list(path=CONFIG.dir_val_in, regx='.*.png', printable=False)
			hrimg_file_list = tl.files.load_file_list(path=CONFIG.dir_val_target, regx='.*.png', printable=False)
			lrimg_file_list.sort(key=tl.files.natural_keys)
			hrimg_file_list.sort(key=tl.files.natural_keys)
			lrimg_list = np.array(tl.vis.read_images(lrimg_file_list, path=CONFIG.dir_val_in, n_threads=32))
			hrimg_list = np.array(tl.vis.read_images(hrimg_file_list, path=CONFIG.dir_val_target, n_threads=32)) 
			
			if CONFIG.gen_model==1:
				lrimg_list = lrimg_list[:,:,:,np.newaxis] 
				hrimg_list = hrimg_list[:,:,:,np.newaxis]

			bcimg_list = generate_bicubic_samples(lrimg_list,CONFIG)
			opimg_list = model([tf1.cast(lrimg_list,tf1.float32), tf1.cast(bcimg_list,tf1.float32)]) 
			opimg_list = opimg_list.numpy()

			bicubic_psnr, model_psnr = PSNR(hrimg_list, bcimg_list, opimg_list)
			bicubic_ssim, model_ssim = SSIM(hrimg_list, bcimg_list, opimg_list)
			
			for i in range(lrimg_list.shape[0]):
				name = lrimg_file_list[i].split('/')[-1].split('.')[0]
				if CONFIG.gen_model==1:
					lrimg = np.pad(lrimg_list[i], ((64, 64), (64, 64),(0, 0)), constant_values=(255.0))
				else:
					lrimg = np.pad(lrimg_list[i], ((192, 192), (192, 192), (0, 0)), constant_values=(255.0))
				combine_imgs = np.concatenate((lrimg, bcimg_list[i], opimg_list[i], hrimg_list[i]), axis = 1)

				path = 'Validation_outputs/{}_gan_{}_val_{}.png'.format(name, CONFIG.gen_model, CONFIG.model_epoch)
				tl.vis.save_image(combine_imgs, path)
            
			print(np.stack((model_psnr, bicubic_psnr), axis=-1))
			print(np.stack((model_ssim, bicubic_ssim), axis=-1))
			print(np.subtract(model_psnr, bicubic_psnr))
			print('SUM(PSNR DIFF): {}'.format(np.sum(np.subtract(model_psnr, bicubic_psnr))))
			print('AVG MODEL PSNR: {}, AVG BICUBIC PSNR: {}'.format(np.sum(model_psnr)/lrimg_list.shape[0], np.sum(bicubic_psnr)/lrimg_list.shape[0]))
			print('SUM(SSIM DIFF): {}'.format(np.sum(np.subtract(model_ssim, bicubic_ssim))))
			print('AVG MODEL SSIM: {}, AVG BICUBIC SSIM: {}'.format(np.sum(model_ssim)/lrimg_list.shape[0], np.sum(bicubic_ssim)/lrimg_list.shape[0]))
			print((time.time()-save_time)/10)
Example #7
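A benchmarking script for adaptive volume super-resolution rendering: one configuration branch is enabled below, TorchScript importance and reconstruction networks are loaded alongside inpainting baselines, every importance/reconstruction combination is evaluated over the chosen sampling patterns, and PSNR/SSIM/LPIPS statistics plus error histograms are written to HDF5.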
def run():
    torch.ops.load_library("./Renderer.dll")

    #########################
    # CONFIGURATION
    #########################
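    # exactly one of the branches below is switched on (if 1 / elif 1) to
    # select which benchmark configuration runs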

    if 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIso2/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            #("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            ("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #("adaptive011", "adaptive011_epoch500"), #title, file prefix
            ("adaptive019", "adaptive019_epoch470"),
            ("adaptive023", "adaptive023_epoch300")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['halton', 'plastic', 'random']

        HEATMAP_MIN = [0.01, 0.05, 0.2]
        HEATMAP_MEAN = [0.05, 0.1, 0.2, 0.5]

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 8

    elif 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoEnhance6/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            #("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
            #("Head", "gt-rendering-head.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            #("U-Net (5-4)", "sizes/size5-4_epoch500"),
            #("Enhance-Net (epoch 50)", "enhance2_imp050_epoch050"),
            #("Enhance-Net (epoch 400)", "enhance2_imp050_epoch400"),
            #("Enhance-Net (Thorax)", "enhance_imp050_Thorax_epoch200"),
            #("Enhance-Net (RM)", "enhance_imp050_RM_epoch200"),
            #("Imp100", "enhance4_imp100_epoch300"),
            #("Imp100res", "enhance4_imp100res_epoch230"),
            ("Imp100res+N", "enhance4_imp100res+N_epoch300"),
            ("Imp100+N", "enhance4_imp100+N_epoch300"),
            #("Imp100+N-res", "enhance4_imp100+N-res_epoch300"),
            #("Imp100+N-resInterp", "enhance4_imp100+N-resInterp_epoch300"),
            #("U-Net (5-4)", "size5-4_epoch500"),
            #("U-Net (5-3)", "size5-3_epoch500"),
            #("U-Net (4-4)", "size4-4_epoch500"),
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/dense-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['plastic']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [0.05]  # [0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.5, 0.8, 1.0]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 4

    elif 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoEnhance5Sampling/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            ("Enhance-Net (epoch 400)", "enhance2_imp050_epoch400"),
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/dense-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['halton', 'plastic', 'random', 'regular']
        #SAMPLING_PATTERNS = ['regular']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [0.05]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 4

    elif 1:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoEnhance8Sampling/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            #("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
            #("Head", "gt-rendering-head.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            ("regular", "enhance7_regular_epoch190"),
            ("random", "enhance7_random_epoch190"),
            ("halton", "enhance7_halton_epoch190"),
            ("plastic", "enhance7_plastic_epoch190"),
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/dense-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['regular', 'random', 'halton', 'plastic']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [0.05]  # [0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.5, 0.8, 1.0]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 4

    elif 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoImp/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            #("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/imp/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #("adaptive011", "adaptive011_epoch500"), #title, file prefix
            ("imp005", "imp005_epoch500"),
            ("imp010", "imp010_epoch500"),
            ("imp020", "imp020_epoch500"),
            ("imp050", "imp050_epoch500"),
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['halton']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [0.005, 0.01, 0.02, 0.05, 0.1]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 16

    #########################
    # LOADING
    #########################

    device = torch.device("cuda")

    # Load Networks
    IMPORTANCE_BASELINE1 = "ibase1"
    IMPORTANCE_BASELINE2 = "ibase2"
    IMPORTANCE_BASELINE3 = "ibase3"
    RECON_BASELINE = "rbase"

    # load importance model
    print("load importance networks")

    class ImportanceModel:
        def __init__(self, file):
            if file == IMPORTANCE_BASELINE1:
                self._net = importance.UniformImportanceMap(1, 0.5)
                self._upscaling = 1
                self._name = "constant"
                self.disableTemporal = True
                self._requiresPrevious = False
            elif file == IMPORTANCE_BASELINE2:
                self._net = importance.GradientImportanceMap(
                    1, (1, 1), (2, 1), (3, 1))
                self._upscaling = 1
                self._name = "curvature"
                self.disableTemporal = True
                self._requiresPrevious = False
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_importance.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                settings = json.loads(extra_files['settings.json'])
                self._upscaling = settings['networkUpscale']
                self._requiresPrevious = settings.get("requiresPrevious",
                                                      False)
                self.disableTemporal = settings.get("disableTemporal", True)

        def networkUpscaling(self):
            return self._upscaling

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            if self._requiresPrevious:
                input = torch.cat([
                    input,
                    models.VideoTools.flatten_high(prev_warped_out,
                                                   self._upscaling)
                ],
                                  dim=1)
            input = F.pad(input, [IMPORTANCE_BORDER] * 4, 'constant', 0)
            output = self._net(input)  # the network call
            output = F.pad(output, [-IMPORTANCE_BORDER * self._upscaling] * 4,
                           'constant', 0)
            return output

    class LuminanceImportanceModel:
        def __init__(self):
            self.disableTemporal = True

        def setTestFile(self, filename):
            importance_file = filename[:-5] + "-luminanceImportance.hdf5"
            if os.path.exists(importance_file):
                self._exist = True
                self._file = h5py.File(importance_file, 'r')
                self._dset = self._file['importance']
            else:
                self._exist = False
                self._file = None
                self._dset = None

        def isAvailable(self):
            return self._exist

        def setIndices(self, indices: torch.Tensor):
            assert len(indices.shape) == 1
            self._indices = list(indices.cpu().numpy())

        def setTime(self, time):
            self._time = time

        def networkUpscaling(self):
            return UPSCALING

        def name(self):
            return "luminance-contrast"

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            B, C, H, W = input.shape
            if not self._exist:
                return torch.ones(B,
                                  1,
                                  H,
                                  W,
                                  dtype=input.dtype,
                                  device=input.device)
            outputs = []
            for idx in self._indices:
                outputs.append(
                    torch.from_numpy(self._dset[idx, self._time,
                                                ...]).to(device=input.device))
            return torch.stack(outputs, dim=0)

    importanceBaseline1 = ImportanceModel(IMPORTANCE_BASELINE1)
    importanceBaseline2 = ImportanceModel(IMPORTANCE_BASELINE2)
    importanceBaselineLuminance = LuminanceImportanceModel()
    importanceModels = [ImportanceModel(f) for f in NETWORKS]

    # load reconstruction networks
    print("load reconstruction networks")

    class ReconstructionModel:
        def __init__(self, file):
            if file == RECON_BASELINE:

                class Inpainting(nn.Module):
                    def forward(self, x, mask):
                        # first 6 channels: mask, normal xyz, depth, ao;
                        # channel 6: sampling mask
                        input = x[:, 0:6, :, :].contiguous()
                        mask = x[:, 6, :, :].contiguous()
                        return torch.ops.renderer.fast_inpaint(mask, input)

                self._net = Inpainting()
                self._upscaling = 1
                self._name = "inpainting"
                self.disableTemporal = True
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_recon.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                self._settings = json.loads(extra_files['settings.json'])
                self.disableTemporal = False
                requiresMask = self._settings.get('expectMask', False)
                if self._settings.get("interpolateInput", False):
                    self._originalNet = self._net

                    class Inpainting2(nn.Module):
                        def __init__(self, originalNet, requiresMask):
                            super().__init__()
                            self._n = originalNet
                            self._requiresMask = requiresMask

                        def forward(self, x, mask):
                            # first 6 channels: mask, normal xyz, depth, ao
                            input = x[:, 0:6, :, :].contiguous()
                            mask = x[:, 6, :, :].contiguous()
                            inpainted = torch.ops.renderer.fast_inpaint(
                                mask, input)
                            x[:, 0:6, :, :] = inpainted
                            if self._requiresMask:
                                return self._n(x, mask)
                            else:
                                return self._n(x)

                    self._net = Inpainting2(self._originalNet, requiresMask)

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, mask, prev_warped_out):
            input = torch.cat([input, prev_warped_out], dim=1)
            output = self._net(input, mask)
            return output

    class ReconstructionModelPostTrain:
        """
        Reconstruction model that are trained as dense reconstruction networks
        after the adaptive training.
        They don't recive the sampling mask as input, but can start with PDE-based inpainting
        """
        def __init__(self, name: str, model_path: str, inpainting: str):
            assert inpainting == 'fast' or inpainting == 'pde', "inpainting must be either 'fast' or 'pde', but got %s" % inpainting
            self._inpainting = inpainting

            self._name = name
            file = os.path.join(POSTTRAIN_NETWORK_DIR, model_path)
            extra_files = torch._C.ExtraFilesMap()
            extra_files['settings.json'] = ""
            self._net = torch.jit.load(file,
                                       map_location=device,
                                       _extra_files=extra_files)
            self._settings = json.loads(extra_files['settings.json'])
            assert self._settings.get(
                'upscale_factor', None) == 1, "selected file is not a 1x SRNet"
            self.disableTemporal = False

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            # drop the sampling-mask channel and AO: keep mask, normal xyz, depth
            input_no_sampling = input[:, 0:5, :, :].contiguous()
            sampling_mask = input[:, 6, :, :].contiguous()
            # perform inpainting
            if self._inpainting == 'pde':
                inpainted = torch.ops.renderer.pde_inpaint(
                    sampling_mask,
                    input_no_sampling,
                    200,
                    1e-4,
                    5,
                    2,  # m0, epsilon, m1, m2
                    0,  # mc -> multigrid recursion count. =0 disables the multigrid hierarchy
                    9,
                    0)  # ms, m3
            else:
                inpainted = torch.ops.renderer.fast_inpaint(
                    sampling_mask, input_no_sampling)
            # run network
            input = torch.cat([inpainted, prev_warped_out], dim=1)
            output = self._net(input)
            if isinstance(output, tuple):
                output = output[0]
            return output

    reconBaseline = ReconstructionModel(RECON_BASELINE)
    reconModels = [ReconstructionModel(f) for f in NETWORKS]
    reconPostModels = [
        ReconstructionModelPostTrain(name, file, inpainting)
        for (name, file, inpainting) in POSTTRAIN_NETWORKS
    ]
    allReconModels = reconModels + reconPostModels

    NETWORK_COMBINATIONS = \
        [(importanceBaseline1, reconBaseline), (importanceBaseline2, reconBaseline)] + \
        [(importanceBaselineLuminance, reconBaseline)] + \
        [(importanceBaseline1, reconNet) for reconNet in allReconModels] + \
        [(importanceBaseline2, reconNet) for reconNet in allReconModels] + \
        [(importanceBaselineLuminance, reconNet) for reconNet in allReconModels] + \
        [(importanceNet, reconBaseline) for importanceNet in importanceModels] + \
        list(zip(importanceModels, reconModels)) + \
        [(importanceNet, reconPostModel) for importanceNet in importanceModels for reconPostModel in reconPostModels]
    #NETWORK_COMBINATIONS = list(zip(importanceModels, reconModels))
    print("Network combinations:")
    for (i, r) in NETWORK_COMBINATIONS:
        print("  %s - %s" % (i.name(), r.name()))

    # load sampling patterns
    print("load sampling patterns")
    with h5py.File(SAMPLING_FILE, 'r') as f:
        sampling_pattern = dict([(name, torch.from_numpy(f[name][...]).to(device)) \
            for name in SAMPLING_PATTERNS])

    # create shading
    shading = ScreenSpaceShading(device)
    shading.fov(30)
    shading.ambient_light_color(np.array([0.1, 0.1, 0.1]))
    shading.diffuse_light_color(np.array([1.0, 1.0, 1.0]))
    shading.specular_light_color(np.array([0.0, 0.0, 0.0]))
    shading.specular_exponent(16)
    shading.light_direction(np.array([0.1, 0.1, 1.0]))
    shading.material_color(np.array([1.0, 0.3, 0.0]))
    AMBIENT_OCCLUSION_STRENGTH = 1.0
    shading.ambient_occlusion(1.0)
    shading.inverse_ao = False

    # heatmap configurations
    HEATMAP_CFG = [(hmin, hmean) for hmin in HEATMAP_MIN
                   for hmean in HEATMAP_MEAN if hmin < hmean]
    print("heatmap configs:", HEATMAP_CFG)

    #########################
    # DEFINE STATISTICS
    #########################
    ssimLoss = SSIM(size_average=False)
    ssimLoss.to(device)
    psnrLoss = PSNR()
    psnrLoss.to(device)
    lpipsColor = lpips.PerceptualLoss(model='net-lin',
                                      net='alex',
                                      use_gpu=True)
    MIN_FILLING = 0.05
    NUM_BINS = 200

    class Statistics:
        def __init__(self):
            self.histogram_color_withAO = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_color_noAO = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_depth = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_normal = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_mask = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_ao = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_counter = 0

        def create_datasets(self, hdf5_file: h5py.File, stats_name: str,
                            histo_name: str, num_samples: int,
                            extra_info: dict):

            self.expected_num_samples = num_samples
            stats_shape = (num_samples, len(list(StatField)))
            self.stats_file = hdf5_file.require_dataset(stats_name,
                                                        stats_shape,
                                                        dtype='f',
                                                        exact=True)
            self.stats_file.attrs['NumFields'] = len(list(StatField))
            for field in list(StatField):
                self.stats_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.stats_file.attrs[key] = value
            self.stats_index = 0

            histo_shape = (NUM_BINS, len(list(HistoField)))
            self.histo_file = hdf5_file.require_dataset(histo_name,
                                                        histo_shape,
                                                        dtype='f',
                                                        exact=True)
            self.histo_file.attrs['NumFields'] = len(list(HistoField))
            for field in list(HistoField):
                self.histo_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.histo_file.attrs[key] = value

        def add_timestep_sample(self, pred_mnda, gt_mnda, sampling_mask):
            """
            adds a timestep sample:
            pred_mnda: prediction: mask, normal, depth, AO
            gt_mnda: ground truth: mask, normal, depth, AO
            """
            B = pred_mnda.shape[0]

            #shading
            shading.ambient_occlusion(AMBIENT_OCCLUSION_STRENGTH)
            pred_color_withAO = shading(pred_mnda)
            gt_color_withAO = shading(gt_mnda)
            shading.ambient_occlusion(0.0)
            pred_color_noAO = shading(pred_mnda)
            gt_color_noAO = shading(gt_mnda)

            #apply border
            pred_mnda = pred_mnda[:, :, LOSS_BORDER:-LOSS_BORDER,
                                  LOSS_BORDER:-LOSS_BORDER]
            pred_color_withAO = pred_color_withAO[:, :,
                                                  LOSS_BORDER:-LOSS_BORDER,
                                                  LOSS_BORDER:-LOSS_BORDER]
            pred_color_noAO = pred_color_noAO[:, :, LOSS_BORDER:-LOSS_BORDER,
                                              LOSS_BORDER:-LOSS_BORDER]
            gt_mnda = gt_mnda[:, :, LOSS_BORDER:-LOSS_BORDER,
                              LOSS_BORDER:-LOSS_BORDER]
            gt_color_withAO = gt_color_withAO[:, :, LOSS_BORDER:-LOSS_BORDER,
                                              LOSS_BORDER:-LOSS_BORDER]
            gt_color_noAO = gt_color_noAO[:, :, LOSS_BORDER:-LOSS_BORDER,
                                          LOSS_BORDER:-LOSS_BORDER]

            mask = gt_mnda[:, 0:1, :, :] * 0.5 + 0.5

            # PSNR
            psnr_mask = psnrLoss(pred_mnda[:, 0:1, :, :],
                                 gt_mnda[:, 0:1, :, :]).cpu().numpy()
            psnr_normal = psnrLoss(pred_mnda[:, 1:4, :, :],
                                   gt_mnda[:, 1:4, :, :],
                                   mask=mask).cpu().numpy()
            psnr_depth = psnrLoss(pred_mnda[:, 4:5, :, :],
                                  gt_mnda[:, 4:5, :, :],
                                  mask=mask).cpu().numpy()
            psnr_ao = psnrLoss(pred_mnda[:, 5:6, :, :],
                               gt_mnda[:, 5:6, :, :],
                               mask=mask).cpu().numpy()
            psnr_color_withAO = psnrLoss(pred_color_withAO,
                                         gt_color_withAO,
                                         mask=mask).cpu().numpy()
            psnr_color_noAO = psnrLoss(pred_color_noAO,
                                       gt_color_noAO,
                                       mask=mask).cpu().numpy()

            # SSIM
            ssim_mask = ssimLoss(pred_mnda[:, 0:1, :, :],
                                 gt_mnda[:, 0:1, :, :]).cpu().numpy()
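            # blend the prediction toward GT outside the object mask so the
            # SSIM values computed below only measure differences inside the mask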
            pred_mnda = gt_mnda + mask * (pred_mnda - gt_mnda)
            ssim_normal = ssimLoss(pred_mnda[:, 1:4, :, :],
                                   gt_mnda[:, 1:4, :, :]).cpu().numpy()
            ssim_depth = ssimLoss(pred_mnda[:, 4:5, :, :],
                                  gt_mnda[:, 4:5, :, :]).cpu().numpy()
            ssim_ao = ssimLoss(pred_mnda[:, 5:6, :, :],
                               gt_mnda[:, 5:6, :, :]).cpu().numpy()
            ssim_color_withAO = ssimLoss(pred_color_withAO,
                                         gt_color_withAO).cpu().numpy()
            ssim_color_noAO = ssimLoss(pred_color_noAO,
                                       gt_color_noAO).cpu().numpy()

            # Perceptual
            lpips_color_withAO = torch.cat([
                lpipsColor(
                    pred_color_withAO[b], gt_color_withAO[b], normalize=True)
                for b in range(B)
            ],
                                           dim=0).cpu().numpy()
            lpips_color_noAO = torch.cat([
                lpipsColor(
                    pred_color_noAO[b], gt_color_noAO[b], normalize=True)
                for b in range(B)
            ],
                                         dim=0).cpu().numpy()

            # Samples
            samples = torch.mean(sampling_mask, dim=(1, 2, 3)).cpu().numpy()

            # Write samples to file
            for b in range(B):
                assert self.stats_index < self.expected_num_samples, "Adding more samples than specified"
                self.stats_file[self.stats_index, :] = np.array([
                    psnr_mask[b], psnr_normal[b], psnr_depth[b], psnr_ao[b],
                    psnr_color_noAO[b], psnr_color_withAO[b], ssim_mask[b],
                    ssim_normal[b], ssim_depth[b], ssim_ao[b],
                    ssim_color_noAO[b], ssim_color_withAO[b],
                    lpips_color_noAO[b], lpips_color_withAO[b], samples[b]
                ],
                                                                dtype='f')
                self.stats_index += 1

            # Histogram
            self.histogram_counter += 1
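            # each histogram below is maintained as a running mean over batches:
            # avg += (new - avg) / n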

            mask_diff = F.l1_loss(gt_mnda[:, 0, :, :],
                                  pred_mnda[:, 0, :, :],
                                  reduction='none')
            histogram, _ = np.histogram(mask_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_mask += (
                histogram /
                (NUM_BINS * B) - self.histogram_mask) / self.histogram_counter

            #normal_diff = (-F.cosine_similarity(gt_mnda[0,1:4,:,:], pred_mnda[0,1:4,:,:], dim=0)+1)/2
            normal_diff = F.l1_loss(gt_mnda[:, 1:4, :, :],
                                    pred_mnda[:, 1:4, :, :],
                                    reduction='none').sum(dim=0) / 6
            histogram, _ = np.histogram(normal_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_normal += (histogram /
                                      (NUM_BINS * B) - self.histogram_normal
                                      ) / self.histogram_counter

            depth_diff = F.l1_loss(gt_mnda[:, 4, :, :],
                                   pred_mnda[:, 4, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(depth_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_depth += (
                histogram /
                (NUM_BINS * B) - self.histogram_depth) / self.histogram_counter

            ao_diff = F.l1_loss(gt_mnda[:, 5, :, :],
                                pred_mnda[:, 5, :, :],
                                reduction='none')
            histogram, _ = np.histogram(ao_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_ao += (histogram / (NUM_BINS * B) -
                                  self.histogram_ao) / self.histogram_counter

            color_diff = F.l1_loss(gt_color_withAO[:, 0, :, :],
                                   pred_color_withAO[:, 0, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(color_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_color_withAO += (
                histogram / (NUM_BINS * B) -
                self.histogram_color_withAO) / self.histogram_counter

            color_diff = F.l1_loss(gt_color_noAO[:, 0, :, :],
                                   pred_color_noAO[:, 0, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(color_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_color_noAO += (
                histogram / (NUM_BINS * B) -
                self.histogram_color_noAO) / self.histogram_counter
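
            # All histogram updates above are incremental means over the
            # processed batches, avoiding storage of per-batch histograms:
            #   mean_k = mean_{k-1} + (x_k - mean_{k-1}) / k
            # Standalone sketch of the same update rule (hypothetical helper,
            # not part of this pipeline):
            #
            #   def incremental_mean(prev_mean, x, k):
            #       return prev_mean + (x - prev_mean) / k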

        def close_stats_file(self):
            self.stats_file.attrs['NumEntries'] = self.stats_index

        def write_histogram(self):
            """
            After every sample for the current dataset was processed, write
            a histogram of the errors in a new file
            """
            for i in range(NUM_BINS):
                self.histo_file[i, :] = np.array([
                    i / NUM_BINS, (i + 1) / NUM_BINS, self.histogram_mask[i],
                    self.histogram_normal[i], self.histogram_depth[i],
                    self.histogram_ao[i], self.histogram_color_withAO[i],
                    self.histogram_color_noAO[i]
                ])
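
        # Reading a finished histogram back is symmetric; a minimal h5py
        # sketch (reusing the output_file/histo_filename names from this
        # script):
        #
        #   with h5py.File(output_file, 'r') as f:
        #       histo = f[histo_filename][...]
        #       bin_lo, bin_hi = histo[:, 0], histo[:, 1]
        #       mask_hist = histo[:, 2]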

    #########################
    # DATASET
    #########################
    class FullResDataset(torch.utils.data.Dataset):
        def __init__(self, file):
            self.hdf5_file = h5py.File(file, 'r')
            self.dset = self.hdf5_file['gt']
            print("Dataset shape:", self.dset.shape)

        def __len__(self):
            return self.dset.shape[0]

        def num_timesteps(self):
            return self.dset.shape[1]

        def __getitem__(self, idx):
            return (self.dset[idx, ...], np.array(idx))
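
    # Usage sketch (mirrors the loader construction below): each item is a
    # full-resolution sequence of shape (T, C, H, W) plus its index, and
    # batching adds the leading B dimension.
    #
    #   loader = torch.utils.data.DataLoader(FullResDataset(path),
    #                                        batch_size=1, shuffle=False)
    #   for batch, indices in loader:
    #       ...  # batch: (B, T, C, H, W)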

    #########################
    # COMPUTE STATS for each dataset
    #########################
    for dataset_name, dataset_file in DATASETS:
        dataset_file = os.path.join(DATASET_PREFIX, dataset_file)
        print("Compute statistics for", dataset_name)

        # init luminance importance map
        importanceBaselineLuminance.setTestFile(dataset_file)
        if importanceBaselineLuminance.isAvailable():
            print("Luminance-contrast importance map is available")

        # create output file
        os.makedirs(OUTPUT_FOLDER, exist_ok=True)
        output_file = os.path.join(OUTPUT_FOLDER, dataset_name + '.hdf5')
        print("Save to", output_file)
        with h5py.File(output_file, 'a') as output_hdf5_file:

            # load dataset
            dataset = FullResDataset(dataset_file)
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

            # define statistics
            StatsCfg = collections.namedtuple(
                "StatsCfg", "stats importance recon heatmin heatmean pattern")
            statistics = []
            for (inet, rnet) in NETWORK_COMBINATIONS:
                for (heatmin, heatmean) in HEATMAP_CFG:
                    for pattern in SAMPLING_PATTERNS:
                        stats_info = {
                            'importance': inet.name(),
                            'reconstruction': rnet.name(),
                            'heatmin': heatmin,
                            'heatmean': heatmean,
                            'pattern': pattern
                        }
                        stats_filename = "Stats_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 100,
                            heatmean * 100, pattern)
                        histo_filename = "Histogram_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 100,
                            heatmean * 100, pattern)
                        s = Statistics()
                        s.create_datasets(
                            output_hdf5_file, stats_filename, histo_filename,
                            len(dataset) * dataset.num_timesteps(), stats_info)
                        statistics.append(
                            StatsCfg(stats=s,
                                     importance=inet,
                                     recon=rnet,
                                     heatmin=heatmin,
                                     heatmean=heatmean,
                                     pattern=pattern))
            print(len(statistics),
                  "different combinations are performed per sample")

            # compute statistics
            try:
                with torch.no_grad():
                    num_minibatch = len(data_loader)
                    pg = ProgressBar(num_minibatch, 'Evaluation', length=50)
                    for iteration, (batch, batch_indices) in enumerate(
                            data_loader, 0):
                        pg.print_progress_bar(iteration)
                        batch = batch.to(device)
                        importanceBaselineLuminance.setIndices(batch_indices)
                        B, T, C, H, W = batch.shape

                        # try out each combination
                        for s in statistics:
                            #print(s)
                            # get input to evaluation
                            importanceNetUpscale = \
                                s.importance.networkUpscaling()
                            importancePostUpscale = UPSCALING // importanceNetUpscale
                            crop_low = torch.nn.functional.interpolate(
                                batch.reshape(B * T, C, H, W),
                                scale_factor=1 / UPSCALING,
                                mode='area').reshape(B, T, C, H // UPSCALING,
                                                     W // UPSCALING)
                            pattern = sampling_pattern[s.pattern][:H, :W]
                            crop_high = batch

                            # loop over timesteps
                            pattern = pattern.unsqueeze(0).unsqueeze(0)
                            previous_importance = None
                            previous_output = None
                            reconstructions = []
                            for j in range(T):
                                importanceBaselineLuminance.setTime(j)
                                # extract flow (always the last two channels of crop_high)
                                flow = crop_high[:, j, C - 2:, :, :]

                                # compute importance map
                                importance_input = crop_low[:, j, :5, :, :]
                                if j == 0 or s.importance.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        1,
                                        importance_input.shape[2] *
                                        importanceNetUpscale,
                                        importance_input.shape[3] *
                                        importanceNetUpscale,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    flow_low = F.interpolate(
                                        flow,
                                        scale_factor=1 / importancePostUpscale)
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_importance, flow_low, 1,
                                        False)
                                importance_map = s.importance.call(
                                    importance_input, previous_input)
                                if len(importance_map.shape) == 3:
                                    importance_map = \
                                        importance_map.unsqueeze(1)
                                previous_importance = importance_map

                                target_mean = s.heatmean
                                if USE_BINARY_SEARCH_ON_MEAN:
                                    # For regular sampling, the normalization does not work properly,
                                    # use binary search on the heatmean instead
                                    def f(x):
                                        postprocess = importance.PostProcess(
                                            s.heatmin, x,
                                            importancePostUpscale,
                                            LOSS_BORDER //
                                            importancePostUpscale, 'basic')
                                        importance_map2 = postprocess(
                                            importance_map)[0].unsqueeze(1)
                                        sampling_mask = (
                                            importance_map2 >= pattern).to(
                                                dtype=importance_map.dtype)
                                        samples = torch.mean(
                                            sampling_mask).item()
                                        return samples

                                    target_mean = binarySearch(
                                        f, s.heatmean, s.heatmean, 10, 0, 1)
                                    #print("Binary search for #samples, mean start={}, result={} with samples={}, original={}".
                                    #      format(s.heatmean, s.heatmean, f(target_mean), f(s.heatmean)))
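
                                    # The search works because f is monotone:
                                    # raising the target mean can only keep
                                    # more pixels above the pattern threshold.
                                    # A bisection sketch matching the assumed
                                    # binarySearch(f, target, x0, iters, lo, hi)
                                    # contract (hypothetical reimplementation):
                                    #
                                    #   def binary_search(f, target, iters,
                                    #                     lo, hi):
                                    #       for _ in range(iters):
                                    #           mid = 0.5 * (lo + hi)
                                    #           if f(mid) < target:
                                    #               lo = mid
                                    #           else:
                                    #               hi = mid
                                    #       return 0.5 * (lo + hi)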

                                # normalize and upscale importance map
                                postprocess = importance.PostProcess(
                                    s.heatmin, target_mean,
                                    importancePostUpscale,
                                    LOSS_BORDER // importancePostUpscale,
                                    'basic')
                                importance_map = postprocess(
                                    importance_map)[0].unsqueeze(1)
                                #print("mean:", torch.mean(importance_map).item())

                                # create samples
                                sample_mask = (importance_map >= pattern).to(
                                    dtype=importance_map.dtype)
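
                                # Thresholding against the fixed pattern acts
                                # like ordered dithering: a pixel is sampled
                                # iff its importance exceeds the pattern value
                                # there, so an importance map with mean p keeps
                                # roughly a fraction p of all pixels. Tiny
                                # sketch (hypothetical shapes):
                                #
                                #   imp = torch.rand(1, 1, 4, 4)
                                #   pat = torch.rand(1, 1, 4, 4)
                                #   mask = (imp >= pat).float()  # {0, 1}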

                                reconstruction_input = torch.cat(
                                    (
                                        # mask, normal x, normal y, normal z,
                                        # depth
                                        sample_mask *
                                        crop_high[:, j, 0:5, :, :],
                                        # ao
                                        sample_mask * torch.ones(
                                            B, 1, H, W,
                                            dtype=crop_high.dtype,
                                            device=crop_high.device),
                                        # sample mask
                                        sample_mask),
                                    dim=1)

                                # warp previous output
                                if j == 0 or s.recon.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        6,
                                        H,
                                        W,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_output, flow, 1, False)

                                # run reconstruction network
                                reconstruction = s.recon.call(
                                    reconstruction_input, sample_mask,
                                    previous_input)

                                # clamp
                                reconstruction_clamped = torch.cat(
                                    [
                                        torch.clamp(
                                            reconstruction[:, 0:1, :, :],
                                            -1, +1),  # mask
                                        ScreenSpaceShading.normalize(
                                            reconstruction[:, 1:4, :, :],
                                            dim=1),  # normal
                                        torch.clamp(
                                            reconstruction[:, 4:5, :, :],
                                            0, +1),  # depth
                                        torch.clamp(
                                            reconstruction[:, 5:6, :, :],
                                            0, +1)  # ao
                                    ],
                                    dim=1)
                                reconstructions.append(reconstruction_clamped)

                                # save for next frame
                                previous_output = reconstruction_clamped

                            #endfor: timesteps

                            # compute statistics
                            reconstructions = torch.cat(reconstructions, dim=0)
                            crops_high = torch.cat(
                                [crop_high[:, j, :6, :, :] for j in range(T)],
                                dim=0)
                            sample_masks = torch.cat([sample_mask] * T, dim=0)
                            s.stats.add_timestep_sample(
                                reconstructions, crops_high, sample_masks)

                        # endfor: statistic
                    # endfor: batch

                    pg.print_progress_bar(num_minibatch)
                # end no_grad()
            finally:
                # close files
                for s in statistics:
                    s.stats.write_histogram()
                    s.stats.close_stats_file()


def run():
    torch.ops.load_library("./Renderer.dll")

    #########################
    # CONFIGURATION
    #########################

    if True:
        OUTPUT_FOLDER = "../result-stats/adaptiveDvr3/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-dvr-ejecta6-test.hdf5",
             "gt-dvr-ejecta6-test-screen8x.hdf5"),
            ("RM", "gt-dvr-rm1-test.hdf5", "gt-dvr-rm1-test-screen8x.hdf5"),
            ("Thorax", "gt-dvr-thorax2-test.hdf5",
             "gt-dvr-thorax2-test-screen8x.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-dvr-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            #("v5-temp001", "adapDvr5-rgb-temp001-epoch300"),
            #("v5-temp010", "adapDvr5-rgb-temp010-epoch300"),
            #("v5-temp100", "adapDvr5-rgb-temp100-epoch300"),
            #("v5-temp001-perc", "adapDvr5-rgb-temp001-perc01-epoch300"),
            ("v5-perc01+bn", "adapDvr5-rgb-perc01-bn-epoch500"),
            ("v5-perc01-bn", "adapDvr5-rgb-temp001-perc01-epoch500")
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-dvr-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
            ("v6pr2-noTemp",
             "ejecta6pr2-plastic05-lpips-noTempCon-epoch500_recon.pt", "fast",
             False),
            ("v6pr2-tl2-100",
             "ejecta6pr2-plastic05-lpips-tl2-100-epoch500_recon.pt", "fast",
             True)
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['plastic']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [
            0.02, 0.05, 0.1, 0.2
        ]  #[0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.5, 0.8, 1.0]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 1  #2

    #########################
    # LOADING
    #########################

    device = torch.device("cuda")

    # Load Networks
    IMPORTANCE_BASELINE1 = "ibase1"
    IMPORTANCE_BASELINE2 = "ibase2"
    RECON_BASELINE = "rbase"

    # load importance model
    print("load importance networks")

    class ImportanceModel:
        def __init__(self, file):
            if file == IMPORTANCE_BASELINE1:
                self._net = importance.UniformImportanceMap(1, 0.5)
                self._upscaling = 1
                self._name = "constant"
                self.disableTemporal = True
                self._requiresPrevious = False
            elif file == IMPORTANCE_BASELINE2:
                self._net = importance.GradientImportanceMap(
                    1, (0, 1), (1, 1), (2, 1))
                self._upscaling = 1
                self._name = "curvature"
                self.disableTemporal = True
                self._requiresPrevious = False
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_importance.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                settings = json.loads(extra_files['settings.json'])
                self._upscaling = settings['networkUpscale']
                self._requiresPrevious = settings.get("requiresPrevious",
                                                      False)
                self.disableTemporal = settings.get("disableTemporal", True)

        def networkUpscaling(self):
            return self._upscaling

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            if self._requiresPrevious:
                input = torch.cat([
                    input,
                    models.VideoTools.flatten_high(prev_warped_out,
                                                   self._upscaling)
                ],
                                  dim=1)
            input = F.pad(input, [IMPORTANCE_BORDER] * 4, 'constant', 0)
            output = self._net(input)  # the network call
            output = F.pad(output, [-IMPORTANCE_BORDER * self._upscaling] * 4,
                           'constant', 0)
            return output
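
        # Note: F.pad with negative padding crops. call() pads the input by
        # IMPORTANCE_BORDER pixels on each side before the network and crops
        # the (upscaled) border off again afterwards. Minimal illustration
        # (hypothetical tensors):
        #
        #   x = torch.zeros(1, 1, 8, 8)
        #   y = F.pad(x, [2] * 4)   # -> shape (1, 1, 12, 12)
        #   z = F.pad(y, [-2] * 4)  # -> shape (1, 1, 8, 8)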

    importanceBaseline1 = ImportanceModel(IMPORTANCE_BASELINE1)
    importanceBaseline2 = ImportanceModel(IMPORTANCE_BASELINE2)
    importanceModels = [ImportanceModel(f) for f in NETWORKS]

    # load reconstruction networks
    print("load reconstruction networks")

    class ReconstructionModel:
        def __init__(self, file):
            if file == RECON_BASELINE:

                class Inpainting(nn.Module):
                    def forward(self, x, mask):
                        # rgba only; don't use normal xyz or depth
                        input = x[:, 0:4, :, :].contiguous()
                        # the sampling mask is re-read from channel 8; the
                        # 'mask' argument is kept for interface parity
                        mask = x[:, 8, :, :].contiguous()
                        return torch.ops.renderer.fast_inpaint(mask, input)

                self._net = Inpainting()
                self._upscaling = 1
                self._name = "inpainting"
                self.disableTemporal = True
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_recon.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                self._settings = json.loads(extra_files['settings.json'])
                self.disableTemporal = False
                requiresMask = self._settings.get('expectMask', False)
                if self._settings.get("interpolateInput", False):
                    self._originalNet = self._net

                    class Inpainting2(nn.Module):
                        def __init__(self, originalNet, requiresMask):
                            super().__init__()
                            self._n = originalNet
                            self._requiresMask = requiresMask

                        def forward(self, x, mask):
                            # rgba, normal xyz, depth
                            input = x[:, 0:8, :, :].contiguous()
                            mask = x[:, 8, :, :].contiguous()
                            inpainted = torch.ops.renderer.fast_inpaint(
                                mask, input)
                            x[:, 0:8, :, :] = inpainted
                            if self._requiresMask:
                                return self._n(x, mask)
                            else:
                                return self._n(x)

                    self._net = Inpainting2(self._originalNet, requiresMask)

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, mask, prev_warped_out):
            input = torch.cat([input, prev_warped_out], dim=1)
            output = self._net(input, mask)
            return output

    class ReconstructionModelPostTrain:
        """
        Reconstruction model that are trained as dense reconstruction networks
        after the adaptive training.
        They don't recive the sampling mask as input, but can start with PDE-based inpainting
        """
        def __init__(self, name: str, model_path: str, inpainting: str,
                     has_temporal: bool):
            assert inpainting == 'fast' or inpainting == 'pde', "inpainting must be either 'fast' or 'pde', but got %s" % inpainting
            self._inpainting = inpainting

            self._name = name
            file = os.path.join(POSTTRAIN_NETWORK_DIR, model_path)
            extra_files = torch._C.ExtraFilesMap()
            extra_files['settings.json'] = ""
            self._net = torch.jit.load(file,
                                       map_location=device,
                                       _extra_files=extra_files)
            self._settings = json.loads(extra_files['settings.json'])
            assert self._settings.get(
                'upscale_factor', None) == 1, "selected file is not a 1x SRNet"
            self.disableTemporal = not has_temporal

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, mask, prev_warped_out):
            # rgba, normal xyz, depth; no sampling mask and no AO
            input_no_sampling = input[:, 0:8, :, :].contiguous()
            sampling_mask = mask[:, 0, :, :].contiguous()
            # perform inpainting
            if self._inpainting == 'pde':
                inpainted = torch.ops.renderer.pde_inpaint(
                    sampling_mask,
                    input_no_sampling,
                    200,   # m0
                    1e-4,  # epsilon
                    5,     # m1
                    2,     # m2
                    0,     # mc -> multigrid recursion count, =0 disables the multigrid hierarchy
                    9,     # ms
                    0)     # m3
            else:
                inpainted = torch.ops.renderer.fast_inpaint(
                    sampling_mask, input_no_sampling)
            # run network
            if self.disableTemporal:
                prev_warped_out = torch.zeros_like(prev_warped_out)
            input = torch.cat([inpainted, prev_warped_out], dim=1)
            output = self._net(input)
            if isinstance(output, tuple):
                output = output[0]
            return output

    reconBaseline = ReconstructionModel(RECON_BASELINE)
    reconModels = [ReconstructionModel(f) for f in NETWORKS]
    reconPostModels = [
        ReconstructionModelPostTrain(name, file, inpainting, has_temporal)
        for (name, file, inpainting, has_temporal) in POSTTRAIN_NETWORKS
    ]
    allReconModels = reconModels + reconPostModels

    NETWORK_COMBINATIONS = \
        [(importanceBaseline1, reconBaseline), (importanceBaseline2, reconBaseline)] + \
        [(importanceBaseline1, reconNet) for reconNet in allReconModels] + \
        [(importanceBaseline2, reconNet) for reconNet in allReconModels] + \
        [(importanceNet, reconBaseline) for importanceNet in importanceModels] + \
        list(zip(importanceModels, reconModels)) + \
        [(importanceNet, reconPostModel) for importanceNet in importanceModels for reconPostModel in reconPostModels]
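    # For the lists configured above (I = 2 importance nets, R = 2
    # reconstruction nets, P = 2 post-trained nets) this expands to
    #   2 + 2*(R + P) + I + min(I, R) + I*P = 2 + 8 + 2 + 2 + 4 = 18
    # combinations, before crossing with the heatmap settings and sampling
    # patterns below.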
    #NETWORK_COMBINATIONS = list(zip(importanceModels, reconModels))
    print("Network combinations:")
    for (i, r) in NETWORK_COMBINATIONS:
        print("  %s - %s" % (i.name(), r.name()))

    # load sampling patterns
    print("load sampling patterns")
    with h5py.File(SAMPLING_FILE, 'r') as f:
        sampling_pattern = dict([(name, torch.from_numpy(f[name][...]).to(device)) \
            for name in SAMPLING_PATTERNS])

    #heatmap
    HEATMAP_CFG = [(min, mean) for min in HEATMAP_MIN for mean in HEATMAP_MEAN
                   if min < mean]
    print("heatmap configs:", HEATMAP_CFG)

    #########################
    # DEFINE STATISTICS
    #########################
    ssimLoss = SSIM(size_average=False)
    ssimLoss.to(device)
    psnrLoss = PSNR()
    psnrLoss.to(device)
    lpipsColor = lpips.PerceptualLoss(model='net-lin',
                                      net='alex',
                                      use_gpu=True)
    MIN_FILLING = 0.05
    NUM_BINS = 200
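
    # For reference, PSNR for images in [0, 1] is
    #   PSNR = 10 * log10(MAX^2 / MSE) = -10 * log10(MSE)   with MAX = 1.
    # A sketch of what the PSNR() module above is assumed to compute per
    # sample (hypothetical reimplementation):
    #
    #   def psnr(pred, gt):
    #       mse = torch.mean((pred - gt) ** 2, dim=(1, 2, 3))
    #       return -10.0 * torch.log10(mse)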

    class Statistics:
        def __init__(self):
            self.histogram_color = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_alpha = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_counter = 0

        def create_datasets(self, hdf5_file: h5py.File, stats_name: str,
                            histo_name: str, num_samples: int,
                            extra_info: dict):

            self.stats_name = stats_name
            self.expected_num_samples = num_samples
            stats_shape = (num_samples, len(list(StatFieldDvr)))
            self.stats_file = hdf5_file.require_dataset(stats_name,
                                                        stats_shape,
                                                        dtype='f',
                                                        exact=True)
            self.stats_file.attrs['NumFields'] = len(list(StatFieldDvr))
            for field in list(StatFieldDvr):
                self.stats_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.stats_file.attrs[key] = value
            self.stats_index = 0

            histo_shape = (NUM_BINS, len(list(HistoFieldDvr)))
            self.histo_file = hdf5_file.require_dataset(histo_name,
                                                        histo_shape,
                                                        dtype='f',
                                                        exact=True)
            self.histo_file.attrs['NumFields'] = len(list(HistoFieldDvr))
            for field in list(HistoFieldDvr):
                self.histo_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.histo_file.attrs[key] = value
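
        # h5py's require_dataset either creates the dataset or reopens an
        # existing one whose shape/dtype match (exact=True raises on
        # mismatch), so re-running the script overwrites a prior run in
        # place. Reading a finished table back (minimal sketch):
        #
        #   with h5py.File(output_file, 'r') as f:
        #       dset = f[stats_filename]
        #       data = dset[:dset.attrs['NumEntries'], :]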

        def add_timestep_sample(self, pred_rgba, gt_rgba, sampling_mask):
            """
            Adds a timestep sample:
            pred_rgba: predicted rgba image
            gt_rgba: ground-truth rgba image
            sampling_mask: binary mask of the pixels that were sampled
            """
            B = pred_rgba.shape[0]

            #apply border
            pred_rgba = pred_rgba[:, :, LOSS_BORDER:-LOSS_BORDER,
                                  LOSS_BORDER:-LOSS_BORDER]
            gt_rgba = gt_rgba[:, :, LOSS_BORDER:-LOSS_BORDER,
                              LOSS_BORDER:-LOSS_BORDER]

            # PSNR
            psnr_color = psnrLoss(pred_rgba[:, 0:3, :, :],
                                  gt_rgba[:, 0:3, :, :]).cpu().numpy()
            psnr_alpha = psnrLoss(pred_rgba[:, 3:4, :, :],
                                  gt_rgba[:, 3:4, :, :]).cpu().numpy()

            # SSIM
            ssim_color = ssimLoss(pred_rgba[:, 0:3, :, :],
                                  gt_rgba[:, 0:3, :, :]).cpu().numpy()
            ssim_alpha = ssimLoss(pred_rgba[:, 3:4, :, :],
                                  gt_rgba[:, 3:4, :, :]).cpu().numpy()

            # Perceptual
            lpips_color = torch.cat(
                [lpipsColor(pred_rgba[b, 0:3, :, :], gt_rgba[b, 0:3, :, :],
                            normalize=True) for b in range(B)],
                dim=0).cpu().numpy()

            # Samples
            samples = torch.mean(sampling_mask, dim=(1, 2, 3)).cpu().numpy()

            # Write samples to file
            for b in range(B):
                assert self.stats_index < self.expected_num_samples, "Adding more samples than specified"
                self.stats_file[self.stats_index, :] = np.array([
                    psnr_color[b], psnr_alpha[b], ssim_color[b],
                    ssim_alpha[b], lpips_color[b], samples[b]
                ], dtype='f')
                self.stats_index += 1

            # Histogram
            self.histogram_counter += 1

            # L1 difference summed over the three color channels, normalized
            # by 6 so the values stay inside the histogram range (0, 1)
            color_diff = F.l1_loss(gt_rgba[:, 0:3, :, :],
                                   pred_rgba[:, 0:3, :, :],
                                   reduction='none').sum(dim=1) / 6
            histogram, _ = np.histogram(color_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_color += (
                histogram /
                (NUM_BINS * B) - self.histogram_color) / self.histogram_counter

            alpha_diff = F.l1_loss(gt_rgba[:, 3, :, :],
                                   pred_rgba[:, 3, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(alpha_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_alpha += (
                histogram /
                (NUM_BINS * B) - self.histogram_alpha) / self.histogram_counter

        def close_stats_file(self):
            self.stats_file.attrs['NumEntries'] = self.stats_index

        def write_histogram(self):
            """
            After every sample for the current dataset was processed, write
            a histogram of the errors in a new file
            """
            for i in range(NUM_BINS):
                self.histo_file[i, :] = np.array([
                    i / NUM_BINS, (i + 1) / NUM_BINS, self.histogram_color[i],
                    self.histogram_alpha[i]
                ])

    #########################
    # DATASET
    #########################
    class FullResDataset(torch.utils.data.Dataset):
        def __init__(self, file_high, file_low):
            self.hdf5_file_high = h5py.File(file_high, 'r')
            self.dset_high = self.hdf5_file_high['gt']
            self.hdf5_file_low = h5py.File(file_low, 'r')
            self.dset_low = self.hdf5_file_low['gt']
            print("Dataset shape:", self.dset_high.shape)

        def __len__(self):
            return self.dset_high.shape[0]

        def num_timesteps(self):
            return self.dset_high.shape[1]

        def __getitem__(self, idx):
            return self.dset_high[idx, ...], self.dset_low[idx, ...]

    #########################
    # COMPUTE STATS for each dataset
    #########################
    for dataset_name, dataset_file_high, dataset_file_low in DATASETS:
        dataset_file_high = os.path.join(DATASET_PREFIX, dataset_file_high)
        dataset_file_low = os.path.join(DATASET_PREFIX, dataset_file_low)
        print("Compute statistics for", dataset_name)

        # create output file
        os.makedirs(OUTPUT_FOLDER, exist_ok=True)
        output_file = os.path.join(OUTPUT_FOLDER, dataset_name + '.hdf5')
        print("Save to", output_file)
        with h5py.File(output_file, 'a') as output_hdf5_file:

            # load dataset
            dataset = FullResDataset(dataset_file_high, dataset_file_low)
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

            # define statistics
            StatsCfg = collections.namedtuple(
                "StatsCfg", "stats importance recon heatmin heatmean pattern")
            statistics = []
            for (inet, rnet) in NETWORK_COMBINATIONS:
                for (heatmin, heatmean) in HEATMAP_CFG:
                    for pattern in SAMPLING_PATTERNS:
                        stats_info = {
                            'importance': inet.name(),
                            'reconstruction': rnet.name(),
                            'heatmin': heatmin,
                            'heatmean': heatmean,
                            'pattern': pattern
                        }
                        stats_filename = "Stats_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 1000,
                            heatmean * 1000, pattern)
                        histo_filename = "Histogram_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 1000,
                            heatmean * 1000, pattern)
                        s = Statistics()
                        s.create_datasets(
                            output_hdf5_file, stats_filename, histo_filename,
                            len(dataset) * dataset.num_timesteps(), stats_info)
                        statistics.append(
                            StatsCfg(stats=s,
                                     importance=inet,
                                     recon=rnet,
                                     heatmin=heatmin,
                                     heatmean=heatmean,
                                     pattern=pattern))
            print(len(statistics),
                  "different combinations are performed per sample")

            # compute statistics
            try:
                with torch.no_grad():
                    num_minibatch = len(data_loader)
                    pg = ProgressBar(num_minibatch, 'Evaluation', length=50)
                    for iteration, (crop_high,
                                    crop_low) in enumerate(data_loader, 0):
                        pg.print_progress_bar(iteration)
                        crop_high = crop_high.to(device)
                        crop_low = crop_low.to(device)
                        B, T, C, H, W = crop_high.shape
                        _, _, _, Hlow, Wlow = crop_low.shape
                        assert Hlow * UPSCALING == H

                        # try out each combination
                        for s in statistics:
                            #print(s)
                            # get input to evaluation
                            importanceNetUpscale = \
                                s.importance.networkUpscaling()
                            importancePostUpscale = UPSCALING // importanceNetUpscale
                            pattern = sampling_pattern[s.pattern][:H, :W]

                            # loop over timesteps
                            pattern = pattern.unsqueeze(0).unsqueeze(0)
                            previous_importance = None
                            previous_output = None
                            reconstructions = []
                            for j in range(T):
                                # extract flow (always the last two channels of crop_high)
                                flow = crop_high[:, j, C - 2:, :, :]

                                # compute importance map
                                importance_input = crop_low[:, j, :8, :, :]
                                if j == 0 or s.importance.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        1,
                                        importance_input.shape[2] *
                                        importanceNetUpscale,
                                        importance_input.shape[3] *
                                        importanceNetUpscale,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    flow_low = F.interpolate(
                                        flow,
                                        scale_factor=1 / importancePostUpscale)
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_importance, flow_low, 1,
                                        False)
                                importance_map = s.importance.call(
                                    importance_input, previous_input)
                                if len(importance_map.shape) == 3:
                                    importance_map = \
                                        importance_map.unsqueeze(1)
                                previous_importance = importance_map

                                target_mean = s.heatmean
                                if USE_BINARY_SEARCH_ON_MEAN:
                                    # For regular sampling, the normalization does not work properly,
                                    # use binary search on the heatmean instead
                                    def f(x):
                                        postprocess = importance.PostProcess(
                                            s.heatmin, x,
                                            importancePostUpscale,
                                            LOSS_BORDER //
                                            importancePostUpscale, 'basic')
                                        importance_map2 = postprocess(
                                            importance_map)[0].unsqueeze(1)
                                        sampling_mask = (
                                            importance_map2 >= pattern).to(
                                                dtype=importance_map.dtype)
                                        samples = torch.mean(
                                            sampling_mask).item()
                                        return samples

                                    target_mean = binarySearch(
                                        f, s.heatmean, s.heatmean, 10, 0, 1)
                                    #print("Binary search for #samples, mean start={}, result={} with samples={}, original={}".
                                    #      format(s.heatmean, s.heatmean, f(target_mean), f(s.heatmean)))

                                # normalize and upscale importance map
                                postprocess = importance.PostProcess(
                                    s.heatmin, target_mean,
                                    importancePostUpscale,
                                    LOSS_BORDER // importancePostUpscale,
                                    'basic')
                                importance_map = postprocess(
                                    importance_map)[0].unsqueeze(1)
                                #print("mean:", torch.mean(importance_map).item())

                                # create samples
                                sample_mask = (importance_map >= pattern).to(
                                    dtype=importance_map.dtype)

                                reconstruction_input = torch.cat(
                                    (
                                        # rgba, normal xyz, depth
                                        sample_mask *
                                        crop_high[:, j, 0:8, :, :],
                                        # sample mask
                                        sample_mask),
                                    dim=1)

                                # warp previous output
                                if j == 0 or s.recon.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        4,
                                        H,
                                        W,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_output, flow, 1, False)

                                # run reconstruction network
                                reconstruction = s.recon.call(
                                    reconstruction_input, sample_mask,
                                    previous_input)

                                # clamp
                                reconstruction_clamped = torch.clamp(
                                    reconstruction, 0, 1)
                                reconstructions.append(reconstruction_clamped)

                                ## test
                                #if j==0:
                                #    plt.figure()
                                #    plt.imshow(reconstruction_clamped[0,0:3,:,:].cpu().numpy().transpose((1,2,0)))
                                #    plt.title(s.stats.stats_name)
                                #    plt.show()

                                # save for next frame
                                previous_output = reconstruction_clamped

                            #endfor: timesteps

                            # compute statistics
                            reconstructions = torch.cat(reconstructions, dim=0)
                            crops_high = torch.cat(
                                [crop_high[:, j, :8, :, :] for j in range(T)],
                                dim=0)
                            sample_masks = torch.cat([sample_mask] * T, dim=0)
                            s.stats.add_timestep_sample(
                                reconstructions, crops_high, sample_masks)

                        # endfor: statistic
                    # endfor: batch

                    pg.print_progress_bar(num_minibatch)
                # end no_grad()
            finally:
                # close files
                for s in statistics:
                    s.stats.write_histogram()
                    s.stats.close_stats_file()
    def __init__(self, alpha=0.85):
        self.alpha = alpha
        # note: despite the name, this attribute holds an L1 criterion
        self.L2Loss = nn.L1Loss()
        self.SSIMLoss = SSIM(nc=3)
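
    # A common way to combine these terms is the SSIM+L1 photometric loss of
    # Godard et al. (monodepth); hypothetical here, the original forward()
    # is not shown:
    #
    #   def forward(self, pred, target):
    #       ssim_term = (1 - self.SSIMLoss(pred, target)) / 2
    #       l1_term = self.L2Loss(pred, target)
    #       return self.alpha * ssim_term + (1 - self.alpha) * l1_term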