Example #1
def test(epoch, i):
    net1 = DPDNN()
    net1 = net1.cuda()
    net1 = nn.DataParallel(net1)
    net1.load_state_dict(torch.load(opt.load_model_path))

    noise = Image.open('./data/test_data/sigma%d/test.png' % opt.noise_level)
    label = Image.open('./data/test_data/sigma%d/testgt.png' % opt.noise_level)

    # PIL .size is (width, height)
    img_W = noise.size[0]
    img_H = noise.size[1]

    transform = T.ToTensor()
    transform1 = T.ToPILImage()
    noise = transform(noise)
    noise = noise.resize_(1, 1, img_H, img_W)
    noise = noise.cuda()
    label = np.array(label).astype(np.float32)

    net1.eval()
    with torch.no_grad():
        output = net1(noise)  # shape (1, 1, H, W)
    output = torch.clamp(output, min=0.0, max=1.0)
    output = output.view(img_H, img_W).cpu()
    output_img = transform1(output)

    # every 500 batches save test output
    if i % 500 == 0:
        save_index = str(
            int(epoch * (opt.num_data / opt.batch_size / 500) + (i + 1) / 500))
        output_img.save('./data/test_data/sigma%d/test_output/' %
                        opt.noise_level + save_index + '.png')

    output = np.array(output_img)
    mse, psnr = PSNR(output, label)
    return mse, psnr
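
All of these examples call a project-defined PSNR helper rather than a library routine, and its exact signature varies from snippet to snippet. As a point of reference, here is a minimal NumPy sketch matching the (mse, psnr) return convention that Examples #1 and #12 unpack, assuming 8-bit images in the 0-255 range:

import numpy as np

def PSNR(output, label, max_val=255.0):
    """Return (mse, psnr) for two arrays of identical shape."""
    output = output.astype(np.float64)
    label = label.astype(np.float64)
    mse = np.mean((output - label) ** 2)
    if mse == 0:
        return mse, float('inf')  # identical images
    return mse, 10.0 * np.log10(max_val ** 2 / mse)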
Example #2
def test(save_imgs=False):
    test_data = testDate("/home/yzm/pyproject/hw4/data/test_images",
                         "/home/yzm/pyproject/hw4/data/test_source")
    test_load = DataLoader(dataset=test_data, batch_size=1, shuffle=False)

    model = Net().cuda()
    model.load_state_dict(
        torch.load(
            "/home/yzm/pyproject/hw4/chkpt/guassian/model_ffinal120_epoch.state"
        ))
    #model=torch.load("/home/yzm/pyproject/hw4/chkpt/guassian/model_epoch_50.pth")
    # for i in model.parameters():
    #     print(i)
    model.eval()
    i = 0
    psnr = 0
    for x, x1 in test_load:
        x1 = x1.cuda()
        x = x.cuda()
        with torch.no_grad():
            y = model(x)
        psnr = psnr + PSNR(y, x1)
        if save_imgs:
            save_image(
                x1,
                f"/home/yzm/pyproject/hw4/experience/sourcegray/sourceimage_{i}_imag.jpg"
            )
            save_image(
                y,
                f"/home/yzm/pyproject/hw4/experience/test_out2/testimage_{i}_imag.jpg"
            )
        i += 1
    print(psnr.item() / len(test_load))  # average PSNR over the test set
Example #3
  def test(self, config):
    """
    Output: bicubic image and SRCNN image pairs
    """
    self.loadModel()
    image_paths = prepare_data(self.sess, dataset="Test")
    avg_psnr_srcnn = 0
    avg_psnr_bicubic = 0
    for image_path in image_paths:
        image_dir, image_name = os.path.split(image_path)
        nx, ny, bicubic_img, ground_truth = input_setup(self.sess, config, image_path)
        data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "test.h5")

        train_data, train_label = read_data(data_dir) # train_data(bicubic):(33, 33); train_label(gt):(21, 21)
        result = self.pred.eval({self.images: train_data, self.labels: train_label}) 

        PSNR_bicubic = PSNR(train_data, train_label)
        PSNR_srcnn = PSNR(result, train_label)
        avg_psnr_bicubic += PSNR_bicubic
        avg_psnr_srcnn += PSNR_srcnn

        result = merge(result, [nx, ny]) # result(SRCNN):(21, 21)
        result = result.squeeze()
        image_dir = os.path.join(os.getcwd(), config.sample_dir)
        image_path = os.path.join(image_dir, image_name)
        bicubic_path = os.path.join(image_dir, "bicubic_"+image_name)

        px = 1/plt.rcParams['figure.dpi']  # pixel in inches
        width = max(ground_truth.shape[0], ground_truth.shape[1])
        
        # plot image
        plt.figure(image_name, figsize=(2*width*px,3*width*px))
        ax1 = plt.subplot(3,1,1)
        ax1.set_title("SRCNN - PSNR - " + str(PSNR_srcnn))
        plt.imshow(toimage(result), cmap='gray')
        ax2 = plt.subplot(3,1,2)
        ax2.set_title("Bicubic - PSNR - " + str(PSNR_bicubic))
        plt.imshow(toimage(bicubic_img), cmap='gray')
        ax3 = plt.subplot(3,1,3)
        ax3.set_title("Ground Truth")
        plt.imshow(toimage(ground_truth), cmap='gray')
        plt.savefig(image_path)
        plt.close()
    avg_psnr_srcnn /= len(image_paths)
    avg_psnr_bicubic /= len(image_paths)
    print("average PSNR of srcnn = {}\n average PSNR of bicubic = {}".format(avg_psnr_srcnn, avg_psnr_bicubic))
Example #4
    def eval(self, config):
        print('\nPreparing Data..\n')
        paths = prepare_data(config)
        num_data = len(paths)

        psnrFile = open('psnr_sigma_%s.txt' % config.noise_level, 'a')

        avg_time = 0
        avg_psnr = 0
        print('\nNow evaluating the dataset\n')
        for i in range(num_data):

            input_, label_ = get_image(paths[i], config)

            image_shape = input_.shape
            label_shape = label_.shape

            self.build_model(image_shape, label_shape)

            self.sess.run(tf.global_variables_initializer())

            self.load(config.checkpoint_dir, restore=True)

            time_ = time.time()
            result = self.sess.run([self.preds],
                                   feed_dict={self.images: input_ / 255.0})
            avg_time += time.time() - time_

            self.sess.close()
            tf.reset_default_graph()
            self.sess = tf.Session()

            img = np.squeeze(result) * 255
            img = np.clip(img, 0, 255)
            psnr = PSNR(img, label_)
            avg_psnr += psnr

            print('image: [%d/%d] time: [%.4f] psnr: [%.4f]' %
                  (i + 1, num_data, time.time() - time_, psnr))
            psnrFile.write('image: [%d/%d] time: [%.4f] psnr: [%.4f]\n' %
                           (i + 1, num_data, time.time() - time_, psnr))

            if not os.path.isdir(os.path.join(os.getcwd(), config.result_dir)):
                os.makedirs(os.path.join(os.getcwd(), config.result_dir))

            filename = os.path.basename(paths[i])
            imsave(img,
                   path=config.result_dir +
                   '/%d_sigma/JPEGImages/' % config.noise_level + filename)

        print('\nAverage Time: %.4f' % (avg_time / num_data))
        psnrFile.write('\nAverage Time: %.4f' % (avg_time / num_data))
        print('\nAverage PSNR: %.4f' % (avg_psnr / num_data))
        psnrFile.write('\nAverage PSNR: %.4f' % (avg_psnr / num_data))
        psnrFile.close()
Example #5
 def evaluate(self, test_dst=Dataset('./test_image/'), verbose=0):
     """
     Evaluate the PSNR of the whole (merged) images.
     Input:
         test_dst, Dataset. An instance of Dataset.
         verbose, int.
     Return:
         average PSNR over the images in test_path, plus the per-image list.
     """
     psnr_list = []
     test_path = test_dst.image_dir
     for _, _, files in os.walk(test_path):
         for image_name in files:
             # Read in image
             _, psnr = self.gen_sr_img(test_dst,
                                       image_name,
                                       verbose=verbose)
             psnr_list.append(psnr)
     ave_psnr = np.mean(psnr_list)
     print('average PSNR of test images (whole) in %s is %f.\n' %
           (test_path, ave_psnr))
     return ave_psnr, psnr_list
Example #6
    def eval(self, config):
        print("\nPrepare Data...\n")
        paths = prepare_data(config)
        data_num = len(paths)

        avg_time = 0
        avg_psnr = 0
        print("\nNow Start Testing...\n")
        for idx in range(data_num):
            input_, label_ = get_image(paths[idx], config.scale,
                                       config.matlab_bicubic)

            images_shape = input_.shape
            labels_shape = label_.shape
            self.build_model(images_shape, labels_shape)
            tf.global_variables_initializer().run(session=self.sess)

            self.load(config.checkpoint_dir, restore=True)

            time_ = time.time()
            result = self.sess.run([self.pred],
                                   feed_dict={self.images: input_ / 255.0})
            avg_time += time.time() - time_

            # import matlab.engine
            # eng = matlab.engine.start_matlab()
            # time_ = time.time()
            # result = np.asarray(eng.imresize(matlab.double((input_[0, :] / 255.0).tolist()), config.scale, 'bicubic'))
            # avg_time += time.time() - time_

            self.sess.close()
            tf.reset_default_graph()
            self.sess = tf.Session()

            x = np.squeeze(result) * 255.0
            x = np.clip(x, 0, 255)
            psnr = PSNR(x, label_[0], config.scale)
            avg_psnr += psnr

            print("image: %d/%d, time: %.4f, psnr: %.4f" %
                  (idx, data_num, time.time() - time_, psnr))

            if not os.path.isdir(os.path.join(os.getcwd(), config.result_dir)):
                os.makedirs(os.path.join(os.getcwd(), config.result_dir))
            imsave(x[:, :, ::-1], config.result_dir + "/%d.png" % idx)

        print("Avg. Time:", avg_time / data_num)
        print("Avg. PSNR:", avg_pasn / data_num)
Example #7
def test_one_graph(adj, adj_orig, features_csr, num_node, k_num, model,
                   placeholders, sess, feed_dict):
    adj_orig = adj_orig - sp.dia_matrix(
        (adj_orig.diagonal()[np.newaxis, :], [0]),
        shape=adj_orig.shape)  # delete self loop
    adj_orig.eliminate_zeros()
    adj_new = adj
    features = sparse_to_tuple(features_csr.tocoo())
    adj_label = adj_new + sp.eye(adj.shape[0])
    adj_label = sparse_to_tuple(adj_label)
    adj_clean = adj_orig.tocsr()
    k_num = int(k_num * size / noise_ratio)  # match the budget size
    if k_num != 0:
        adj_norm, adj_norm_sparse = preprocess_graph(adj_new)
        feed_dict.update({placeholders["adj"]: adj_norm})
        feed_dict.update({placeholders["adj_orig"]: adj_label})
        feed_dict.update({placeholders["features"]: features})
        feed_dict.update({placeholders['dropout']: FLAGS.dropout})
        model.k = k_num
        x_tilde = sess.run(model.realD_tilde,
                           feed_dict=feed_dict,
                           options=run_options)
        noised_indexes, clean_indexes = get_noised_indexes(
            x_tilde, adj_new, num_node)
        feed_dict.update({placeholders["noised_mask"]: noised_indexes})
        feed_dict.update({placeholders["clean_mask"]: clean_indexes})
        feed_dict.update({placeholders["noised_num"]: len(noised_indexes) / 2})
        # debug readouts (not used below)
        test1 = model.test_new_indexes.eval(session=sess, feed_dict=feed_dict)
        test0 = model.test_noised_index.eval(session=sess, feed_dict=feed_dict)
        new_adj = get_new_adj(feed_dict, sess, model, noised_indexes, adj_new,
                              k_num, num_node)
    else:
        # new_adj = adj
        new_adj = adj.copy()
    new_adj_sparse = sp.csr_matrix(new_adj)

    psnr = PSNR(adj_clean[:num_node, :num_node],
                new_adj_sparse[:num_node, :num_node])
    wls = WL_no_label(adj_clean[:num_node, :num_node],
                      new_adj_sparse[:num_node, :num_node])
    return psnr, wls
Example #8
def test(opt, netG):
    aver_psnr = 0.0
    #aver_ssim = 0.0
    counter = 0

    test = ReadConcat(opt)
    testset = DataLoader(test, batch_size=1, shuffle=False)
    save_path = os.path.join(opt.out_dir, 'test')
    check_folder(save_path)
    netG.eval()

    for i,data in enumerate(testset):
        counter = i + 1  # number of images processed so far
        data_A = data['A']  # blur
        data_B = data['B']  # sharp
        if torch.cuda.is_available():
            data_A = data_A.cuda(opt.gpu)
            data_B = data_B.cuda(opt.gpu)
        # Variable is deprecated; run the forward pass itself under no_grad
        with torch.no_grad():
            fakeB = netG(data_A)
        realB = data_B
        # fakeB = image_recovery(fakeB.squeeze().cpu().detach().numpy())
        # realB = image_recovery(realB.squeeze().cpu().detach().numpy())
        fakeB = image_recovery(fakeB)
        realB = image_recovery(realB)

        aver_psnr += PSNR(fakeB, realB)
        # fakeB = Image.fromarray(fakeB)
        # realB = Image.fromarray(realB)
        # aver_ssim += SSIM(fakeB, realB)

        # save image
        img_path = data['img_name']
        save_image(fakeB, os.path.join(save_path, img_path[0]))
        print('saved {}'.format(os.path.join(save_path, img_path[0])))

    aver_psnr /= counter
    # aver_ssim /= counter
    print('PSNR = %f' % (aver_psnr))
Example #9
    def test(self, config):
        print("\nPrepare Data...\n")
        paths = prepare_data(config)
        data_num = len(paths)

        avg_time = 0
        avg_psnr = 0
        print("\nNow Start Testing...\n")
        for idx in range(data_num):
            input_, label_ = get_image(paths[idx], config.scale)

            images_shape = input_.shape
            labels_shape = label_.shape
            self.build_model(images_shape, labels_shape)
            tf.global_variables_initializer().run(session=self.sess)

            self.load(config.checkpoint_dir)
            
            time_ = time.time()
            result = self.sess.run([self.pred], feed_dict={self.images: input_ / 255.0})
            avg_time += time.time() - time_

            self.sess.close()
            tf.reset_default_graph()
            self.sess = tf.Session()
            
            x = np.squeeze(result) * 255.0
            x = np.clip(x, 0, 255)
            # checkimage(x)
            psnr = PSNR(x, label_[0])
            avg_psnr += psnr

            print "image: %d/%d, time: %.4f, psnr: %.4f" % (idx, data_num, time.time() - time_ , psnr)

            if not os.path.isdir(os.path.join(os.getcwd(),config.result_dir)):
                os.makedirs(os.path.join(os.getcwd(),config.result_dir))
            imsave(x, config.result_dir+'/%d.png' % idx)

        print "Avg. Time:", avg_time / data_num
        print "Avg. PSNR:", avg_pasn / data_num
Example #10
 def train(self, X_train, X_test):
     """
     trains the autoencoder model
     :param X_train: training data
     :param X_test: validation data
     :return: evaluation metrics on X_test and the PSNR of its reconstruction
     """
     es = K.callbacks.EarlyStopping(monitor='val_loss',
                                    mode='min',
                                    verbose=1,
                                    patience=15)
     self.autoencoder.compile(optimizer='adam',
                              loss='mse',
                              metrics=['accuracy', PSNR])
     history = self.autoencoder.fit(X_train,
                                    X_train,
                                    validation_data=(X_test, X_test),
                                    epochs=30,
                                    batch_size=32,
                                    verbose=1,
                                    callbacks=[es])
     psnr = PSNR(X_test, self.autoencoder.predict(X_test))
     return self.autoencoder.evaluate(X_test, X_test), psnr
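
In this example PSNR is passed to Keras as a metric and also called directly on arrays, so it has to be written in terms of tensor ops. A minimal sketch, assuming images scaled to [0, 1] (the project's actual definition may differ):

import tensorflow as tf

def PSNR(y_true, y_pred):
    # usable as a Keras metric; tf.image.psnr returns one value per image
    return tf.reduce_mean(tf.image.psnr(y_true, y_pred, max_val=1.0))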
Example #11
def test(data_path, scale, device, results_path, csv_path, label):
    images_name = [x for x in listdir(data_path)]

    model = ESPCN(upscale_factor=scale).to(device)
    model.load_state_dict(torch.load('saved_models/' + 'UPSCALE_X' + str(scale)+ '/' + 'best.pth'))

    ESPCN_PSNR = []
    BICUBIC_PSNR = []

    output_path = results_path + 'UPSCALE_X' + str(scale) + '/'
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    bicubic_path = results_path + 'BICUBIC_X' + str(scale) + '/'
    if not os.path.exists(bicubic_path):
        os.makedirs(bicubic_path)

    lr_path = results_path + 'LR_X' + str(scale) + '/'
    if not os.path.exists(lr_path):
        os.makedirs(lr_path)
   
    for image_name in tqdm(images_name, desc=('convert LR images to HR images - ' + label)):
        image = Image.open(data_path + image_name)

        # produce LR and HR image 
        image_width = (image.width // scale) * scale
        image_height = (image.height // scale) * scale

        HR = image.resize((image_width, image_height), resample=Image.BICUBIC)
        LR = HR.resize((HR.width//scale, HR.height//scale), resample=Image.BICUBIC)
        LR.save(lr_path + image_name)

        # produce reference bicubic interpolation image
        bicubic = LR.resize((LR.width*scale, LR.height*scale), resample=Image.BICUBIC)
        bicubic.save(bicubic_path + image_name)

        # produce SR image
        LR_Y, LR_Cb, LR_Cr = LR.convert('YCbCr').split()
        input_image = transforms.ToTensor()(LR_Y).view(1, -1, LR_Y.size[1], LR_Y.size[0]).to(device)
        
        output_image = model(input_image)
        output_Y = output_image.data[0].cpu().numpy()
        output_Y *= 255
        output_Y = output_Y.clip(0,255)
        output_Y = Image.fromarray(np.uint8(output_Y[0]), mode='L')

        output_Cb = LR_Cb.resize(output_Y.size, Image.BICUBIC)
        output_Cr = LR_Cr.resize(output_Y.size, Image.BICUBIC)

        SR = Image.merge('YCbCr', [output_Y, output_Cb, output_Cr]).convert('RGB')
        SR.save(output_path + image_name)

        # calculate PSNR
        HR_Y, _, _ = HR.convert('YCbCr').split()
        HR_Y = transforms.ToTensor()(HR_Y).to(device)
        bicubic_Y, _, _ = bicubic.convert('YCbCr').split()
        bicubic_Y = transforms.ToTensor()(bicubic_Y).to(device)

        SR_Y = output_image.clamp(0.0, 1.0)  # match the clipping applied to the saved image

        ESPCN_PSNR.append(PSNR(SR_Y, HR_Y).item())
        BICUBIC_PSNR.append(PSNR(bicubic_Y, HR_Y).item())
   
    # write to CSV
    file_path = csv_path + '_X' + str(scale) + '.csv'
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    solution_rows = [('Image', 'ESPCN PSNR', 'BICUBIC PSNR','Upscale')] + [(y, ESPCN_PSNR[i], BICUBIC_PSNR[i], scale) for (i, y) in enumerate(images_name)]
    with open(file_path, 'w', newline="") as f:
        writer = csv.writer(f)
        writer.writerows(solution_rows)
Example #12
    net = DPDNN()
    net = nn.DataParallel(net)
    net.load_state_dict(torch.load(opt.load_model_path))

    img = Image.open(label_img)
    # img.show()
    label = np.array(img).astype(np.float32)  # label:0~255
    # PIL .size is (width, height)
    img_W = img.size[0]
    img_H = img.size[1]
    img = transform1(img)

    img_noise = add_noise(img, opt.noise_level).resize_(1, 1, img_H, img_W)

    output = net(img_noise)
    output = output.cpu()
    output = output.resize_(img_H, img_W)
    output = torch.clamp(output, min=0, max=1)
    output = transform2(output)

    # output.show()
    # To save the output(denoised) image, you must create a new folder. Here is my path.
    output.save('./output/sigma%d/%d.png' % (opt.noise_level, i))

    img_noise = transform2(img_noise.resize_(img_H, img_W))
    # img_noise.show()
    img_noise.save('./output/sigma%d/%d_noise.png' % (opt.noise_level, i))
    output = np.array(output)  # output:0~255

    # Because of the randomness of Gaussian noise, the output results are different each time.
    print(i, 'MSE loss:%f, PSNR:%f' % (PSNR(output, label)))
Example #13
        mdl = inference.LoadedModel(os.path.join(MODEL_DIR, p), device,
                                    UPSCALING)
        if mdl.unshaded:
            model_list[i] = UnshadedModel(UPSCALING, mdl.model, shading)
        else:
            model_list[i] = ShadedModel(UPSCALING, mdl.model, shading)
    else:
        model_list[i] = SimpleUpsample(UPSCALING, m["name"], shading)

# create output folder
os.makedirs(OUTPUT_FOLDER, exist_ok=True)

# STATISTICS
ssimLoss = MSSSIM()
ssimLoss.to(device)
psnrLoss = PSNR()
psnrLoss.to(device)
BORDER = 15
MIN_FILLING = 0.05
NUM_BINS = 100


class Statistics:
    def __init__(self):
        self.reset()
        self.downsample = nn.Upsample(scale_factor=1.0 / UPSCALING,
                                      mode='bilinear')
        self.downsample_loss = nn.MSELoss(reduction='none')
        self.downsample.to(device)
        self.downsample_loss.to(device)
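
Examples #13, #16 and #19 instantiate PSNR() and move it to a device like a loss module, which implies an nn.Module implementation. A minimal sketch, assuming inputs in [0, 1]:

import torch
import torch.nn as nn

class PSNR(nn.Module):
    def __init__(self, max_val=1.0):
        super().__init__()
        self.max_val = max_val

    def forward(self, pred, target):
        # classic definition: 10 * log10(MAX^2 / MSE)
        mse = torch.mean((pred - target) ** 2)
        return 10.0 * torch.log10(self.max_val ** 2 / mse)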
Example #14
	def __init__(self, CONFIG):

		if CONFIG.mode == 1:
		
			gen_model = get_model('G', CONFIG.gen_model)
			dis_model = get_model('D', CONFIG.dis_model)
			VGG = tl.models.vgg19(pretrained=True, end_with='pool4', mode='static')

			lr_init = 1e-4
			lr_v = tf1.Variable(lr_init)
			beta1 = 0.9
			n_epoch_init = CONFIG.init_epoch  # e.g. 20
			n_epoch = CONFIG.total_epoch  # e.g. 100
			batch_size = CONFIG.batch_size  # e.g. 8
			decay_every = int(n_epoch / 2)
			lr_decay = 0.1
			resume_epoch = 0
			 
			if CONFIG.load_weights:
			
				resume_epoch = CONFIG.model_epoch
			
				if CONFIG.gan_init:
					gen_model.load_weights('Checkpoints/GAN_INIT_{}_EPID_{}.h5'.format(CONFIG.gen_model, CONFIG.model_epoch))
					resume_epoch = 0
				else:	
					gen_model.load_weights('Checkpoints/GAN_{}_EPID_{}.h5'.format(CONFIG.gen_model, CONFIG.model_epoch))
					dis_model.load_weights('Checkpoints/DIS_{}_GAN_{}_EPID_{}.h5'.format(CONFIG.dis_model, CONFIG.gen_model, CONFIG.model_epoch))
				
		
			g_optimizer_init = tf2.optimizers.Adam(lr_v, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
			g_optimizer = tf2.optimizers.Adam(lr_v, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
			d_optimizer = tf2.optimizers.Adam(lr_v, beta_1=0.9, beta_2=0.999, epsilon=1e-07)

			gen_model.train()
			dis_model.train()
			VGG.train()

			train_ds = get_train_dataset(CONFIG)

			if not CONFIG.load_weights or CONFIG.gan_init:
			
				print('##	initial learning (G)')
				
				for epoch in range(n_epoch_init):
					for step, (lr_patchs, hr_patchs) in enumerate(train_ds):
						if lr_patchs.shape[0] != batch_size:  
							break
						step_time = time.time()
						with tf1.GradientTape() as tape:
							out_bicu = generate_bicubic_samples(lr_patchs.numpy(), CONFIG)
							#print( lr_patchs.shape, out_bicu.shape)
							fake_patchs = gen_model([lr_patchs, out_bicu])
							
							mse_loss = tl.cost.mean_squared_error(fake_patchs, hr_patchs, is_mean=True)
						grad = tape.gradient(mse_loss, gen_model.trainable_weights)
						g_optimizer_init.apply_gradients(zip(grad, gen_model.trainable_weights))
						print("Epoch: [{}/{}] step: [{}/{}] time: {:.3f}s, mse: {:.6f} ".format(
						epoch+1+resume_epoch, resume_epoch+n_epoch_init, step+1, CONFIG.no_of_batches, time.time() - step_time, mse_loss))
					
					path = 'Training_outputs/gan_init_{}_train_{}.png'.format(CONFIG.gen_model, epoch+1+resume_epoch)
					tl.vis.save_images(fake_patchs.numpy(), [2, CONFIG.batch_size//2], path)
					
					if ((epoch+1+resume_epoch) % CONFIG.save_interval) == 0:
						gen_model.save_weights('Checkpoints/GAN_INIT_{}_EPID_{}.h5'.format(CONFIG.gen_model, epoch+1+resume_epoch))
				
				gen_model.save_weights('Checkpoints/GAN_INIT_{}_EPID_{}.h5'.format(CONFIG.gen_model, n_epoch_init + resume_epoch))
			
			
			
			print('##	adversarial learning (G, D)')
				
			for epoch in range(n_epoch):

				for step, (lr_patchs, hr_patchs) in enumerate(train_ds):
					if lr_patchs.shape[0] != batch_size: # if the remaining data in this epoch < batch_size
						break
					step_time = time.time()
					with tf1.GradientTape(persistent=True) as tape:
		      
						#out_bicu = generate_bicubic_samples(np.squeeze(lr_patchs,axis=3), CONFIG.scale)
						out_bicu = generate_bicubic_samples(lr_patchs.numpy(), CONFIG)
						#print( lr_patchs.shape, out_bicu.shape)
						fake_patchs = gen_model([lr_patchs, out_bicu])

						logits_fake = dis_model(fake_patchs)
						logits_real = dis_model(hr_patchs)

						feature_fake = VGG((fake_patchs+1)/2.) # the pre-trained VGG uses the input range of [0, 1]
						feature_real = VGG((hr_patchs+1)/2.)
		      
						d_loss1 = tl.cost.sigmoid_cross_entropy(logits_real, tf1.ones_like(logits_real))
						d_loss2 = tl.cost.sigmoid_cross_entropy(logits_fake, tf1.zeros_like(logits_fake))
						d_loss = d_loss1 + d_loss2
			      
						g_gan_loss = 1e-3 * tl.cost.sigmoid_cross_entropy(logits_fake, tf1.ones_like(logits_fake))
						mse_loss = tl.cost.mean_squared_error(fake_patchs, hr_patchs, is_mean=True)

						vgg_loss = 2e-6 * tl.cost.mean_squared_error(feature_fake, feature_real, is_mean=True)
						g_loss = mse_loss + vgg_loss + g_gan_loss

					grad = tape.gradient(g_loss, gen_model.trainable_weights)
					g_optimizer.apply_gradients(zip(grad, gen_model.trainable_weights))
					grad = tape.gradient(d_loss, dis_model.trainable_weights)
					d_optimizer.apply_gradients(zip(grad, dis_model.trainable_weights))
					print("Epoch: [{}/{}] step: [{}/{}] time: {:.3f}s, g_loss(mse:{:.6f}, vgg:{:.6f}, adv:{:.6f}) d_loss: {:.6f}".format(
					epoch+1+resume_epoch, resume_epoch + n_epoch, step+1, CONFIG.no_of_batches, time.time() - step_time, mse_loss, vgg_loss, g_gan_loss, d_loss))
					
				# update the learning rate (currently disabled)
				# if (epoch+resume_epoch) % decay_every == 0:
				#     new_lr_decay = lr_decay**((epoch+resume_epoch) // decay_every)
				#     lr_v.assign(lr_init * new_lr_decay)
				#     print(" ** new learning rate: %f (for GAN)" % (lr_init * new_lr_decay))
				if (epoch+1+resume_epoch) % CONFIG.save_interval == 0:
					gen_model.save_weights('Checkpoints/GAN_{}_EPID_{}.h5'.format(CONFIG.gen_model, epoch+1+resume_epoch))
					dis_model.save_weights('Checkpoints/DIS_{}_GAN_{}_EPID_{}.h5'.format(CONFIG.dis_model, CONFIG.gen_model, epoch+1+resume_epoch))
					print("Save time: {}".format(time.asctime(time.localtime(time.time()))))
					for i in range(CONFIG.batch_size):
						if CONFIG.gen_model==1:
							lrimg = np.squeeze(lr_patchs[i], axis =-1)
							lrimg = np.pad(lrimg, ((64, 64), (64, 64)), constant_values=(255.0))
							#opimg = cast_uint8(fake_patchs[i].numpy())
							opimg = fake_patchs[i].numpy()
							combine_imgs = np.concatenate((lrimg[:,:,np.newaxis], out_bicu[i], opimg, hr_patchs[i]), axis = 1)
						else:
							lrimg = np.pad(lr_patchs[i], ((192, 192), (192, 192), (0, 0)), constant_values=(255.0))
							#opimg = cast_uint8(fake_patchs[i].numpy())
							opimg = fake_patchs[i].numpy()
							combine_imgs = np.concatenate((lrimg, out_bicu[i], opimg, hr_patchs[i]), axis = 1)
						path = 'Training_outputs/id_{}_gan_{}_train_{}.png'.format(i+1, CONFIG.gen_model, epoch+1+resume_epoch)
						tl.vis.save_image(combine_imgs,path)

			gen_model.save_weights('Checkpoints/GAN_{}_FINAL.h5'.format(CONFIG.gen_model))
			dis_model.save_weights('Checkpoints/DIS_{}_GAN_{}_FINAL.h5'.format(CONFIG.dis_model, CONFIG.gen_model))
					

				  

		elif CONFIG.mode==2:	## Validation

			model = get_model('G', CONFIG.gen_model)
			model.load_weights('Checkpoints/GAN_{}_EPID_{}.h5'.format(CONFIG.gen_model, CONFIG.model_epoch))
			model.eval()  ## disable dropout, batch norm moving avg ...

			save_time = time.time()
			
			## Reading Validation dataset
			lrimg_file_list = tl.files.load_file_list(path=CONFIG.dir_val_in, regx='.*.png', printable=False)
			hrimg_file_list = tl.files.load_file_list(path=CONFIG.dir_val_target, regx='.*.png', printable=False)
			lrimg_file_list.sort(key=tl.files.natural_keys)
			hrimg_file_list.sort(key=tl.files.natural_keys)
			lrimg_list = np.array(tl.vis.read_images(lrimg_file_list, path=CONFIG.dir_val_in, n_threads=32))
			hrimg_list = np.array(tl.vis.read_images(hrimg_file_list, path=CONFIG.dir_val_target, n_threads=32)) 
			
			if CONFIG.gen_model==1:
				lrimg_list = lrimg_list[:,:,:,np.newaxis] 
				hrimg_list = hrimg_list[:,:,:,np.newaxis]

			bcimg_list = generate_bicubic_samples(lrimg_list,CONFIG)
			opimg_list = model([tf1.cast(lrimg_list,tf1.float32), tf1.cast(bcimg_list,tf1.float32)]) 
			opimg_list = opimg_list.numpy()

			bicubic_psnr, model_psnr = PSNR(hrimg_list, bcimg_list, opimg_list)
			bicubic_ssim, model_ssim = SSIM(hrimg_list, bcimg_list, opimg_list)
			
			for i in range(lrimg_list.shape[0]):
				name= lrimg_file_list[i].split('/')[-1].split('.')[0]
				if CONFIG.gen_model==1:
					lrimg = np.pad(lrimg_list[i], ((64, 64), (64, 64),(0, 0)), constant_values=(255.0))
				else:
					lrimg = np.pad(lrimg_list[i], ((192, 192), (192, 192), (0, 0)), constant_values=(255.0))
				combine_imgs = np.concatenate((lrimg, bcimg_list[i], opimg_list[i], hrimg_list[i]), axis = 1)

				path = 'Validation_outputs/{}_gan_{}_val_{}.png'.format(name, CONFIG.gen_model, CONFIG.model_epoch)
				tl.vis.save_image(combine_imgs, path)
            
			print(np.stack((model_psnr, bicubic_psnr), axis=-1))
			print(np.stack((model_ssim, bicubic_ssim), axis=-1))
			print(np.subtract(model_psnr, bicubic_psnr))
			print('SUM(PSNR DIFF): {}'.format(np.sum(np.subtract(model_psnr, bicubic_psnr))))
			print('AVG MODEL PSNR: {}, AVG BICUBIC PSNR: {}'.format(np.sum(model_psnr)/lrimg_list.shape[0], np.sum(bicubic_psnr)/lrimg_list.shape[0]))
			print('SUM(SSIM DIFF): {}'.format(np.sum(np.subtract(model_ssim, bicubic_ssim))))
			print('AVG MODEL SSIM: {}, AVG BICUBIC SSIM: {}'.format(np.sum(model_ssim)/lrimg_list.shape[0], np.sum(bicubic_ssim)/lrimg_list.shape[0]))
			print((time.time()-save_time)/10)
Example #15
            loss = criterion(preds, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_sum += loss.item()

        print("TRAIN: EPOCH %04d / %04d | LOSS %.4f" %
              (epoch, args.num_epochs, loss_sum / 64))

        model.eval()

        for data in eval_dataloader:
            inputs, labels = data

            inputs = inputs.to(device)
            labels = labels.to(device)

            with torch.no_grad():
                preds = model(inputs).clamp(0.0, 1.0)

            psnr = PSNR(preds, labels)

            print("Eval_PSNR = {}".format(psnr))

        if (epoch + 1) % 10 == 0:
            torch.save(
                model.state_dict(),
                os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))
Example #16
def train():
    if not os.path.exists(Config['checkpoint_path']):
        os.makedirs(Config['checkpoint_path'])
        os.makedirs(os.path.join(Config['checkpoint_path'], 'generators'))
        os.makedirs(os.path.join(Config['checkpoint_path'], 'discriminators'))

    device = torch.device('cuda:0' if torch.cuda.is_available()
                          and Config['use_cuda'] else 'cpu')

    # print(Config['img_size']*Config['scale'])
    transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(
            (Config['img_size'][0] * Config['scale'],
             Config['img_size'][1] * Config['scale'])),
        torchvision.transforms.ToTensor()
    ])

    normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                 std=[0.229, 0.224, 0.225])
    #print(Config['img_size'])
    scale = torchvision.transforms.Compose([
        torchvision.transforms.ToPILImage(),
        torchvision.transforms.Resize(Config['img_size']),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    ])
    dataset = torchvision.datasets.ImageFolder(root=Config['train_set_path'],
                                               transform=transform)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=Config['batch_size'],
        shuffle=True,
        num_workers=Config['num_workers'])

    generator = Generator(Config)
    if Config['generator_checkpoint']:
        generator.load_state_dict(torch.load(Config['generator_checkpoint']))

    discriminator = Discriminator(Config)
    if Config['discriminator_checkpoint']:
        discriminator.load_state_dict(
            torch.load(Config['discriminator_checkpoint']))

    feat_extractor = get_feat_extractor()

    content_loss = nn.MSELoss()
    adversarial_loss = nn.BCELoss()

    ones_const = torch.autograd.Variable(torch.ones(Config['batch_size'], 1))

    generator.to(device)
    discriminator.to(device)
    feat_extractor.to('cpu')
    ones_const = ones_const.to(device)

    opt_generator = torch.optim.Adam(generator.parameters(),
                                     lr=Config['generator_lr'])
    opt_discriminator = torch.optim.Adam(discriminator.parameters(),
                                         lr=Config['discriminator_lr'])

    if Config['tensorboard_log']:
        writer_pretrain = SummaryWriter(
            os.path.join(Config['checkpoint_path'], 'pretrain'))
        writer = SummaryWriter(Config['checkpoint_path'])

    low_res = torch.FloatTensor(Config['batch_size'], 3, Config['img_size'][0],
                                Config['img_size'][1])

    for epoch in range(int(Config['batch_size'] * 0.3)):

        mean_generator_content_loss = 0.0

        for i, data in enumerate(data_loader):
            high_res_real, _ = data

            if high_res_real.shape[0] != Config['batch_size']: continue
            for j in range(Config['batch_size']):
                low_res[j] = scale(high_res_real[j])
                high_res_real[j] = normalize(high_res_real[j])

            high_res_real = torch.autograd.Variable(high_res_real.to(device))
            high_res_fake = generator(
                torch.autograd.Variable(low_res).to(device))

            generator.zero_grad()
            #print(high_res_fake.shape, high_res_real.shape)
            generator_content_loss = content_loss(high_res_fake, high_res_real)
            #print(generator_content_loss)
            mean_generator_content_loss += generator_content_loss.data

            generator_content_loss.backward()
            opt_generator.step()
            print(
                f'Epoch {epoch} Iter {i}/{len(data_loader)}: MSE Loss {generator_content_loss.data}'
            )
            writer.add_figure('Pretrain SR Generator',
                              visualize(low_res,
                                        high_res_real.cpu().data,
                                        high_res_fake.cpu().data),
                              global_step=epoch)
        writer.add_scalar('generator_mse_loss',
                          mean_generator_content_loss / len(data_loader),
                          epoch)
    torch.save(
        generator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'generators/generator_pretrain.pth'))

    opt_generator = torch.optim.Adam(generator.parameters(),
                                     lr=Config['generator_lr'] * 0.1)
    opt_discriminator = torch.optim.Adam(discriminator.parameters(),
                                         lr=Config['discriminator_lr'] * 0.1)

    for epoch in range(Config['epochs']):
        mean_generator_content_loss = 0.0
        mean_generator_adversarial_loss = 0.0
        mean_generator_total_loss = 0.0
        mean_discriminator_loss = 0.0

        # accumulate metrics over the whole epoch, not just the last batch
        psnrs = []
        ssims = []
        for i, data in enumerate(data_loader):
            high_res_real, _ = data

            for j in range(Config['batch_size']):
                low_res[j] = scale(high_res_real[j])
                high_res_real[j] = normalize(high_res_real[j])

            low_res = low_res.to(device)
            high_res_real = torch.autograd.Variable(high_res_real.to(device))
            target_real = torch.autograd.Variable(
                torch.rand(Config['batch_size'], 1) * 0.5 + 0.7).to(device)
            target_fake = torch.autograd.Variable(
                torch.rand(Config['batch_size'], 1) * 0.3).to(device)
            high_res_fake = generator(
                torch.autograd.Variable(low_res).to(device))

            discriminator.zero_grad()
            discriminator_loss = adversarial_loss(
                discriminator(high_res_real), target_real) + adversarial_loss(
                    discriminator(high_res_fake), target_fake)

            mean_discriminator_loss += discriminator_loss.data

            discriminator_loss.backward(retain_graph=True)
            opt_discriminator.step()

            generator.zero_grad()

            real_features = torch.autograd.Variable(
                feat_extractor(high_res_real.to('cpu')).to(device).data)
            fake_features = feat_extractor(high_res_fake.to('cpu')).to(device)

            generator_content_loss = content_loss(
                high_res_fake, high_res_real) + 0.006 * content_loss(
                    fake_features, real_features)
            mean_generator_content_loss += generator_content_loss.data

            generator_adversarial_loss = adversarial_loss(
                discriminator(high_res_fake), ones_const.to(device))
            mean_generator_adversarial_loss += generator_adversarial_loss.data

            generator_total_loss = generator_content_loss + 1e-3 * generator_adversarial_loss
            mean_generator_total_loss += generator_total_loss.data

            generator_total_loss.backward()
            opt_generator.step()

            for j in range(Config['batch_size']):
                psnrs.append(PSNR()(high_res_real[j], high_res_fake[j]).item())
                ssims.append(SSIM()(high_res_real[j], high_res_fake[j]).item())

            print(
                f'Epoch {epoch} Iter {i}/{len(data_loader)}: Discriminator Loss {discriminator_loss.data}, Generator Loss: {generator_content_loss.data}/{generator_adversarial_loss.data}/{generator_total_loss.data}'
            )
            writer.add_figure('Training Super Resolution',
                              visualize(low_res,
                                        high_res_real.cpu().data,
                                        high_res_fake.cpu().data),
                              global_step=epoch)
        psnr = np.array(psnrs).mean()
        ssim = np.array(ssims).mean()
        writer.add_scalar('discriminator_loss',
                          mean_discriminator_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('generator_content_loss',
                          mean_generator_content_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('generator_adversarial_loss',
                          mean_generator_adversarial_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('generator_total_loss',
                          mean_generator_total_loss / len(data_loader),
                          global_step=epoch)
        writer.add_scalar('PSNR', psnr, global_step=epoch)
        writer.add_scalar('SSIM', ssim, global_step=epoch)
    torch.save(
        generator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'generators/generator_final.pth'))
    torch.save(
        discriminator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'discriminators/discriminator_final.pth'))
Example #17
            loss = criterion(preds, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_arr += [loss.item()]

        print("TRAIN: EPOCH %04d / %04d | LOSS %.4f" %
              (epoch, 400, np.mean(loss_arr)))

        with torch.no_grad():

            model.eval()

            for data in eval_dataloader:
                inputs, labels = data

                inputs = inputs.to(device)
                labels = labels.to(device)

                preds1 = model(inputs).clamp(0.0, 1.0)
                psnr1 = PSNR(preds1, labels)
                print("Eval PSNR: {: .3f}".format(psnr1))

        if epoch % 50 == 0:
            torch.save(
                model.state_dict(),
                os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))
Example #18
                batch_out_y = np.copy(batch_out)[:, :, 0]
            # RGB to Y
            else:
                img_gt_y = _rgb2ycbcr(np.copy(img_gt))[:, :, 0]
                batch_out_y = _rgb2ycbcr(np.copy(batch_out))[:, :, 0]

            # Save to file
            out_fn = files_gt[i].split("/")[-1][:-4]
            Image.fromarray(batch_out).save('./output/Bic/{}/{}.png'.format(
                testset, out_fn))

            # Evaluation
            CROP_S = 4

            # PSNR
            psnrs.append(PSNR(img_gt_y, batch_out_y, CROP_S))

            # SSIM
            ssims.append(
                skimage.metrics.structural_similarity(
                    img_gt_y,
                    batch_out_y,
                    data_range=255,
                    multichannel=False,
                    gaussian_weights=True,
                    sigma=1.5,
                    use_sample_covariance=False))

            # LPIPS
            img_gt_t = im2tensor(img_gt)
            batch_out_t = im2tensor(batch_out)
Example #19
def train():
    if not os.path.exists(Config['checkpoint_path']):
        os.makedirs(Config['checkpoint_path'])
        os.makedirs(os.path.join(Config['checkpoint_path'], 'generators'))
        os.makedirs(os.path.join(Config['checkpoint_path'], 'discriminators'))

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    dataset = BGRSRDataset(
        os.path.join(Config['train_set_path'], 'images'),
        os.path.join(Config['train_set_path'], 'backgrounds'))

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=Config['batch_size'],
        shuffle=True,
        num_workers=Config['num_workers'])
    train_data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=Config['batch_size'] // 2,
        shuffle=True,
        num_workers=Config['num_workers'])
    generator = Generator()
    if Config['generator_checkpoint']:
        generator.load_state_dict(torch.load(Config['generator_checkpoint']))

    discriminator = Discriminator()
    if Config['discriminator_checkpoint']:
        discriminator.load_state_dict(
            torch.load(Config['discriminator_checkpoint']))

    feat_extractor = get_feat_extractor()

    background_replacement_loss = nn.MSELoss()
    content_loss = nn.MSELoss()
    adversarial_loss = nn.BCELoss()

    ones_const = torch.autograd.Variable(torch.ones(Config['batch_size'], 1))

    generator.to(device)
    discriminator.to(device)
    generator.train()
    discriminator.train()
    feat_extractor.to('cpu')
    ones_const = ones_const.to(device)

    opt_generator = torch.optim.Adam(generator.parameters(),
                                     lr=Config['generator_lr'])
    # opt_discriminator = torch.optim.Adam(discriminator.parameters(), lr=Config['discriminator_lr'])

    if Config['tensorboard_log']:
        writer_pretrain = SummaryWriter(
            os.path.join(Config['checkpoint_path'], 'pretrain'))
        writer = SummaryWriter(Config['checkpoint_path'])

    low_res = torch.FloatTensor(Config['batch_size'], 6, Config['img_size'][0],
                                Config['img_size'][1])
    for epoch in range(int(Config['batch_size'] * 0.1)):
        mean_generator_content_loss = 0.0
        mean_background_replacement_loss = 0.0
        for i, data in enumerate(data_loader):
            input_images, background_images, bgrep_images, output_images = data

            if input_images.shape[0] != Config['batch_size']: continue
            inputs = torch.cat([input_images, background_images], 1)

            high_res_real = torch.autograd.Variable(output_images.to(device))
            low_res_fake, high_res_fake = generator(
                torch.autograd.Variable(inputs.to(device)))

            #            print(low_res_fake, high_res_fake)
            generator.zero_grad()

            bgr_content_loss = background_replacement_loss(
                low_res_fake, bgrep_images.to(device))
            gen_content_loss = content_loss(high_res_fake,
                                            output_images.to(device))
            total_gen_loss = bgr_content_loss + gen_content_loss

            mean_generator_content_loss += gen_content_loss.data
            mean_background_replacement_loss += bgr_content_loss.data

            total_gen_loss.backward()
            opt_generator.step()

            print(
                f'Epoch {epoch} Iter {i}/{len(data_loader)}: BGR Loss:{bgr_content_loss.data}, SR Loss:{gen_content_loss.data}'
            )
            writer.add_figure('Pretrain BGRSR Generator',
                              visualize(input_images, background_images,
                                        low_res_fake, high_res_fake,
                                        output_images),
                              global_step=epoch)
        writer.add_scalar('background_replacement_loss',
                          mean_background_replacement_loss / len(data_loader),
                          epoch)
        writer.add_scalar('sr_content_loss',
                          mean_generator_content_loss / len(data_loader),
                          epoch)
    torch.save(
        generator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'generators/generator_pretrain.pth'))

    opt_generator = torch.optim.Adam(generator.parameters(),
                                     lr=Config['generator_lr'])
    opt_discriminator = torch.optim.Adam(discriminator.parameters(),
                                         lr=Config['discriminator_lr'])

    ones_const_train = torch.autograd.Variable(
        torch.ones(Config['batch_size'] // 2, 1))
    #data_loader.batch_size = data_loader.batch_size//2
    for epoch in range(Config['epochs']):
        mean_generator_replacement_loss = 0.0
        mean_generator_content_loss = 0.0
        mean_generator_adversarial_loss = 0.0
        mean_generator_total_loss = 0.0
        mean_discriminator_loss = 0.0
        mean_background_replacement_loss = 0.0
        # accumulate metrics over the whole epoch, not just the last batch
        psnrs = []
        ssims = []
        for i, data in enumerate(train_data_loader):
            input_images, background_images, bgrep_images, output_images = data

            output_images = torch.autograd.Variable(output_images.to(device))
            target_real = torch.autograd.Variable(
                torch.rand(Config['batch_size'] // 2, 1) * 0.5 +
                0.7).to(device)
            target_fake = torch.autograd.Variable(
                torch.rand(Config['batch_size'] // 2, 1) * 0.3).to(device)

            inputs = torch.cat([input_images, background_images], 1)
            inputs = inputs.to(device)
            low_res_fake, high_res_fake = generator(
                torch.autograd.Variable(inputs))

            discriminator.zero_grad()
            discriminator_loss = adversarial_loss(
                discriminator(output_images), target_real) + adversarial_loss(
                    discriminator(high_res_fake), target_fake)

            mean_discriminator_loss += discriminator_loss.data

            discriminator_loss.backward(retain_graph=True)
            opt_discriminator.step()

            generator.zero_grad()

            real_features = torch.autograd.Variable(
                feat_extractor(output_images.to('cpu')).to(device).data)
            fake_features = feat_extractor(high_res_fake.to('cpu')).to(device)

            generator_content_loss = content_loss(
                high_res_fake, output_images.to(device)
            ) + 0.006 * content_loss(fake_features, real_features)
            mean_generator_content_loss += generator_content_loss.data

            #real_bgrep_features = torch.autograd.Variable(feat_extractor(bgrep_images.to('cpu')).to(device).data)
            #fake_bgrep_features = feat_extractor(low_res_fake.to('cpu')).to(device)

            bgr_content_loss = background_replacement_loss(
                low_res_fake, bgrep_images.to(device)
            )  # + 0.006 * content_loss(fake_bgrep_features, real_bgrep_features)
            mean_background_replacement_loss += bgr_content_loss.to(device)

            generator_adversarial_loss = adversarial_loss(
                discriminator(high_res_fake), ones_const_train.to(device))
            mean_generator_adversarial_loss += generator_adversarial_loss.data

            generator_total_loss = generator_content_loss + 1e-3 * generator_adversarial_loss

            mean_generator_total_loss += generator_total_loss.data

            generator_total_loss.backward()
            opt_generator.step()

            for j in range(Config['batch_size'] // 2):
                img1 = output_images[j].detach().cpu().numpy().transpose(
                    1, 2, 0)
                img2 = high_res_fake[j].detach().cpu().numpy().transpose(
                    1, 2, 0)
                psnrs.append(PSNR()(output_images[j],
                                    high_res_fake[j]).data.cpu().numpy())
                ssims.append(
                    compare_ssim(img1, img2, gradient=False,
                                 multichannel=True))

            print(
                f'Epoch {epoch} Iter {i}/{len(train_data_loader)}: Discriminator Loss {discriminator_loss.data}, BG Replacement Loss: {bgr_content_loss.data}, Generator Loss: {generator_content_loss.data}/{generator_adversarial_loss.data}/{generator_total_loss.data}'
            )
            writer.add_figure(
                'Training BGRSRGAN',
                visualize(input_images, background_images, low_res_fake,
                          high_res_fake, output_images), epoch)
        psnr = np.array(psnrs).mean()
        ssim = np.array(ssims).mean()
        writer.add_scalar('discriminator_loss',
                          mean_discriminator_loss / len(train_data_loader),
                          epoch)
        writer.add_scalar(
            'background_replacement_loss',
            mean_background_replacement_loss / len(train_data_loader), epoch)
        writer.add_scalar('generator_content_loss',
                          mean_generator_content_loss / len(train_data_loader),
                          epoch)
        writer.add_scalar(
            'generator_adversarial_loss',
            mean_generator_adversarial_loss / len(train_data_loader), epoch)
        writer.add_scalar('generator_total_loss',
                          mean_generator_total_loss / len(train_data_loader),
                          epoch)
        writer.add_scalar('PSNR', psnr, epoch)
        writer.add_scalar('SSIM', ssim, epoch)

        if (epoch + 1) % 5 == 0:
            torch.save(
                generator.state_dict(),
                os.path.join(Config['checkpoint_path'],
                             'generators/generator_' + str(epoch + 1) +
                             '.pth'))
            torch.save(
                discriminator.state_dict(),
                os.path.join(
                    Config['checkpoint_path'],
                    'discriminators/discriminator_' + str(epoch + 1) + '.pth'))
    torch.save(
        generator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'generators/generator_' + str(epoch + 1) + '.pth'))
    torch.save(
        discriminator.state_dict(),
        os.path.join(Config['checkpoint_path'],
                     'discriminators/discriminator_' + str(epoch + 1) +
                     '.pth'))
Example #20
def run():
    torch.ops.load_library("./Renderer.dll")

    #########################
    # CONFIGURATION
    #########################

    if 1:
        OUTPUT_FOLDER = "../result-stats/adaptiveDvr3/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-dvr-ejecta6-test.hdf5",
             "gt-dvr-ejecta6-test-screen8x.hdf5"),
            ("RM", "gt-dvr-rm1-test.hdf5", "gt-dvr-rm1-test-screen8x.hdf5"),
            ("Thorax", "gt-dvr-thorax2-test.hdf5",
             "gt-dvr-thorax2-test-screen8x.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-dvr-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            #("v5-temp001", "adapDvr5-rgb-temp001-epoch300"),
            #("v5-temp010", "adapDvr5-rgb-temp010-epoch300"),
            #("v5-temp100", "adapDvr5-rgb-temp100-epoch300"),
            #("v5-temp001-perc", "adapDvr5-rgb-temp001-perc01-epoch300"),
            ("v5-perc01+bn", "adapDvr5-rgb-perc01-bn-epoch500"),
            ("v5-perc01-bn", "adapDvr5-rgb-temp001-perc01-epoch500")
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-dvr-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
            ("v6pr2-noTemp",
             "ejecta6pr2-plastic05-lpips-noTempCon-epoch500_recon.pt", "fast",
             False),
            ("v6pr2-tl2-100",
             "ejecta6pr2-plastic05-lpips-tl2-100-epoch500_recon.pt", "fast",
             True)
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['plastic']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [
            0.02, 0.05, 0.1, 0.2
        ]  #[0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.5, 0.8, 1.0]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 1  #2

    #########################
    # LOADING
    #########################

    device = torch.device("cuda")

    # Load Networks
    IMPORTANCE_BASELINE1 = "ibase1"
    IMPORTANCE_BASELINE2 = "ibase2"
    RECON_BASELINE = "rbase"

    # load importance model
    print("load importance networks")

    class ImportanceModel:
        def __init__(self, file):
            if file == IMPORTANCE_BASELINE1:
                self._net = importance.UniformImportanceMap(1, 0.5)
                self._upscaling = 1
                self._name = "constant"
                self.disableTemporal = True
                self._requiresPrevious = False
            elif file == IMPORTANCE_BASELINE2:
                self._net = importance.GradientImportanceMap(
                    1, (0, 1), (1, 1), (2, 1))
                self._upscaling = 1
                self._name = "curvature"
                self.disableTemporal = True
                self._requiresPrevious = False
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_importance.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                settings = json.loads(extra_files['settings.json'])
                self._upscaling = settings['networkUpscale']
                self._requiresPrevious = settings.get("requiresPrevious",
                                                      False)
                self.disableTemporal = settings.get("disableTemporal", True)

        def networkUpscaling(self):
            return self._upscaling

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            if self._requiresPrevious:
                input = torch.cat([
                    input,
                    models.VideoTools.flatten_high(prev_warped_out,
                                                   self._upscaling)
                ],
                                  dim=1)
            input = F.pad(input, [IMPORTANCE_BORDER] * 4, 'constant', 0)
            output = self._net(input)  # the network call
            output = F.pad(output, [-IMPORTANCE_BORDER * self._upscaling] * 4,
                           'constant', 0)
            return output

    importanceBaseline1 = ImportanceModel(IMPORTANCE_BASELINE1)
    importanceBaseline2 = ImportanceModel(IMPORTANCE_BASELINE2)
    importanceModels = [ImportanceModel(f) for f in NETWORKS]

    # load reconstruction networks
    print("load reconstruction networks")

    class ReconstructionModel:
        def __init__(self, file):
            if file == RECON_BASELINE:

                class Inpainting(nn.Module):
                    def forward(self, x, mask):
                        input = x[:, 0:4, :, :].contiguous(
                        )  # rgba, don't use normal xyz, depth
                        mask = x[:, 8, :, :].contiguous()
                        return torch.ops.renderer.fast_inpaint(mask, input)

                self._net = Inpainting()
                self._upscaling = 1
                self._name = "inpainting"
                self.disableTemporal = True
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_recon.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                self._settings = json.loads(extra_files['settings.json'])
                self.disableTemporal = False
                requiresMask = self._settings.get('expectMask', False)
                if self._settings.get("interpolateInput", False):
                    self._originalNet = self._net

                    class Inpainting2(nn.Module):
                        def __init__(self, originalNet, requiresMask):
                            super().__init__()
                            self._n = originalNet
                            self._requiresMask = requiresMask

                        def forward(self, x, mask):
                            input = x[:, 0:8, :, :].contiguous(
                            )  # rgba, normal xyz, depth
                            mask = x[:, 8, :, :].contiguous()
                            inpainted = torch.ops.renderer.fast_inpaint(
                                mask, input)
                            x[:, 0:8, :, :] = inpainted
                            if self._requiresMask:
                                return self._n(x, mask)
                            else:
                                return self._n(x)

                    self._net = Inpainting2(self._originalNet, requiresMask)

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, mask, prev_warped_out):
            input = torch.cat([input, prev_warped_out], dim=1)
            output = self._net(input, mask)
            return output

    class ReconstructionModelPostTrain:
        """
        Reconstruction models that are trained as dense reconstruction networks
        after the adaptive training.
        They don't receive the sampling mask as input, but can start with
        PDE-based inpainting.
        """
        def __init__(self, name: str, model_path: str, inpainting: str,
                     has_temporal: bool):
            assert inpainting == 'fast' or inpainting == 'pde', "inpainting must be either 'fast' or 'pde', but got %s" % inpainting
            self._inpainting = inpainting

            self._name = name
            file = os.path.join(POSTTRAIN_NETWORK_DIR, model_path)
            extra_files = torch._C.ExtraFilesMap()
            extra_files['settings.json'] = ""
            self._net = torch.jit.load(file,
                                       map_location=device,
                                       _extra_files=extra_files)
            self._settings = json.loads(extra_files['settings.json'])
            assert self._settings.get(
                'upscale_factor', None) == 1, "selected file is not a 1x SRNet"
            self.disableTemporal = not has_temporal

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, mask, prev_warped_out):
            # no sampling and no AO
            input_no_sampling = input[:, 0:8, :, :].contiguous(
            )  # rgba, normal xyz, depth
            sampling_mask = mask[:, 0, :, :].contiguous()
            # perform inpainting
            if self._inpainting == 'pde':
                inpainted = torch.ops.renderer.pde_inpaint(
                    sampling_mask,
                    input_no_sampling,
                    200,
                    1e-4,
                    5,
                    2,  # m0, epsilon, m1, m2
                    0,  # mc -> multigrid recursion count. =0 disables the multigrid hierarchy
                    9,
                    0)  # ms, m3
            else:
                inpainted = torch.ops.renderer.fast_inpaint(
                    sampling_mask, input_no_sampling)
            # run network
            if self.disableTemporal:
                prev_warped_out = torch.zeros_like(prev_warped_out)
            input = torch.cat([inpainted, prev_warped_out], dim=1)
            output = self._net(input)
            if isinstance(output, tuple):
                output = output[0]
            return output

    reconBaseline = ReconstructionModel(RECON_BASELINE)
    reconModels = [ReconstructionModel(f) for f in NETWORKS]
    reconPostModels = [
        ReconstructionModelPostTrain(name, file, inpainting, has_temporal)
        for (name, file, inpainting, has_temporal) in POSTTRAIN_NETWORKS
    ]
    allReconModels = reconModels + reconPostModels

    NETWORK_COMBINATIONS = \
        [(importanceBaseline1, reconBaseline), (importanceBaseline2, reconBaseline)] + \
        [(importanceBaseline1, reconNet) for reconNet in allReconModels] + \
        [(importanceBaseline2, reconNet) for reconNet in allReconModels] + \
        [(importanceNet, reconBaseline) for importanceNet in importanceModels] + \
        list(zip(importanceModels, reconModels)) + \
        [(importanceNet, reconPostModel) for importanceNet in importanceModels for reconPostModel in reconPostModels]
    #NETWORK_COMBINATIONS = list(zip(importanceModels, reconModels))
    print("Network combinations:")
    for (i, r) in NETWORK_COMBINATIONS:
        print("  %s - %s" % (i.name(), r.name()))

    # load sampling patterns
    print("load sampling patterns")
    with h5py.File(SAMPLING_FILE, 'r') as f:
        sampling_pattern = dict([(name, torch.from_numpy(f[name][...]).to(device)) \
            for name in SAMPLING_PATTERNS])

    # heatmap
    HEATMAP_CFG = [(hmin, hmean) for hmin in HEATMAP_MIN
                   for hmean in HEATMAP_MEAN if hmin < hmean]
    print("heatmap configs:", HEATMAP_CFG)

    #########################
    # DEFINE STATISTICS
    #########################
    ssimLoss = SSIM(size_average=False)
    ssimLoss.to(device)
    psnrLoss = PSNR()
    psnrLoss.to(device)
    lpipsColor = lpips.PerceptualLoss(model='net-lin',
                                      net='alex',
                                      use_gpu=True)
    MIN_FILLING = 0.05
    NUM_BINS = 200

    class Statistics:
        def __init__(self):
            self.histogram_color = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_alpha = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_counter = 0

        def create_datasets(self, hdf5_file: h5py.File, stats_name: str,
                            histo_name: str, num_samples: int,
                            extra_info: dict):

            self.stats_name = stats_name
            self.expected_num_samples = num_samples
            stats_shape = (num_samples, len(list(StatFieldDvr)))
            self.stats_file = hdf5_file.require_dataset(stats_name,
                                                        stats_shape,
                                                        dtype='f',
                                                        exact=True)
            self.stats_file.attrs['NumFields'] = len(list(StatFieldDvr))
            for field in list(StatFieldDvr):
                self.stats_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.stats_file.attrs[key] = value
            self.stats_index = 0

            histo_shape = (NUM_BINS, len(list(HistoFieldDvr)))
            self.histo_file = hdf5_file.require_dataset(histo_name,
                                                        histo_shape,
                                                        dtype='f',
                                                        exact=True)
            self.histo_file.attrs['NumFields'] = len(list(HistoFieldDvr))
            for field in list(HistoFieldDvr):
                self.histo_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.histo_file.attrs[key] = value

        def add_timestep_sample(self, pred_rgba, gt_rgba, sampling_mask):
            """
            adds a timestep sample:
            pred_rgba: prediction rgba
            gt_rgba: ground truth rgba
            """
            B = pred_rgba.shape[0]

            #apply border
            pred_rgba = pred_rgba[:, :, LOSS_BORDER:-LOSS_BORDER,
                                  LOSS_BORDER:-LOSS_BORDER]
            gt_rgba = gt_rgba[:, :, LOSS_BORDER:-LOSS_BORDER,
                              LOSS_BORDER:-LOSS_BORDER]

            # PSNR
            psnr_color = psnrLoss(pred_rgba[:, 0:3, :, :],
                                  gt_rgba[:, 0:3, :, :]).cpu().numpy()
            psnr_alpha = psnrLoss(pred_rgba[:, 3:4, :, :],
                                  gt_rgba[:, 3:4, :, :]).cpu().numpy()

            # SSIM
            ssim_color = ssimLoss(pred_rgba[:, 0:3, :, :],
                                  gt_rgba[:, 0:3, :, :]).cpu().numpy()
            ssim_alpha = ssimLoss(pred_rgba[:, 3:4, :, :],
                                  gt_rgba[:, 3:4, :, :]).cpu().numpy()

            # Perceptual
            lpips_color = torch.cat([ \
                lpipsColor(pred_rgba[b, 0:3, :, :], gt_rgba[b, 0:3, :, :], normalize=True) \
                    for b in range(B)], dim=0).cpu().numpy()

            # Samples
            samples = torch.mean(sampling_mask, dim=(1, 2, 3)).cpu().numpy()

            # Write samples to file
            for b in range(B):
                assert self.stats_index < self.expected_num_samples, "Adding more samples than specified"
                self.stats_file[self.stats_index, :] = np.array([
                    psnr_color[b], psnr_alpha[b], ssim_color[b], ssim_alpha[b],
                    lpips_color[b], samples[b]
                ],
                                                                dtype='f')
                self.stats_index += 1

            # Histogram
            self.histogram_counter += 1

            color_diff = F.l1_loss(gt_rgba[:, 0:3, :, :],
                                   pred_rgba[:, 0:3, :, :],
                                   reduction='none').sum(dim=0) / 6
            histogram, _ = np.histogram(color_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_color += (
                histogram /
                (NUM_BINS * B) - self.histogram_color) / self.histogram_counter

            alpha_diff = F.l1_loss(gt_rgba[:, 3, :, :],
                                   pred_rgba[:, 3, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(alpha_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_alpha += (
                histogram /
                (NUM_BINS * B) - self.histogram_alpha) / self.histogram_counter
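            # the update h += (x - h) / n is a streaming mean: after n samples
            # h equals the average of all n normalized histograms, e.g.
            # h = x1 + (x2 - x1) / 2 = (x1 + x2) / 2 for n = 2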

        def close_stats_file(self):
            self.stats_file.attrs['NumEntries'] = self.stats_index

        def write_histogram(self):
            """
            After every sample for the current dataset was processed, write
            a histogram of the errors in a new file
            """
            for i in range(NUM_BINS):
                self.histo_file[i, :] = np.array([
                    i / NUM_BINS, (i + 1) / NUM_BINS, self.histogram_color[i],
                    self.histogram_alpha[i]
                ])

    #########################
    # DATASET
    #########################
    class FullResDataset(torch.utils.data.Dataset):
        def __init__(self, file_high, file_low):
            self.hdf5_file_high = h5py.File(file_high, 'r')
            self.dset_high = self.hdf5_file_high['gt']
            self.hdf5_file_low = h5py.File(file_low, 'r')
            self.dset_low = self.hdf5_file_low['gt']
            print("Dataset shape:", self.dset_high.shape)

        def __len__(self):
            return self.dset_high.shape[0]

        def num_timesteps(self):
            return self.dset_high.shape[1]

        def __getitem__(self, idx):
            return self.dset_high[idx, ...], self.dset_low[idx, ...]

    #########################
    # COMPUTE STATS for each dataset
    #########################
    for dataset_name, dataset_file_high, dataset_file_low in DATASETS:
        dataset_file_high = os.path.join(DATASET_PREFIX, dataset_file_high)
        dataset_file_low = os.path.join(DATASET_PREFIX, dataset_file_low)
        print("Compute statistics for", dataset_name)

        # create output file
        os.makedirs(OUTPUT_FOLDER, exist_ok=True)
        output_file = os.path.join(OUTPUT_FOLDER, dataset_name + '.hdf5')
        print("Save to", output_file)
        with h5py.File(output_file, 'a') as output_hdf5_file:

            # load dataset
            dataset = FullResDataset(dataset_file_high, dataset_file_low)
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

            # define statistics
            StatsCfg = collections.namedtuple(
                "StatsCfg", "stats importance recon heatmin heatmean pattern")
            statistics = []
            for (inet, rnet) in NETWORK_COMBINATIONS:
                for (heatmin, heatmean) in HEATMAP_CFG:
                    for pattern in SAMPLING_PATTERNS:
                        stats_info = {
                            'importance': inet.name(),
                            'reconstruction': rnet.name(),
                            'heatmin': heatmin,
                            'heatmean': heatmean,
                            'pattern': pattern
                        }
                        stats_filename = "Stats_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 1000,
                            heatmean * 1000, pattern)
                        histo_filename = "Histogram_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 1000,
                            heatmean * 1000, pattern)
                        s = Statistics()
                        s.create_datasets(output_hdf5_file, stats_filename,
                                          histo_filename,
                                          len(dataset) * dataset.num_timesteps(),
                                          stats_info)
                        statistics.append(
                            StatsCfg(stats=s,
                                     importance=inet,
                                     recon=rnet,
                                     heatmin=heatmin,
                                     heatmean=heatmean,
                                     pattern=pattern))
            print(len(statistics),
                  "different combinations are performed per sample")

            # compute statistics
            try:
                with torch.no_grad():
                    num_minibatch = len(data_loader)
                    pg = ProgressBar(num_minibatch, 'Evaluation', length=50)
                    for iteration, (crop_high,
                                    crop_low) in enumerate(data_loader, 0):
                        pg.print_progress_bar(iteration)
                        crop_high = crop_high.to(device)
                        crop_low = crop_low.to(device)
                        B, T, C, H, W = crop_high.shape
                        _, _, _, Hlow, Wlow = crop_low.shape
                        assert Hlow * UPSCALING == H

                        # try out each combination
                        for s in statistics:
                            #print(s)
                            # get input to evaluation
                            importanceNetUpscale = s.importance.networkUpscaling(
                            )
                            importancePostUpscale = UPSCALING // importanceNetUpscale
                            pattern = sampling_pattern[s.pattern][:H, :W]

                            # loop over timesteps
                            pattern = pattern.unsqueeze(0).unsqueeze(0)
                            previous_importance = None
                            previous_output = None
                            reconstructions = []
                            for j in range(T):
                                # extract flow (always the last two channels of crop_high)
                                flow = crop_high[:, j, C - 2:, :, :]

                                # compute importance map
                                importance_input = crop_low[:, j, :8, :, :]
                                if j == 0 or s.importance.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        1,
                                        importance_input.shape[2] *
                                        importanceNetUpscale,
                                        importance_input.shape[3] *
                                        importanceNetUpscale,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    flow_low = F.interpolate(
                                        flow,
                                        scale_factor=1 / importancePostUpscale)
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_importance, flow_low, 1,
                                        False)
                                importance_map = s.importance.call(
                                    importance_input, previous_input)
                                if len(importance_map.shape) == 3:
                                    importance_map = importance_map.unsqueeze(
                                        1)
                                previous_importance = importance_map

                                target_mean = s.heatmean
                                if USE_BINARY_SEARCH_ON_MEAN:
                                    # For regular sampling, the normalization does not work properly,
                                    # use binary search on the heatmean instead
                                    def f(x):
                                        postprocess = importance.PostProcess(
                                            s.heatmin, x,
                                            importancePostUpscale,
                                            LOSS_BORDER //
                                            importancePostUpscale, 'basic')
                                        importance_map2 = postprocess(
                                            importance_map)[0].unsqueeze(1)
                                        sampling_mask = (
                                            importance_map2 >= pattern).to(
                                                dtype=importance_map.dtype)
                                        samples = torch.mean(
                                            sampling_mask).item()
                                        return samples

                                    target_mean = binarySearch(
                                        f, s.heatmean, s.heatmean, 10, 0, 1)
                                    #print("Binary search for #samples, mean start={}, result={} with samples={}, original={}".
                                    #      format(s.heatmean, s.heatmean, f(target_mean), f(s.heatmean)))
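                                    # binarySearch is not defined in this
                                    # excerpt; it is assumed to bisect for x
                                    # in [lo, hi] with f(x) close to the
                                    # target, with the signature
                                    # binarySearch(f, target, x0, iterations,
                                    # lo, hi) and initial guess x0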

                                # normalize and upscale importance map
                                postprocess = importance.PostProcess(
                                    s.heatmin, target_mean,
                                    importancePostUpscale,
                                    LOSS_BORDER // importancePostUpscale,
                                    'basic')
                                importance_map = postprocess(
                                    importance_map)[0].unsqueeze(1)
                                #print("mean:", torch.mean(importance_map).item())

                                # create samples
                                sample_mask = (importance_map >= pattern).to(
                                    dtype=importance_map.dtype)

                                reconstruction_input = torch.cat(
                                    (
                                        sample_mask *
                                        crop_high[:, j, 0:
                                                  8, :, :],  # rgba, normal xyz, depth
                                        sample_mask),  # sample mask
                                    dim=1)

                                # warp previous output
                                if j == 0 or s.recon.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        4,
                                        H,
                                        W,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_output, flow, 1, False)

                                # run reconstruction network
                                reconstruction = s.recon.call(
                                    reconstruction_input, sample_mask,
                                    previous_input)

                                # clamp
                                reconstruction_clamped = torch.clamp(
                                    reconstruction, 0, 1)
                                reconstructions.append(reconstruction_clamped)

                                ## test
                                #if j==0:
                                #    plt.figure()
                                #    plt.imshow(reconstruction_clamped[0,0:3,:,:].cpu().numpy().transpose((1,2,0)))
                                #    plt.title(s.stats.stats_name)
                                #    plt.show()

                                # save for next frame
                                previous_output = reconstruction_clamped

                            #endfor: timesteps

                            # compute statistics
                            reconstructions = torch.cat(reconstructions, dim=0)
                            crops_high = torch.cat(
                                [crop_high[:, j, :8, :, :] for j in range(T)],
                                dim=0)
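                            # note: sample_mask still holds the mask of the
                            # last timestep; it is replicated for all T frames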
                            sample_masks = torch.cat([sample_mask] * T, dim=0)
                            s.stats.add_timestep_sample(
                                reconstructions, crops_high, sample_masks)

                        # endfor: statistic
                    # endfor: batch

                    pg.print_progress_bar(num_minibatch)
                # end no_grad()
            finally:
                # close files
                for s in statistics:
                    s.stats.write_histogram()
                    s.stats.close_stats_file()
                # save the network output as PNG (the head of this statement
                # was lost; reconstructed on the assumption that it mirrors
                # the uint8 conversion of batch_Out below)
                Image.fromarray((batch_Out * 255).astype(
                    np.uint8)).save('{}/result/v{}/{}.png'.format(
                        EXP_NAME, str(VERSION),
                        fn.split('/')[-1]))

                # PSNR
                img_gt = (val_H * 255).astype(np.uint8)
                img_target = (batch_Out * 255).astype(np.uint8)

                CROP_S = 16
                if CROP_S > 0:
                    img_gt = img_gt[CROP_S:-CROP_S, CROP_S:-CROP_S]
                    img_target = img_target[CROP_S:-CROP_S, CROP_S:-CROP_S]

                psnrs.append(
                    PSNR(
                        _rgb2ycbcr(img_gt)[:, :, 0],
                        _rgb2ycbcr(img_target)[:, :, 0], 0))

                # LPIPS
                dist = model_LPIPS.forward(
                    im2tensor(img_target),
                    im2tensor(img_gt))  # RGB image from [0,255] to [-1,1]
                lpips.append(dist)

        print('AVG PSNR/LPIPS: Validation: {}/{}'.format(
            np.mean(np.asarray(psnrs)), np.mean(np.asarray(lpips))))

        # Save best model
        if i % I_SAVE == 0:
            if np.mean(np.asarray(lpips)) < best_avg_lpips:
                best_avg_lpips = np.mean(np.asarray(lpips))
Exemple #22
0
    img_in = np.pad(np.rot90(img_lr, 2), ((0,1), (0,1), (0,0)), mode='reflect').transpose((2,0,1))
    out_r2 = FourSimplexInterp(LUT, img_in, h, w, q, 2, upscale=UPSCALE)

    img_in = np.pad(np.rot90(img_lr, 3), ((0,1), (0,1), (0,0)), mode='reflect').transpose((2,0,1))
    out_r3 = FourSimplexInterp(LUT, img_in, w, h, q, 1, upscale=UPSCALE)

    img_out = (out_r0 + out_r1 + out_r2 + out_r3) / 255.0
    img_out = img_out.transpose((1,2,0))
    img_out = np.round(np.clip(img_out, 0, 1) * 255).astype(np.uint8)

    # Matching image sizes 
    if img_gt.shape[0] < img_out.shape[0]:
        img_out = img_out[:img_gt.shape[0]]
    if img_gt.shape[1] < img_out.shape[1]:
        img_out = img_out[:, :img_gt.shape[1]]

    if img_gt.shape[0] > img_out.shape[0]:
        img_out = np.pad(img_out, ((0,img_gt.shape[0]-img_out.shape[0]),(0,0),(0,0)))
    if img_gt.shape[1] > img_out.shape[1]:
        img_out = np.pad(img_out, ((0,0),(0,img_gt.shape[1]-img_out.shape[1]),(0,0)))

    # Save to file
    Image.fromarray(img_out).save('./output_S_x{}_{}bit/{}_LUT_interp_{}bit.png'.format(UPSCALE, SAMPLING_INTERVAL, fn.split('/')[-1][:-4], SAMPLING_INTERVAL))

    CROP_S = 4
    psnr = PSNR(_rgb2ycbcr(img_gt)[:,:,0], _rgb2ycbcr(img_out)[:,:,0], CROP_S)
    psnrs.append(psnr)

print('AVG PSNR: {}'.format(np.mean(np.asarray(psnrs))))
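
# A minimal sketch of the PSNR helper assumed above (not the original
# implementation): PSNR(gt, pred, shave) on single-channel uint8 arrays,
# shaving a border of `shave` pixels before computing 10*log10(255^2 / MSE).
import numpy as np

def PSNR(gt, pred, shave=0):
    # crop a border of `shave` pixels, as in the calls above
    if shave > 0:
        gt = gt[shave:-shave, shave:-shave]
        pred = pred[shave:-shave, shave:-shave]
    mse = np.mean((gt.astype(np.float64) - pred.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10(255.0 ** 2 / mse)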

Exemple #23
0
def train(scale, device):

    # initialize model
    model = ESPCN(upscale_factor=scale).to(device)

    # MSE loss function
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)

    # Learning rate scheduler
    scheduler = MultiStepLR(optimizer, milestones=[15, 80], gamma=0.1)

    # load the data
    train_dataset = ImageDataset('data/processed/train',
                                 upscale_factor=scale,
                                 input_transform=transforms.ToTensor(),
                                 target_transform=transforms.ToTensor())

    val_dataset = ImageDataset('data/processed/val',
                               upscale_factor=scale,
                               input_transform=transforms.ToTensor(),
                               target_transform=transforms.ToTensor())

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=64,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=64,
                            shuffle=True,
                            num_workers=4)

    # Train Model
    num_epochs = 100
    train_loss = []
    train_psnr = []

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(1, num_epochs + 1):
        # Train Model
        model.train()
        loss = 0
        losses = []
        print("Starting epoch number " + str(epoch))

        for i, (images, labels) in enumerate(train_loader):
            # images shape torch.Size([64, 1, 85, 85])
            # labels shape torch.Size([64, 1, 255, 255])
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            out_images = model(images)

            loss = criterion(out_images, labels)
            loss.backward()
            optimizer.step()
            losses.append(loss.detach())  # detach so each step's graph is freed

        loss = torch.stack(losses).mean().item()
        train_loss.append(loss)
        print("Loss for Training on Epoch " + str(epoch) + " is " +
              "{:.6f}".format(loss))

        # save model
        save_dir = 'saved_models/UPSCALE_X' + str(scale) + '/'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        torch.save(model.state_dict(),
                   os.path.join(save_dir, 'epoch_{}.pth'.format(epoch)))

        # Evaluate Model
        model.eval()
        psnr = 0
        psnrs = []
        for i, (images, labels) in enumerate(val_loader):
            images, labels = images.to(device), labels.to(device)

            with torch.no_grad():
                out_images = model(images)

            psnr = PSNR(out_images, labels)
            psnrs.append(psnr)

        psnr = torch.stack(psnrs).mean().item()
        train_psnr.append(psnr)
        print('Eval PSNR: {:.2f}\n'.format(psnr))

        if psnr > best_psnr:
            best_epoch = epoch
            best_psnr = psnr
            best_weights = copy.deepcopy(model.state_dict())

        scheduler.step()

    print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
    torch.save(best_weights, os.path.join(save_dir, 'best.pth'))

    # write PSNR to CSV file
    csv_name = 'results/Eval_PSNR_X' + str(scale) + '.csv'
    write_csv(csv_name, train_psnr, scale)

    # write losses to CSV file
    file_path = 'results/train_loss_X' + str(scale) + '.csv'
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    solution_rows = [('epoch', 'train_loss', 'Upscale')
                     ] + [(i, y, scale) for (i, y) in enumerate(train_loss)]
    with open(file_path, 'w', newline="") as f:
        writer = csv.writer(f)
        writer.writerows(solution_rows)
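
# write_csv is not shown in this excerpt; a plausible sketch, assuming it
# mirrors the loss CSV above with one (epoch, psnr, upscale) row per epoch:
import csv
import os

def write_csv(path, psnr_per_epoch, scale):
    d = os.path.dirname(path)
    if d:
        os.makedirs(d, exist_ok=True)
    rows = [('epoch', 'eval_psnr', 'Upscale')] + \
           [(i + 1, p, scale) for i, p in enumerate(psnr_per_epoch)]
    with open(path, 'w', newline="") as f:
        csv.writer(f).writerows(rows)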
Exemple #24
0
def train():
    args = parser.parse_args()
    print("Number of GPUS available" + str(torch.cuda.device_count()))
    model = Net().cuda()
    #model.load_state_dict(torch.load("/home/yzm/pyproject/hw4/chkpt/guassian/model_ffinal_epoch.state"))
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0001)

    # learning-rate schedule: 10x decay every 10 epochs (stepped once per epoch below)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.1)

    dataset = DatasetFromFolder()

    dataloader = DataLoader(dataset=dataset,
                            batch_size=args.batch_size,
                            shuffle=True)
    print(f'starting training: {len(dataloader)} batches per epoch, '
          f'batch size {args.batch_size}')
    model.train()
    iterationnum = 0
    losslist = []
    iteration = []
    for ei in range(0, args.Epochs):
        train_loss = 0

        for x, target in dataloader:
            iterationnum = iterationnum + 1
            x = x.cuda()
            target = target.cuda()
            y = model(x)
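            # assuming PSNR(y, target) returns 10*log10(MAX^2 / MSE),
            # minimizing (100 - PSNR) maximizes PSNR; the offset 100 just
            # keeps the loss positive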
            loss = (100 - PSNR(y, target)).cuda()
            iteration.append(iterationnum)
            losslist.append(loss.item())  # store floats so the list can be plotted
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("{%3d}/{%3d} -------loss:{%3f}" %
                  (ei, args.Epochs - 1, loss))

            train_loss += loss.item() / len(dataloader)
        scheduler.step()
        print(f"----------------{ei}epoch's loss is{train_loss}")

        if ei % args.save_every == args.save_every - 1:
            print(f"save model model_{ei}_epoch")
            torch.save(model.state_dict(),
                       f"../chkpt/{args.exp_name}/model_{ei}_epoch.state")
    torch.save(model.state_dict(),
               f"../chkpt/{args.exp_name}/model_final_epoch.state")
    plt.figure()
    plt.plot(iteration, losslist, label='loss')
    plt.draw()
    plt.savefig("/home/yzm/pyproject/hw4/experience/loss.jpg")
    plt.show()  # savefig must come before show(), which clears the figure
Exemple #25
0
def run():
    torch.ops.load_library("./Renderer.dll")

    #########################
    # CONFIGURATION
    #########################

    if 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIso2/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            #("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            ("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #("adaptive011", "adaptive011_epoch500"), #title, file prefix
            ("adaptive019", "adaptive019_epoch470"),
            ("adaptive023", "adaptive023_epoch300")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['halton', 'plastic', 'random']

        HEATMAP_MIN = [0.01, 0.05, 0.2]
        HEATMAP_MEAN = [0.05, 0.1, 0.2, 0.5]

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 8

    elif 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoEnhance6/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            #("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
            #("Head", "gt-rendering-head.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            #("U-Net (5-4)", "sizes/size5-4_epoch500"),
            #("Enhance-Net (epoch 50)", "enhance2_imp050_epoch050"),
            #("Enhance-Net (epoch 400)", "enhance2_imp050_epoch400"),
            #("Enhance-Net (Thorax)", "enhance_imp050_Thorax_epoch200"),
            #("Enhance-Net (RM)", "enhance_imp050_RM_epoch200"),
            #("Imp100", "enhance4_imp100_epoch300"),
            #("Imp100res", "enhance4_imp100res_epoch230"),
            ("Imp100res+N", "enhance4_imp100res+N_epoch300"),
            ("Imp100+N", "enhance4_imp100+N_epoch300"),
            #("Imp100+N-res", "enhance4_imp100+N-res_epoch300"),
            #("Imp100+N-resInterp", "enhance4_imp100+N-resInterp_epoch300"),
            #("U-Net (5-4)", "size5-4_epoch500"),
            #("U-Net (5-3)", "size5-3_epoch500"),
            #("U-Net (4-4)", "size4-4_epoch500"),
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/dense-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['plastic']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [
            0.05
        ]  #[0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.5, 0.8, 1.0]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 4

    elif 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoEnhance5Sampling/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            ("Enhance-Net (epoch 400)", "enhance2_imp050_epoch400"),
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/dense-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['halton', 'plastic', 'random', 'regular']
        #SAMPLING_PATTERNS = ['regular']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [0.05]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 4

    elif 1:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoEnhance8Sampling/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            #("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
            #("Head", "gt-rendering-head.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #title, file prefix
            ("regular", "enhance7_regular_epoch190"),
            ("random", "enhance7_random_epoch190"),
            ("halton", "enhance7_halton_epoch190"),
            ("plastic", "enhance7_plastic_epoch190"),
        ]

        # Test if it is better to post-train with dense networks and PDE inpainting
        POSTTRAIN_NETWORK_DIR = "D:/VolumeSuperResolution/dense-modeldir/"
        POSTTRAIN_NETWORKS = [
            # title, file suffix to POSTTRAIN_NETWORK_DIR, inpainting {'fast', 'pde'}
            #("Enhance PDE (post)", "inpHv2-pde05-epoch200.pt", "pde")
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['regular', 'random', 'halton', 'plastic']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [
            0.05
        ]  #[0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.5, 0.8, 1.0]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 4

    elif 0:
        OUTPUT_FOLDER = "../result-stats/adaptiveIsoImp/"
        DATASET_PREFIX = "D:/VolumeSuperResolution-InputData/"
        DATASETS = [
            ("Ejecta", "gt-rendering-ejecta-v2-test.hdf5"),
            #("RM", "gt-rendering-rm-v1.hdf5"),
            #("Human", "gt-rendering-human-v1.hdf5"),
            #("Thorax", "gt-rendering-thorax-v1.hdf5"),
        ]

        NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-modeldir/imp/"
        NETWORKS = [  #suffixed with _importance.pt and _recon.pt
            #("adaptive011", "adaptive011_epoch500"), #title, file prefix
            ("imp005", "imp005_epoch500"),
            ("imp010", "imp010_epoch500"),
            ("imp020", "imp020_epoch500"),
            ("imp050", "imp050_epoch500"),
        ]

        SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
        SAMPLING_PATTERNS = ['halton']

        HEATMAP_MIN = [0.002]
        HEATMAP_MEAN = [0.005, 0.01, 0.02, 0.05, 0.1]
        USE_BINARY_SEARCH_ON_MEAN = True

        UPSCALING = 8  # = networkUp * postUp

        IMPORTANCE_BORDER = 8
        LOSS_BORDER = 32
        BATCH_SIZE = 16

    #########################
    # LOADING
    #########################

    device = torch.device("cuda")

    # Load Networks
    IMPORTANCE_BASELINE1 = "ibase1"
    IMPORTANCE_BASELINE2 = "ibase2"
    IMPORTANCE_BASELINE3 = "ibase3"
    RECON_BASELINE = "rbase"

    # load importance model
    print("load importance networks")

    class ImportanceModel:
        def __init__(self, file):
            if file == IMPORTANCE_BASELINE1:
                self._net = importance.UniformImportanceMap(1, 0.5)
                self._upscaling = 1
                self._name = "constant"
                self.disableTemporal = True
                self._requiresPrevious = False
            elif file == IMPORTANCE_BASELINE2:
                self._net = importance.GradientImportanceMap(
                    1, (1, 1), (2, 1), (3, 1))
                self._upscaling = 1
                self._name = "curvature"
                self.disableTemporal = True
                self._requiresPrevious = False
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_importance.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                settings = json.loads(extra_files['settings.json'])
                self._upscaling = settings['networkUpscale']
                self._requiresPrevious = settings.get("requiresPrevious",
                                                      False)
                self.disableTemporal = settings.get("disableTemporal", True)

        def networkUpscaling(self):
            return self._upscaling

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            if self._requiresPrevious:
                input = torch.cat([
                    input,
                    models.VideoTools.flatten_high(prev_warped_out,
                                                   self._upscaling)
                ],
                                  dim=1)
            input = F.pad(input, [IMPORTANCE_BORDER] * 4, 'constant', 0)
            output = self._net(input)  # the network call
            output = F.pad(output, [-IMPORTANCE_BORDER * self._upscaling] * 4,
                           'constant', 0)
            return output

    class LuminanceImportanceModel:
        def __init__(self):
            self.disableTemporal = True

        def setTestFile(self, filename):
            importance_file = filename[:-5] + "-luminanceImportance.hdf5"
            if os.path.exists(importance_file):
                self._exist = True
                self._file = h5py.File(importance_file, 'r')
                self._dset = self._file['importance']
            else:
                self._exist = False
                self._file = None
                self._dset = None

        def isAvailable(self):
            return self._exist

        def setIndices(self, indices: torch.Tensor):
            assert len(indices.shape) == 1
            self._indices = list(indices.cpu().numpy())

        def setTime(self, time):
            self._time = time

        def networkUpscaling(self):
            return UPSCALING

        def name(self):
            return "luminance-contrast"

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            B, C, H, W = input.shape
            if not self._exist:
                return torch.ones(B,
                                  1,
                                  H,
                                  W,
                                  dtype=input.dtype,
                                  device=input.device)
            outputs = []
            for idx in self._indices:
                outputs.append(
                    torch.from_numpy(self._dset[idx, self._time,
                                                ...]).to(device=input.device))
            return torch.stack(outputs, dim=0)
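
    # Hedged usage sketch (assumed; the driver code is not part of this
    # excerpt): the luminance baseline is configured per dataset file and per
    # batch before call(), e.g.
    #   importanceBaselineLuminance.setTestFile(dataset_file_high)
    #   if importanceBaselineLuminance.isAvailable():
    #       importanceBaselineLuminance.setIndices(indices)
    #       importanceBaselineLuminance.setTime(j)
    #       imp = importanceBaselineLuminance.call(importance_input, None)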

    importanceBaseline1 = ImportanceModel(IMPORTANCE_BASELINE1)
    importanceBaseline2 = ImportanceModel(IMPORTANCE_BASELINE2)
    importanceBaselineLuminance = LuminanceImportanceModel()
    importanceModels = [ImportanceModel(f) for f in NETWORKS]

    # load reconstruction networks
    print("load reconstruction networks")

    class ReconstructionModel:
        def __init__(self, file):
            if file == RECON_BASELINE:

                class Inpainting(nn.Module):
                    def forward(self, x, mask):
                        # channels 0:6 = mask, normal xyz, depth, ao; the mask
                        # argument is ignored and re-read from channel 6
                        input = x[:, 0:6, :, :].contiguous()
                        mask = x[:, 6, :, :].contiguous()
                        return torch.ops.renderer.fast_inpaint(mask, input)

                self._net = Inpainting()
                self._upscaling = 1
                self._name = "inpainting"
                self.disableTemporal = True
            else:
                self._name = file[0]
                file = os.path.join(NETWORK_DIR, file[1] + "_recon.pt")
                extra_files = torch._C.ExtraFilesMap()
                extra_files['settings.json'] = ""
                self._net = torch.jit.load(file,
                                           map_location=device,
                                           _extra_files=extra_files)
                self._settings = json.loads(extra_files['settings.json'])
                self.disableTemporal = False
                requiresMask = self._settings.get('expectMask', False)
                if self._settings.get("interpolateInput", False):
                    self._originalNet = self._net

                    class Inpainting2(nn.Module):
                        def __init__(self, originalNet, requiresMask):
                            super().__init__()
                            self._n = originalNet
                            self._requiresMask = requiresMask

                        def forward(self, x, mask):
                            input = x[:, 0:6, :, :].contiguous(
                            )  # mask, normal xyz, depth, ao
                            mask = x[:, 6, :, :].contiguous()
                            inpainted = torch.ops.renderer.fast_inpaint(
                                mask, input)
                            x[:, 0:6, :, :] = inpainted
                            if self._requiresMask:
                                return self._n(x, mask)
                            else:
                                return self._n(x)

                    self._net = Inpainting2(self._originalNet, requiresMask)

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, mask, prev_warped_out):
            input = torch.cat([input, prev_warped_out], dim=1)
            output = self._net(input, mask)
            return output

    class ReconstructionModelPostTrain:
        """
        Reconstruction models that are trained as dense reconstruction networks
        after the adaptive training.
        They don't receive the sampling mask as input, but can start with
        PDE-based inpainting.
        """
        def __init__(self, name: str, model_path: str, inpainting: str):
            assert inpainting == 'fast' or inpainting == 'pde', "inpainting must be either 'fast' or 'pde', but got %s" % inpainting
            self._inpainting = inpainting

            self._name = name
            file = os.path.join(POSTTRAIN_NETWORK_DIR, model_path)
            extra_files = torch._C.ExtraFilesMap()
            extra_files['settings.json'] = ""
            self._net = torch.jit.load(file,
                                       map_location=device,
                                       _extra_files=extra_files)
            self._settings = json.loads(extra_files['settings.json'])
            assert self._settings.get(
                'upscale_factor', None) == 1, "selected file is not a 1x SRNet"
            self.disableTemporal = False

        def name(self):
            return self._name

        def __repr__(self):
            return self.name()

        def call(self, input, prev_warped_out):
            # no sampling and no AO
            input_no_sampling = input[:, 0:5, :, :].contiguous(
            )  # mask, normal xyz, depth
            sampling_mask = input[:, 6, :, :].contiguous()
            # perform inpainting
            if self._inpainting == 'pde':
                inpainted = torch.ops.renderer.pde_inpaint(
                    sampling_mask,
                    input_no_sampling,
                    200,
                    1e-4,
                    5,
                    2,  # m0, epsilon, m1, m2
                    0,  # mc -> multigrid recursion count. =0 disables the multigrid hierarchy
                    9,
                    0)  # ms, m3
            else:
                inpainted = torch.ops.renderer.fast_inpaint(
                    sampling_mask, input_no_sampling)
            # run network
            input = torch.cat([inpainted, prev_warped_out], dim=1)
            output = self._net(input)
            if isinstance(output, tuple):
                output = output[0]
            return output

    reconBaseline = ReconstructionModel(RECON_BASELINE)
    reconModels = [ReconstructionModel(f) for f in NETWORKS]
    reconPostModels = [
        ReconstructionModelPostTrain(name, file, inpainting)
        for (name, file, inpainting) in POSTTRAIN_NETWORKS
    ]
    allReconModels = reconModels + reconPostModels

    NETWORK_COMBINATIONS = \
        [(importanceBaseline1, reconBaseline), (importanceBaseline2, reconBaseline)] + \
        [(importanceBaselineLuminance, reconBaseline)] + \
        [(importanceBaseline1, reconNet) for reconNet in allReconModels] + \
        [(importanceBaseline2, reconNet) for reconNet in allReconModels] + \
        [(importanceBaselineLuminance, reconNet) for reconNet in allReconModels] + \
        [(importanceNet, reconBaseline) for importanceNet in importanceModels] + \
        list(zip(importanceModels, reconModels)) + \
        [(importanceNet, reconPostModel) for importanceNet in importanceModels for reconPostModel in reconPostModels]
    #NETWORK_COMBINATIONS = list(zip(importanceModels, reconModels))
    print("Network combinations:")
    for (i, r) in NETWORK_COMBINATIONS:
        print("  %s - %s" % (i.name(), r.name()))

    # load sampling patterns
    print("load sampling patterns")
    with h5py.File(SAMPLING_FILE, 'r') as f:
        sampling_pattern = dict([(name, torch.from_numpy(f[name][...]).to(device)) \
            for name in SAMPLING_PATTERNS])

    # create shading
    shading = ScreenSpaceShading(device)
    shading.fov(30)
    shading.ambient_light_color(np.array([0.1, 0.1, 0.1]))
    shading.diffuse_light_color(np.array([1.0, 1.0, 1.0]))
    shading.specular_light_color(np.array([0.0, 0.0, 0.0]))
    shading.specular_exponent(16)
    shading.light_direction(np.array([0.1, 0.1, 1.0]))
    shading.material_color(np.array([1.0, 0.3, 0.0]))
    AMBIENT_OCCLUSION_STRENGTH = 1.0
    shading.ambient_occlusion(AMBIENT_OCCLUSION_STRENGTH)
    shading.inverse_ao = False

    # heatmap
    HEATMAP_CFG = [(hmin, hmean) for hmin in HEATMAP_MIN
                   for hmean in HEATMAP_MEAN if hmin < hmean]
    print("heatmap configs:", HEATMAP_CFG)

    #########################
    # DEFINE STATISTICS
    #########################
    ssimLoss = SSIM(size_average=False)
    ssimLoss.to(device)
    psnrLoss = PSNR()
    psnrLoss.to(device)
    lpipsColor = lpips.PerceptualLoss(model='net-lin',
                                      net='alex',
                                      use_gpu=True)
    MIN_FILLING = 0.05
    NUM_BINS = 200

    class Statistics:
        def __init__(self):
            self.histogram_color_withAO = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_color_noAO = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_depth = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_normal = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_mask = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_ao = np.zeros(NUM_BINS, dtype=np.float64)
            self.histogram_counter = 0

        def create_datasets(self, hdf5_file: h5py.File, stats_name: str,
                            histo_name: str, num_samples: int,
                            extra_info: dict):

            self.expected_num_samples = num_samples
            stats_shape = (num_samples, len(list(StatField)))
            self.stats_file = hdf5_file.require_dataset(stats_name,
                                                        stats_shape,
                                                        dtype='f',
                                                        exact=True)
            self.stats_file.attrs['NumFields'] = len(list(StatField))
            for field in list(StatField):
                self.stats_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.stats_file.attrs[key] = value
            self.stats_index = 0

            histo_shape = (NUM_BINS, len(list(HistoField)))
            self.histo_file = hdf5_file.require_dataset(histo_name,
                                                        histo_shape,
                                                        dtype='f',
                                                        exact=True)
            self.histo_file.attrs['NumFields'] = len(list(HistoField))
            for field in list(HistoField):
                self.histo_file.attrs['Field%d' % field.value] = field.name
            for key, value in extra_info.items():
                self.histo_file.attrs[key] = value

        def add_timestep_sample(self, pred_mnda, gt_mnda, sampling_mask):
            """
            adds a timestep sample:
            pred_mnda: prediction: mask, normal, depth, AO
            gt_mnda: ground truth: mask, normal, depth, AO
            """
            B = pred_mnda.shape[0]

            #shading
            shading.ambient_occlusion(AMBIENT_OCCLUSION_STRENGTH)
            pred_color_withAO = shading(pred_mnda)
            gt_color_withAO = shading(gt_mnda)
            shading.ambient_occlusion(0.0)
            pred_color_noAO = shading(pred_mnda)
            gt_color_noAO = shading(gt_mnda)

            #apply border
            pred_mnda = pred_mnda[:, :, LOSS_BORDER:-LOSS_BORDER,
                                  LOSS_BORDER:-LOSS_BORDER]
            pred_color_withAO = pred_color_withAO[:, :,
                                                  LOSS_BORDER:-LOSS_BORDER,
                                                  LOSS_BORDER:-LOSS_BORDER]
            pred_color_noAO = pred_color_noAO[:, :, LOSS_BORDER:-LOSS_BORDER,
                                              LOSS_BORDER:-LOSS_BORDER]
            gt_mnda = gt_mnda[:, :, LOSS_BORDER:-LOSS_BORDER,
                              LOSS_BORDER:-LOSS_BORDER]
            gt_color_withAO = gt_color_withAO[:, :, LOSS_BORDER:-LOSS_BORDER,
                                              LOSS_BORDER:-LOSS_BORDER]
            gt_color_noAO = gt_color_noAO[:, :, LOSS_BORDER:-LOSS_BORDER,
                                          LOSS_BORDER:-LOSS_BORDER]

            mask = gt_mnda[:, 0:1, :, :] * 0.5 + 0.5

            # PSNR
            psnr_mask = psnrLoss(pred_mnda[:, 0:1, :, :],
                                 gt_mnda[:, 0:1, :, :]).cpu().numpy()
            psnr_normal = psnrLoss(pred_mnda[:, 1:4, :, :],
                                   gt_mnda[:, 1:4, :, :],
                                   mask=mask).cpu().numpy()
            psnr_depth = psnrLoss(pred_mnda[:, 4:5, :, :],
                                  gt_mnda[:, 4:5, :, :],
                                  mask=mask).cpu().numpy()
            psnr_ao = psnrLoss(pred_mnda[:, 5:6, :, :],
                               gt_mnda[:, 5:6, :, :],
                               mask=mask).cpu().numpy()
            psnr_color_withAO = psnrLoss(pred_color_withAO,
                                         gt_color_withAO,
                                         mask=mask).cpu().numpy()
            psnr_color_noAO = psnrLoss(pred_color_noAO,
                                       gt_color_noAO,
                                       mask=mask).cpu().numpy()

            # SSIM
            ssim_mask = ssimLoss(pred_mnda[:, 0:1, :, :],
                                 gt_mnda[:, 0:1, :, :]).cpu().numpy()
            pred_mnda = gt_mnda + mask * (pred_mnda - gt_mnda)
            ssim_normal = ssimLoss(pred_mnda[:, 1:4, :, :],
                                   gt_mnda[:, 1:4, :, :]).cpu().numpy()
            ssim_depth = ssimLoss(pred_mnda[:, 4:5, :, :],
                                  gt_mnda[:, 4:5, :, :]).cpu().numpy()
            ssim_ao = ssimLoss(pred_mnda[:, 5:6, :, :],
                               gt_mnda[:, 5:6, :, :]).cpu().numpy()
            ssim_color_withAO = ssimLoss(pred_color_withAO,
                                         gt_color_withAO).cpu().numpy()
            ssim_color_noAO = ssimLoss(pred_color_noAO,
                                       gt_color_noAO).cpu().numpy()

            # Perceptual
            lpips_color_withAO = torch.cat([
                lpipsColor(pred_color_withAO[b], gt_color_withAO[b], normalize=True)
                for b in range(B)
            ], dim=0).cpu().numpy()
            lpips_color_noAO = torch.cat([
                lpipsColor(pred_color_noAO[b], gt_color_noAO[b], normalize=True)
                for b in range(B)
            ], dim=0).cpu().numpy()

            # Samples
            samples = torch.mean(sampling_mask, dim=(1, 2, 3)).cpu().numpy()

            # Write samples to file
            for b in range(B):
                assert self.stats_index < self.expected_num_samples, "Adding more samples than specified"
                self.stats_file[self.stats_index, :] = np.array([
                    psnr_mask[b], psnr_normal[b], psnr_depth[b], psnr_ao[b],
                    psnr_color_noAO[b], psnr_color_withAO[b],
                    ssim_mask[b], ssim_normal[b], ssim_depth[b], ssim_ao[b],
                    ssim_color_noAO[b], ssim_color_withAO[b],
                    lpips_color_noAO[b], lpips_color_withAO[b], samples[b]
                ], dtype='f')
                self.stats_index += 1

            # Histogram
            self.histogram_counter += 1
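            # each histogram below is maintained as a running mean over
            # minibatches: m_k = m_{k-1} + (x_k - m_{k-1}) / k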

            mask_diff = F.l1_loss(gt_mnda[:, 0, :, :],
                                  pred_mnda[:, 0, :, :],
                                  reduction='none')
            histogram, _ = np.histogram(mask_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_mask += (
                histogram /
                (NUM_BINS * B) - self.histogram_mask) / self.histogram_counter

            #normal_diff = (-F.cosine_similarity(gt_mnda[0,1:4,:,:], pred_mnda[0,1:4,:,:], dim=0)+1)/2
            # sum the L1 error over the three normal channels (dim=1); each
            # channel differs by at most 2, so /6 maps the result into [0, 1]
            normal_diff = F.l1_loss(gt_mnda[:, 1:4, :, :],
                                    pred_mnda[:, 1:4, :, :],
                                    reduction='none').sum(dim=1) / 6
            histogram, _ = np.histogram(normal_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_normal += (
                histogram /
                (NUM_BINS * B) - self.histogram_normal) / self.histogram_counter

            depth_diff = F.l1_loss(gt_mnda[:, 4, :, :],
                                   pred_mnda[:, 4, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(depth_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_depth += (
                histogram /
                (NUM_BINS * B) - self.histogram_depth) / self.histogram_counter

            ao_diff = F.l1_loss(gt_mnda[:, 5, :, :],
                                pred_mnda[:, 5, :, :],
                                reduction='none')
            histogram, _ = np.histogram(ao_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_ao += (histogram / (NUM_BINS * B) -
                                  self.histogram_ao) / self.histogram_counter

            color_diff = F.l1_loss(gt_color_withAO[:, 0, :, :],
                                   pred_color_withAO[:, 0, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(color_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_color_withAO += (
                histogram / (NUM_BINS * B) -
                self.histogram_color_withAO) / self.histogram_counter

            color_diff = F.l1_loss(gt_color_noAO[:, 0, :, :],
                                   pred_color_noAO[:, 0, :, :],
                                   reduction='none')
            histogram, _ = np.histogram(color_diff.cpu().numpy(),
                                        bins=NUM_BINS,
                                        range=(0, 1),
                                        density=True)
            self.histogram_color_noAO += (
                histogram / (NUM_BINS * B) -
                self.histogram_color_noAO) / self.histogram_counter

        def close_stats_file(self):
            self.stats_file.attrs['NumEntries'] = self.stats_index

        def write_histogram(self):
            """
            After every sample for the current dataset was processed, write
            a histogram of the errors in a new file
            """
            for i in range(NUM_BINS):
                self.histo_file[i, :] = np.array([
                    i / NUM_BINS, (i + 1) / NUM_BINS, self.histogram_mask[i],
                    self.histogram_normal[i], self.histogram_depth[i],
                    self.histogram_ao[i], self.histogram_color_withAO[i],
                    self.histogram_color_noAO[i]
                ])
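
    # For reference, a minimal sketch of how a masked PSNR such as psnrLoss
    # above could be implemented. This is an illustrative assumption, not the
    # project's actual psnrLoss: it expects values in [0, 1], a mask that is
    # broadcastable over the channel dimension, and torch imported at module
    # scope as elsewhere in this file.
    def psnr_reference(pred, gt, mask=None, max_val=1.0):
        err = (pred - gt) ** 2  # (B, C, H, W)
        if mask is not None:
            # average the squared error over masked pixels only; multiply the
            # pixel count by C because err is summed over all channels
            mse = ((err * mask).sum(dim=(1, 2, 3))
                   / (mask.sum(dim=(1, 2, 3)) * err.shape[1]).clamp(min=1e-8))
        else:
            mse = err.mean(dim=(1, 2, 3))
        return 10.0 * torch.log10(max_val ** 2 / mse.clamp(min=1e-10))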

    #########################
    # DATASET
    #########################
    class FullResDataset(torch.utils.data.Dataset):
        def __init__(self, file):
            self.hdf5_file = h5py.File(file, 'r')
            self.dset = self.hdf5_file['gt']
            print("Dataset shape:", self.dset.shape)

        def __len__(self):
            return self.dset.shape[0]

        def num_timesteps(self):
            return self.dset.shape[1]

        def __getitem__(self, idx):
            return (self.dset[idx, ...], np.array(idx))
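
        # note: the h5py handle is opened once in __init__, so this dataset is
        # intended for single-process loading (the DataLoader below uses the
        # default num_workers=0); open HDF5 handles do not transfer cleanly to
        # forked worker processes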

    #########################
    # COMPUTE STATS for each dataset
    #########################
    for dataset_name, dataset_file in DATASETS:
        dataset_file = os.path.join(DATASET_PREFIX, dataset_file)
        print("Compute statistics for", dataset_name)

        # init luminance importance map
        importanceBaselineLuminance.setTestFile(dataset_file)
        if importanceBaselineLuminance.isAvailable():
            print("Luminance-contrast importance map is available")

        # create output file
        os.makedirs(OUTPUT_FOLDER, exist_ok=True)
        output_file = os.path.join(OUTPUT_FOLDER, dataset_name + '.hdf5')
        print("Save to", output_file)
        with h5py.File(output_file, 'a') as output_hdf5_file:

            # load dataset
            eval_set = FullResDataset(dataset_file)
            data_loader = torch.utils.data.DataLoader(eval_set,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

            # define statistics
            StatsCfg = collections.namedtuple(
                "StatsCfg", "stats importance recon heatmin heatmean pattern")
            statistics = []
            for (inet, rnet) in NETWORK_COMBINATIONS:
                for (heatmin, heatmean) in HEATMAP_CFG:
                    for pattern in SAMPLING_PATTERNS:
                        stats_info = {
                            'importance': inet.name(),
                            'reconstruction': rnet.name(),
                            'heatmin': heatmin,
                            'heatmean': heatmean,
                            'pattern': pattern
                        }
                        stats_filename = "Stats_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 100,
                            heatmean * 100, pattern)
                        histo_filename = "Histogram_%s_%s_%03d_%03d_%s" % (
                            inet.name(), rnet.name(), heatmin * 100,
                            heatmean * 100, pattern)
                        s = Statistics()
                        s.create_datasets(output_hdf5_file, stats_filename,
                                          histo_filename,
                                          len(eval_set) * eval_set.num_timesteps(),
                                          stats_info)
                        statistics.append(
                            StatsCfg(stats=s,
                                     importance=inet,
                                     recon=rnet,
                                     heatmin=heatmin,
                                     heatmean=heatmean,
                                     pattern=pattern))
            print(len(statistics), "different combinations are performed per sample")

            # compute statistics
            try:
                with torch.no_grad():
                    num_minibatch = len(data_loader)
                    pg = ProgressBar(num_minibatch, 'Evaluation', length=50)
                    for iteration, (batch, batch_indices) in enumerate(
                            data_loader, 0):
                        pg.print_progress_bar(iteration)
                        batch = batch.to(device)
                        importanceBaselineLuminance.setIndices(batch_indices)
                        B, T, C, H, W = batch.shape

                        # try out each combination
                        for s in statistics:
                            #print(s)
                            # get input to evaluation
                            importanceNetUpscale = s.importance.networkUpscaling()
                            importancePostUpscale = UPSCALING // importanceNetUpscale
                            crop_low = torch.nn.functional.interpolate(
                                batch.reshape(B * T, C, H, W),
                                scale_factor=1 / UPSCALING,
                                mode='area').reshape(B, T, C, H // UPSCALING,
                                                     W // UPSCALING)
                            pattern = sampling_pattern[s.pattern][:H, :W]
                            crop_high = batch

                            # loop over timesteps
                            pattern = pattern.unsqueeze(0).unsqueeze(0)
                            previous_importance = None
                            previous_output = None
                            reconstructions = []
                            sample_masks = []  # one sampling mask per timestep, for the statistics
                            for j in range(T):
                                importanceBaselineLuminance.setTime(j)
                                # extract flow (always the last two channels of crop_high)
                                flow = crop_high[:, j, C - 2:, :, :]

                                # compute importance map
                                importance_input = crop_low[:, j, :5, :, :]
                                if j == 0 or s.importance.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        1,
                                        importance_input.shape[2] *
                                        importanceNetUpscale,
                                        importance_input.shape[3] *
                                        importanceNetUpscale,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    flow_low = F.interpolate(
                                        flow,
                                        scale_factor=1 / importancePostUpscale)
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_importance, flow_low, 1,
                                        False)
                                importance_map = s.importance.call(
                                    importance_input, previous_input)
                                if len(importance_map.shape) == 3:
                                    importance_map = importance_map.unsqueeze(1)
                                previous_importance = importance_map

                                target_mean = s.heatmean
                                if USE_BINARY_SEARCH_ON_MEAN:
                                    # For regular sampling, the normalization does not work properly,
                                    # use binary search on the heatmean instead
                                    def f(x):
                                        postprocess = importance.PostProcess(
                                            s.heatmin, x,
                                            importancePostUpscale,
                                            LOSS_BORDER //
                                            importancePostUpscale, 'basic')
                                        importance_map2 = postprocess(
                                            importance_map)[0].unsqueeze(1)
                                        sampling_mask = (
                                            importance_map2 >= pattern).to(
                                                dtype=importance_map.dtype)
                                        samples = torch.mean(
                                            sampling_mask).item()
                                        return samples

                                    target_mean = binarySearch(
                                        f, s.heatmean, s.heatmean, 10, 0, 1)
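                                    # binarySearch appears to bisect on the
                                    # post-processed mean until the realized
                                    # sample fraction f(x) matches the requested
                                    # s.heatmean (10 iterations, bounds [0, 1])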
                                    #print("Binary search for #samples, mean start={}, result={} with samples={}, original={}".
                                    #      format(s.heatmean, s.heatmean, f(target_mean), f(s.heatmean)))

                                # normalize and upscale importance map
                                postprocess = importance.PostProcess(
                                    s.heatmin, target_mean,
                                    importancePostUpscale,
                                    LOSS_BORDER // importancePostUpscale,
                                    'basic')
                                importance_map = postprocess(
                                    importance_map)[0].unsqueeze(1)
                                #print("mean:", torch.mean(importance_map).item())

                                # create samples
                                sample_mask = (importance_map >= pattern).to(
                                    dtype=importance_map.dtype)
                                sample_masks.append(sample_mask)

                                reconstruction_input = torch.cat([
                                    # mask, normal x/y/z, depth
                                    sample_mask * crop_high[:, j, 0:5, :, :],
                                    # ao placeholder (ones where sampled)
                                    sample_mask * torch.ones(B, 1, H, W,
                                                             dtype=crop_high.dtype,
                                                             device=crop_high.device),
                                    # the sample mask itself
                                    sample_mask
                                ], dim=1)

                                # warp previous output
                                if j == 0 or s.recon.disableTemporal:
                                    previous_input = torch.zeros(
                                        B,
                                        6,
                                        H,
                                        W,
                                        dtype=crop_high.dtype,
                                        device=crop_high.device)
                                else:
                                    previous_input = models.VideoTools.warp_upscale(
                                        previous_output, flow, 1, False)

                                # run reconstruction network
                                reconstruction = s.recon.call(
                                    reconstruction_input, sample_mask,
                                    previous_input)

                                # clamp
                                reconstruction_clamped = torch.cat([
                                    torch.clamp(reconstruction[:, 0:1, :, :], -1, +1),  # mask
                                    ScreenSpaceShading.normalize(reconstruction[:, 1:4, :, :], dim=1),  # normals
                                    torch.clamp(reconstruction[:, 4:5, :, :], 0, +1),  # depth
                                    torch.clamp(reconstruction[:, 5:6, :, :], 0, +1)   # ao
                                ], dim=1)
                                reconstructions.append(reconstruction_clamped)

                                # save for next frame
                                previous_output = reconstruction_clamped

                            #endfor: timesteps

                            # compute statistics
                            reconstructions = torch.cat(reconstructions, dim=0)
                            crops_high = torch.cat(
                                [crop_high[:, j, :6, :, :] for j in range(T)],
                                dim=0)
                            sample_masks = torch.cat(sample_masks, dim=0)  # per-timestep masks, matching reconstructions
                            s.stats.add_timestep_sample(
                                reconstructions, crops_high, sample_masks)

                        # endfor: statistic
                    # endfor: batch

                    pg.print_progress_bar(num_minibatch)
                # end no_grad()
            finally:
                # close files
                for s in statistics:
                    s.stats.write_histogram()
                    s.stats.close_stats_file()
Exemple #26
0
    cA, (cH, cV, cD) = pywt.dwt2(x_bic, 'haar')
    input_data = np.array([[cA, cH, cV, cD]])
    input_data = input_data.transpose([0, 2, 3, 1])

    # predict by pretrained model
    result = model.predict(input_data, batch_size=1, verbose=1)
    result = np.squeeze(result)
    # inverse Wavelet transform
    rA, rH, rV, rD = (result[:, :, 0], result[:, :, 1],
                      result[:, :, 2], result[:, :, 3])
    x_sr = pywt.idwt2((rA, (rH, rV, rD)), 'haar')
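
    # note: pywt.idwt2 inverts pywt.dwt2 exactly (up to floating-point error)
    # for even-sized inputs, so x_sr above has the same shape as x_bic:
    #   np.allclose(pywt.idwt2(pywt.dwt2(x_bic, 'haar'), 'haar'), x_bic)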

    # compute metrics, remove border first
    psnr_val = PSNR(
        x[scale:-scale, scale:-scale],
        x_sr[scale:-scale, scale:-scale] + x_bic[scale:-scale, scale:-scale])
    ssim_val = SSIM(
        x[scale:-scale, scale:-scale],
        x_sr[scale:-scale, scale:-scale] + x_bic[scale:-scale, scale:-scale])
    psnr_list.append(psnr_val)
    ssim_list.append(ssim_val)
    print('%s\tPSNR: %.4f\tSSIM: %.4f' % (image_name, psnr_val, ssim_val))

    # display images
    if option.display:
        plt.figure()
        plt.subplot(221)
        plt.imshow(x_bic)
        plt.title('bicubic'), plt.axis('off')
        plt.subplot(222)
Exemple #27
0
def ARCNN_SRCNN(args, required_width, required_height, output1_MeanPSNR,
                output2_MeanPSNR, output3_MeanPSNR, MeanTime, PatchName_List,
                PSNR1_List, PSNR2_List, PSNR3_List, TimeConsume_List):
    test_list = os.listdir(args.TestPatch_dir)
    for num, im_name in enumerate(test_list[:2]):  # these are the input images
        start_time = time.time()
        outputs_dir = os.path.join(os.getcwd(), 'output',
                                   'JPEG_{} SR_X{}'.format(args.JPEG_factor, args.SR_scale))
        output_dir = os.path.join(outputs_dir, im_name[:-4])
        mkdir(output_dir)
        im_LRHQ_name = im_name.replace('.bmp', '_LRHQ.bmp')
        im_LRLQ_name = im_name.replace('.bmp', '_LRLQ.jpg')
        im_path = os.path.join(args.TestPatch_dir, im_name)
        im_GroundTrue = cv2.imread(im_path, -1)  # ground truth, e.g. 512x512
        im_LRHQ = dowmsample(im_GroundTrue, args.SR_scale, output_dir,
                             im_LRHQ_name)  # 512 -> 170
        im_LRLQ = jpeg(
            im_LRHQ, args.JPEG_factor, output_dir,
            im_LRLQ_name)  # jpeg function: no need to replace im_name here

        # output1
        im_LRRLQ = interpolation(im_LRLQ, args.SR_scale)  # numpy
        output1_path = os.path.join(output_dir,
                                    im_name.replace('.bmp', '_output1.jpg'))
        cv2.imwrite(output1_path, im_LRRLQ)
        psnr1 = PSNR(im_path, output1_path, required_width, required_height)
        output1_MeanPSNR.update(val=psnr1, n=1)

        # output2
        #### TODO: change the channel; it might be this
        img_format = '.jpg'  # jpg or png
        output2_name = im_name.replace('.bmp', '_output2.jpg')
        output2_path = os.path.join(output_dir, output2_name)
        output2 = SRCNN2(args, output1_path)
        output2.save(output2_path)
        psnr2 = PSNR(im_path, output2_path, required_width, required_height)
        output2_MeanPSNR.update(val=psnr2, n=1)
        ### TODO: Image is not imported; only SRCNN uses PIL

        # output3
        OutputTemp_name = im_name.replace('.bmp', '_LRHQQ.png')
        OutputTemp_path = os.path.join(output_dir, OutputTemp_name)
        im_LRLQ_file = os.path.join(output_dir, im_LRLQ_name)
        im_LRLQ2 = cv2.imread(im_LRLQ_file, -1)
        LRHQQ = denoise(args, im_LRLQ2, OutputTemp_path)  # im_LRLQ: numpy
        height = np.size(LRHQQ, 0)
        width = np.size(LRHQQ, 1)
        im_LRRHQQ = cv2.resize(
            LRHQQ, (int(width * args.SR_scale), int(height * args.SR_scale)),
            interpolation=cv2.INTER_CUBIC)  # cv2.resize expects dsize as (width, height)
        OutputTemp_name2 = im_name.replace('.bmp', '_LRRHQQ.png')
        OutputTemp_path2 = os.path.join(output_dir, OutputTemp_name2)
        cv2.imwrite(OutputTemp_path2, im_LRRHQQ)
        # TODO: STORE THE im_LRRHQQ and read, then input the new SRCNN2()
        # convert_to_PIL class
        im_LRRHQQ_PIL = pil_image.fromarray(im_LRRHQQ)
        output3 = SRCNN2(args, OutputTemp_path2)
        output3_name = im_name.replace('.bmp', '_output3.png')
        output3_path = os.path.join(output_dir, output3_name)
        output3.save(output3_path)
        psnr3 = PSNR(im_path, output3_path, required_width, required_height)
        output3_MeanPSNR.update(val=psnr3, n=1)

        end_time = time.time()
        MeanTime.update(val=end_time - start_time, n=1)
        PatchName_List.append(im_name[:-4])
        PSNR1_List.append(float(psnr1))
        PSNR2_List.append(float(psnr2))
        PSNR3_List.append(float(psnr3))
        TimeConsume_List.append(end_time - start_time)
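
The PSNR helper above takes two image paths plus the required width and height. For reference, a minimal sketch of such a file-based PSNR; psnr_from_files is a hypothetical stand-in assuming 8-bit images resized to the requested size (the project's PSNR may crop or convert color differently):

import cv2
import numpy as np

def psnr_from_files(gt_path, test_path, width, height):
    # read both images unchanged and bring them to a common size
    gt = cv2.resize(cv2.imread(gt_path, -1).astype(np.float64), (width, height))
    test = cv2.resize(cv2.imread(test_path, -1).astype(np.float64), (width, height))
    mse = np.mean((gt - test) ** 2)
    return 10.0 * np.log10(255.0 ** 2 / max(mse, 1e-12))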
Exemple #28
0
                batch_S3 = model_G(F.pad(torch.rot90(val_L, 2, [2,3]), (0,1,0,1), mode='reflect'))
                batch_S3 = torch.rot90(batch_S3, 2, [2,3])

                batch_S4 = model_G(F.pad(torch.rot90(val_L, 3, [2,3]), (0,1,0,1), mode='reflect'))
                batch_S4 = torch.rot90(batch_S4, 1, [2,3])
                
                batch_S = (torch.clamp(batch_S1, -1, 1) * 127 +
                           torch.clamp(batch_S2, -1, 1) * 127)
                batch_S += (torch.clamp(batch_S3, -1, 1) * 127 +
                            torch.clamp(batch_S4, -1, 1) * 127)
                batch_S /= 255.0
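                # rotation self-ensemble: each term is one forward pass on a
                # rotated input, clamped to [-1, 1] and scaled by 127/255
                # before the four orientations are combined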


                # Output 
                image_out = (batch_S).cpu().data.numpy()
                image_out = np.clip(image_out[0], 0. , 1.)      # CxHxW
                image_out = np.transpose(image_out, [1, 2, 0])  # HxWxC

                # Save to file
                image_out = ((image_out)*255).astype(np.uint8)
                Image.fromarray(image_out).save('result/{}/{}.png'.format(str(VERSION), fn.split('/')[-1]))

                # PSNR on Y channel
                img_gt = (val_H*255).astype(np.uint8)
                CROP_S = 4
                psnrs.append(PSNR(_rgb2ycbcr(img_gt)[:,:,0], _rgb2ycbcr(image_out)[:,:,0], CROP_S))

        print('AVG PSNR: Validation: {}'.format(np.mean(np.asarray(psnrs))))

        writer.add_scalar('PSNR_valid', np.mean(np.asarray(psnrs)), i)
        writer.flush()
Exemple #29
0
    def generate(self,
                 test_data,
                 batch_size,
                 trained_model,
                 save_to='deblur_test/',
                 customized=False,
                 save=True):
        # generate deblured image
        if customized:
            x_test = test_data
        else:
            y_test, x_test = test_data['B'], test_data['A']
        size = x_test.shape[0]
        save_to = save_to + trained_model
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            print("Load the model from: {}".format(trained_model))
            saver.restore(sess, 'model/{}'.format(trained_model))
            print("Model restored.")

            ##Generate deblurred images
            generated = []
            num_batches = size // batch_size
            for index in range(num_batches):
                _input = x_test[index * batch_size:(index + 1) * batch_size]
                generated_test = sess.run(self.fake_B,
                                          feed_dict={
                                              self.real_A: _input,
                                              self.training: False
                                          })
                generated = generated + [
                    deprocess_image(img) for img in generated_test
                ]
            # handle a final partial batch; guarded so it also works when
            # size < batch_size and the loop above never runs
            if num_batches * batch_size != size:
                _input = x_test[num_batches * batch_size:]
                generated_test = sess.run(self.fake_B,
                                          feed_dict={
                                              self.real_A: _input,
                                              self.training: False
                                          })
                generated = generated + [
                    deprocess_image(img) for img in generated_test
                ]
            generated = np.array(generated)
            # generated_test = sess.run(self.fake_B, feed_dict={self.real_A: x_test, self.training:False})
            # generated = np.array([deprocess_image(img) for img in generated_test])
            x_test = deprocess_image(x_test)

            if not os.path.exists(save_to):
                os.makedirs(save_to)
            ##save image
            if save:
                if customized:
                    for i in range(generated.shape[0]):
                        x = x_test[i, :, :, :]
                        img = generated[i, :, :, :]
                        output = np.concatenate((img, x), axis=1)
                        im = Image.fromarray(output.astype(np.uint8))
                        im.save(save_to + '/' + str(i) + '.png')
                else:
                    y_test = deprocess_image(y_test)
                    for i in range(generated.shape[0]):
                        y = y_test[i, :, :, :]
                        x = x_test[i, :, :, :]
                        img = generated[i, :, :, :]
                        output = np.concatenate((y, img, x), axis=1)
                        im = Image.fromarray(output.astype(np.uint8))
                        im.save(save_to + '/' + str(i) + '.png')

            ##Calculate Peak Signal Noise Ratio(PSNR)
            if not customized:
                if not save:
                    y_test = deprocess_image(y_test)
                psnr = 0
                for i in range(size):
                    y = y_test[i, :, :, :]
                    img = generated[i, :, :, :]
                    psnr = psnr + PSNR(y, img)
                    # print(PSNR(y,img))
                psnr_mean = psnr / size
                print("PSNR of testing data: " + str(psnr_mean))
        return generated
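
deprocess_image above maps the generator output back to displayable 8-bit values. A minimal sketch under the common assumption that the network emits values in [-1, 1] (hypothetical; the project's helper may scale differently):

import numpy as np

def deprocess_image(img):
    # map [-1, 1] -> [0, 255] and cast so PIL can save the result
    return ((img + 1.0) * 127.5).clip(0, 255).astype(np.uint8)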
Exemple #30
0
import argparse

import scipy.misc
import tensorflow as tf

# load_test, load_model and PSNR are assumed to come from the project's own modules

parser = argparse.ArgumentParser(description='Test function')
parser.add_argument('--test', metavar='test', type=str, help='test directory')
parser.add_argument('--network',
                    metavar='network',
                    type=str,
                    help='network weight')
args = parser.parse_args()

val_in, val_out = load_test(directory=args.test)
model = load_model(args.network)

prediction = model.predict(val_in, batch_size=1, verbose=1)

Result = PSNR(val_out, prediction)
sess = tf.Session()
RR = sess.run(Result)

print(RR)
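
# note: scipy.misc.imsave was removed in SciPy 1.2; on current SciPy,
# imageio.imwrite is the usual drop-in replacement for the calls below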

for img_count in range(prediction.shape[0]):
    img_in = val_in[img_count, :, :, :]
    img_out = prediction[img_count, :, :, :]
    img_gt = val_out[img_count, :, :, :]
    scipy.misc.imsave('./Result/LR' + '{0:03d}'.format(img_count) + '.png',
                      img_in)
    scipy.misc.imsave('./Result/HR' + '{0:03d}'.format(img_count) + '.png',
                      img_out)
    scipy.misc.imsave('./Result/GT' + '{0:03d}'.format(img_count) + '.png',
                      img_gt)