    def get_hr_hat_from_resid(self, lr_images, reconsts):
        tmp1 = lr_images.data.cpu().numpy().transpose(0,2,3,1)*255
        image_list = [np.array(Image.fromarray(tmp1.astype(np.uint8)[i]).resize((128,128), Image.BICUBIC)) for i in range(len(lr_images))]
        images_hr_bicubic = np.stack(image_list)
        #return this ^
        images_hr_bicubic = images_hr_bicubic.transpose(0,3,1,2)
        images_hr_bicubic = Scaling(images_hr_bicubic)
        images_hr_bicubic = torch.from_numpy(images_hr_bicubic).float().to(self.device) # NUMPY to TORCH
        hr_images_hat = reconsts + images_hr_bicubic
        hr_images_hat = hr_images_hat.data.cpu().numpy()
        hr_images_hat = Scaling01(hr_images_hat)
        hr_images_hat = torch.from_numpy(hr_images_hat).float().to(self.device) # NUMPY to TORCH

        return hr_images_hat
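The helpers Scaling and Scaling01 are imported from elsewhere in this repo and are not shown here. From how they are used above (inputs in the [0, 255] range, outputs later multiplied by 255 for PNG export), a minimal sketch might look like the following, assuming Scaling maps pixel values to [-1, 1] and Scaling01 min-max normalizes to [0, 1]; the real implementations may differ.

import numpy as np

def Scaling(image):
    # Assumed behaviour: map [0, 255] pixel values to [-1, 1].
    arr = np.asarray(image, dtype=np.float32)
    return arr / 127.5 - 1.0

def Scaling01(array):
    # Assumed behaviour: min-max normalize to [0, 1] so the result can be
    # multiplied by 255 and cast to uint8 for saving.
    arr = np.asarray(array, dtype=np.float32)
    return (arr - arr.min()) / (arr.max() - arr.min() + 1e-8)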
Example #2
    def get_trio_images(self, lr_image, hr_image, reconst):
        tmp1 = lr_image.data.cpu().numpy().transpose(0,2,3,1)*255
        image_list = [np.array(Image.fromarray(tmp1.astype(np.uint8)[i]).resize((128,128), Image.BICUBIC)) for i in range(self.data_loader.batch_size)]
        image_hr_bicubic = np.stack(image_list)
        image_hr_bicubic_single = np.squeeze(image_hr_bicubic)
        print('hr_bicubic_single:', image_hr_bicubic_single.shape)
        #return this ^
        image_hr_bicubic = image_hr_bicubic.transpose(0,3,1,2)
        image_hr_bicubic = Scaling(image_hr_bicubic)
        image_hr_bicubic = torch.from_numpy(image_hr_bicubic).double().to(self.device) # NUMPY to TORCH
        hr_image_hat = reconst
        hr_image_hat = hr_image_hat.data.cpu().numpy()
        hr_image_hat = np.squeeze(hr_image_hat).transpose((1, 2, 0))
        hr_image_hat = (hr_image_hat*255).astype(np.uint8)
        print('hr_image_hat : ', hr_image_hat.shape)
        #return this ^
        hr_image = hr_image.data.cpu().numpy().transpose(0,2,3,1)*255
        hr_image = np.squeeze(hr_image.astype(np.uint8))
        #return this ^
        return Image.fromarray(image_hr_bicubic_single), Image.fromarray(hr_image_hat), Image.fromarray(hr_image)
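A hedged usage sketch (the solver, lr_image, hr_image, and reconst names are assumptions, not part of the snippet): the method returns three PIL images, so they can be written out directly.

# Hypothetical usage, assuming `solver` exposes this method and `reconst`
# has already been produced by the model for the same batch.
bicubic_img, hr_hat_img, hr_img = solver.get_trio_images(lr_image, hr_image, reconst)
bicubic_img.save('hr_bicubic.png')
hr_hat_img.save('hr_hat.png')
hr_img.save('hr_original.png')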
Example #3
    def create_grid(self, lr_image, hr_image, reconst):
        """Generate a grid image: LR image | HR image hat (from model) | HR image (original).

        lr_image -- LR image tensor from the dataloader (can be a batch)
        hr_image -- HR image tensor from the dataloader (can be a batch)
        reconst  -- output of the model (HR residual)
        """
        tmp1 = lr_image.data.cpu().numpy().transpose(0,2,3,1)*255
        image_list = [np.array(Image.fromarray(tmp1.astype(np.uint8)[i]).resize((128,128), Image.BICUBIC)) for i in range(self.data_loader.batch_size)]
        image_hr_bicubic = np.stack(image_list).transpose(0,3,1,2)
        image_hr_bicubic = Scaling(image_hr_bicubic)
        image_hr_bicubic = torch.from_numpy(image_hr_bicubic).double().to(self.device) # NUMPY to TORCH
        hr_image_hat = reconst               
        hr_image_hat = hr_image_hat.data.cpu().numpy()
        hr_image_hat = torch.from_numpy(hr_image_hat).double().to(self.device) # NUMPY to TORCH

        pairs = torch.cat((image_hr_bicubic.data, \
                                hr_image_hat.data,\
                                hr_image.data), dim=3)
        grid = make_grid(pairs, 1) 
        tmp = np.squeeze(grid.cpu().numpy().transpose((1, 2, 0)))
        grid = (255 * tmp).astype(np.uint8)
        return grid
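A hedged usage sketch (names other than create_grid are assumptions): the method returns a uint8 H x W x C array, which can be saved directly with PIL.

from PIL import Image

# Hypothetical usage: build the comparison grid for one batch and write it to disk.
grid = solver.create_grid(lr_image, hr_image, reconst)
Image.fromarray(grid).save('grid.png')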
Example #4
    def test(self): # receives a single image --> can easily be modified to handle multiple images
        """Takes a single LR image as input. Saves the bicubic-upscaled LR image, the model's
        approximate HR image, and their concatenation as a grid.
        The image location must be given by the flag --test_image_path.
        """
        self.model.eval()
        step = self.start_step + 1 # if no trained checkpoint was loaded, start_step is 0
        lr_image = Image.open(self.test_image_path)
        lr_image_size = lr_image.size[0]
        #CONSIDER RGB IMAGE
        
        from utils import Kernels, load_kernels
        K, P = load_kernels(file_path='kernels/', scale_factor=2)
        randkern = Kernels(K, P)

        # LR_image_scaled + degradation kernel info (CONCAT) ---> TO TORCH
        lr_image_scaled = Scaling(lr_image)
        lr_image_with_kernel = randkern.ConcatDegraInfo(lr_image_scaled)
        lr_image_with_kernel = torch.from_numpy(lr_image_with_kernel).float().to(self.device) # NUMPY to TORCH

        # LR_image to torch

        lr_image_scaled = torch.from_numpy(lr_image_scaled).float().to(self.device) # NUMPY to TORCH

        # Permute to channels-first layout, as the model expects
        lr_image_scaled = lr_image_scaled.permute(2,0,1) 
        lr_image_with_kernel = lr_image_with_kernel.permute(2,0,1)

        lr_image_with_kernel = lr_image_with_kernel.unsqueeze(0) #just add one dimension (index on batch)
        lr_image_scaled = lr_image_scaled.unsqueeze(0)

        lr_image = lr_image_scaled.to(torch.float64)
        x = lr_image_with_kernel.to(torch.float64)

        reconst = self.model(x)

        tmp1 = lr_image.data.cpu().numpy().transpose(0,2,3,1)*255
        image_list = [np.array(Image.fromarray(tmp1.astype(np.uint8)[i]).resize((128,128), Image.BICUBIC))
                      for i in range(len(tmp1))]
        image_hr_bicubic = np.stack(image_list)
        image_hr_bicubic_single = np.squeeze(image_hr_bicubic)
        #return this ^
        image_hr_bicubic = image_hr_bicubic.transpose(0,3,1,2)
        image_hr_bicubic = Scaling(image_hr_bicubic)
        image_hr_bicubic = torch.from_numpy(image_hr_bicubic).double().to(self.device) # NUMPY to TORCH
        hr_image_hat = reconst
        hr_image_hat_np = hr_image_hat.data.cpu().numpy()
        hr_image_hat_np_scaled = hr_image_hat_np # the model output is already scaled, so no extra rescaling is applied here
        hr_image_hat_np_scaled = np.squeeze(hr_image_hat_np_scaled).transpose((1, 2, 0))
        hr_image_hat_np_png = (hr_image_hat_np_scaled*255).astype(np.uint8)
        #return this ^

        #Saving Image Bicubic and HR Image Hat
        Image.fromarray(image_hr_bicubic_single).save('./results/HR_bicub_images/'+ os.path.basename(self.test_image_path)+'_hr_bic_{}.png'.format(step))
        Image.fromarray(hr_image_hat_np_png).save('./results/HR_HAT_images/'+ os.path.basename(self.test_image_path)+'_hr_hat_{}.png'.format(step))

        #Create Grid
        hr_image_hat_np_scaled = hr_image_hat_np #It's already scaled (comes out of model scaled)
        hr_image_hat_torch = torch.from_numpy(hr_image_hat_np_scaled).double().to(self.device) # NUMPY to TORCH

        pairs = torch.cat((image_hr_bicubic.data, \
                        hr_image_hat_torch.data), dim=3)
        grid = make_grid(pairs, 1) 
        tmp = np.squeeze(grid.cpu().numpy().transpose((1, 2, 0)))
        tmp = (255 * tmp).astype(np.uint8)
        Image.fromarray(tmp).save('./results/grids/'+ os.path.basename(self.test_image_path).split('.')[0]+'_grid_{}.png'.format(step))
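Kernels.ConcatDegraInfo and load_kernels come from this repo's utils module and are not shown above. From the call site, ConcatDegraInfo appears to append degradation-kernel information to the scaled LR image as extra channels. A purely illustrative stand-in, assuming the degradation is summarized by a fixed-length code broadcast over the spatial dimensions (the real implementation may differ):

import numpy as np

def concat_degra_info(lr_image_scaled, kernel_code):
    # Hypothetical stand-in for randkern.ConcatDegraInfo: broadcast a
    # fixed-length degradation code over H x W and append it as channels.
    h, w, _ = lr_image_scaled.shape
    degra_maps = np.ones((h, w, len(kernel_code)), dtype=np.float32) * kernel_code
    return np.concatenate((lr_image_scaled, degra_maps), axis=-1)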
Example #5
    def test(self): # receives a single image --> can easily be modified to handle multiple images
        """Takes a single LR image as input. Saves the bicubic-upscaled LR image, the model's
        approximate HR image, and their concatenation as a grid.
        The image location must be given by the flag --test_image_path.
        """
        self.model.eval()
        step = self.start_step + 1 # if no trained checkpoint was loaded, start_step is 0
        lr_image = Image.open(self.test_image_path)
        lr_image_size = lr_image.size[0]
        #CONSIDER RGB IMAGE
        
        from utils import Kernels, load_kernels
        K, P = load_kernels(file_path='kernels/', scale_factor=2)
        randkern = Kernels(K, P)

        # get LR_RESIDUAL --> [-1,1]
        transform_to_vlr = transforms.Compose([
                            transforms.Lambda(lambda x: randkern.RandomBlur(x)), #random blur
                            transforms.Lambda(lambda x: random_downscale(x,self.scale_factor)), #random downscale
                            transforms.Resize((lr_image_size, lr_image_size), Image.BICUBIC) # upscale back to the LR size
                    ])
        lr_image_hat = transform_to_vlr(lr_image)
        lr_residual = np.array(lr_image).astype(np.float32) - np.array(lr_image_hat).astype(np.float32)
        lr_residual_scaled = Scaling(lr_residual)

        # LR_image_scaled + LR_residual_scaled (CONCAT) ---> TO TORCH

        #lr_image_with_kernel = self.randkern.ConcatDegraInfo(lr_image_scaled)
        #lr_image_with_resid  = np.concatenate((lr_image_with_kernel, lr_residual_scaled), axis=-1)
        lr_image_scaled = Scaling(lr_image)
        lr_image_with_resid  = np.concatenate((lr_image_scaled, lr_residual_scaled), axis=-1)
        lr_image_with_resid = torch.from_numpy(lr_image_with_resid).float().to(self.device) # NUMPY to TORCH

        # LR_image to torch

        lr_image_scaled = torch.from_numpy(lr_image_scaled).float().to(self.device) # NUMPY to TORCH

        # Permute to channels-first layout, as the model expects
        lr_image_scaled = lr_image_scaled.permute(2,0,1) 
        lr_image_with_resid = lr_image_with_resid.permute(2,0,1)

        lr_image_with_resid = lr_image_with_resid.unsqueeze(0) #just add one dimension (index on batch)
        lr_image_scaled = lr_image_scaled.unsqueeze(0)

        lr_image, x = lr_image_scaled, lr_image_with_resid


        reconst = self.model(x)

        tmp1 = lr_image.data.cpu().numpy().transpose(0,2,3,1)*255
        image_list = [np.array(Image.fromarray(tmp1.astype(np.uint8)[i]).resize((128,128), Image.BICUBIC))
                      for i in range(len(tmp1))]
        image_hr_bicubic = np.stack(image_list)
        image_hr_bicubic_single = np.squeeze(image_hr_bicubic)
        #return this ^
        image_hr_bicubic = image_hr_bicubic.transpose(0,3,1,2)
        image_hr_bicubic = Scaling(image_hr_bicubic)
        image_hr_bicubic = torch.from_numpy(image_hr_bicubic).float().to(self.device) # NUMPY to TORCH
        hr_image_hat = reconst + image_hr_bicubic
        hr_image_hat_np = hr_image_hat.data.cpu().numpy()
        hr_image_hat_np_scaled = Scaling01(hr_image_hat_np)
        hr_image_hat_np_scaled = np.squeeze(hr_image_hat_np_scaled).transpose((1, 2, 0))
        hr_image_hat_np_png = (hr_image_hat_np_scaled*255).astype(np.uint8)
        #return this ^

        #Saving Image Bicubic and HR Image Hat
        Image.fromarray(image_hr_bicubic_single).save('./results/HR_bicub_images/'+ os.path.basename(self.test_image_path)+'_hr_bic_{}.png'.format(step))
        Image.fromarray(hr_image_hat_np_png).save('./results/HR_HAT_images/'+ os.path.basename(self.test_image_path)+'_hr_hat_{}.png'.format(step))

        #Create Grid
        hr_image_hat_np_scaled = Scaling01(hr_image_hat_np)
        hr_image_hat_torch = torch.from_numpy(hr_image_hat_np_scaled).float().to(self.device) # NUMPY to TORCH

        pairs = torch.cat((image_hr_bicubic.data, \
                        hr_image_hat_torch.data), dim=3)
        grid = make_grid(pairs, 1) 
        tmp = np.squeeze(grid.cpu().numpy().transpose((1, 2, 0)))
        tmp = (255 * tmp).astype(np.uint8)
        Image.fromarray(tmp).save('./results/grids/'+ os.path.basename(self.test_image_path).split('.')[0]+'_grid_{}.png'.format(step))
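random_downscale, used in the transform pipeline of this example, is imported from the repo's utilities and is not defined above. A minimal sketch of what it might do, assuming it shrinks a PIL image by scale_factor using a randomly chosen resampling filter (the real helper may pick the factor or filter differently):

import random
from PIL import Image

def random_downscale(image, scale_factor):
    # Hypothetical helper: downscale a PIL image by scale_factor with a
    # randomly selected interpolation filter to vary the degradation.
    w, h = image.size
    method = random.choice([Image.BICUBIC, Image.BILINEAR, Image.NEAREST])
    return image.resize((w // scale_factor, h // scale_factor), method)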