def __getitem__(self, idx):
    lr, hr, lr2, hr2, filename = self._load_file(idx)
    lr, hr, lr2, hr2 = self._get_patch(lr, hr, lr2, hr2)
    lr, hr = common.set_channel([lr, hr], self.args.n_colors)
    lr2, hr2 = common.set_channel([lr2, hr2], self.args.n_colors)
    lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
    lr_tensor2, hr_tensor2 = common.np2Tensor([lr2, hr2], self.args.rgb_range)
    return lr_tensor, hr_tensor, lr_tensor2, hr_tensor2, filename
def __getitem__(self, idx):
    try:
        lr, hr, filename = self._load_file(idx)
    except Exception:
        # filename is unknown if loading fails, so report the index before re-raising.
        print('failed to load sample {}'.format(idx))
        raise
    pair = self.get_patch(lr, hr)
    pair = common.set_channel(*pair, n_channels=self.args.n_colors)
    pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
    return pair_t[0], pair_t[1], filename
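# Hedged sketch (an assumption, not taken from this code): the common.set_channel /
# common.np2Tensor helpers used throughout these loaders are assumed to behave like
# the EDSR-style data/common.py utilities below. set_channel harmonizes the channel
# count (grayscale -> 3 channels, or RGB -> Y when n_channels == 1) and np2Tensor
# converts HWC numpy images to CHW float tensors scaled to rgb_range.
import numpy as np
import skimage.color as sc
import torch


def set_channel(*args, n_channels=3):
    def _set_channel(img):
        if img.ndim == 2:
            img = np.expand_dims(img, axis=2)
        c = img.shape[2]
        if n_channels == 1 and c == 3:
            img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
        elif n_channels == 3 and c == 1:
            img = np.concatenate([img] * n_channels, 2)
        return img

    return [_set_channel(a) for a in args]


def np2Tensor(*args, rgb_range=255):
    def _np2Tensor(img):
        np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))
        tensor = torch.from_numpy(np_transpose).float()
        tensor.mul_(rgb_range / 255)
        return tensor

    return [_np2Tensor(a) for a in args]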
def __getitem__(self, idx):
    filename = os.path.split(self.filelist[idx])[-1]
    filename, _ = os.path.splitext(filename)
    # misc.imread is presumably scipy.misc.imread, removed in recent SciPy releases;
    # imageio.imread is the usual drop-in replacement.
    lr = misc.imread(self.filelist[idx])
    lr = common.set_channel([lr], self.args.n_colors)[0]
    lr_tensor = common.np2Tensor([lr], self.args.rgb_range)[0]
    hr = misc.imread(self.hr_filelist[idx])
    hr = common.set_channel([hr], self.args.n_colors)[0]
    hr_tensor = common.np2Tensor([hr], self.args.rgb_range)[0]
    return lr_tensor, hr_tensor, filename
def __getitem__(self, idx):
    # Training and evaluation use the same patch pipeline.
    lr, hr, filename = self._load_file(idx)
    lr, hr = self.get_patch(lr, hr)
    lr, hr = common.set_channel(lr, hr, n_channels=self.args.n_colors)
    lr_tensor, hr_tensor = common.np2Tensor(
        lr, hr, rgb_range=self.args.rgb_range)
    return lr_tensor, hr_tensor, filename
def __getitem__(self, idx):
    filename = os.path.splitext(os.path.basename(self.filelist[idx]))[0]
    if '.hdr' in self.filelist[idx]:
        # HDR input: read at full bit depth and derive the dynamic range from the image itself.
        lr = cv2.imread(self.filelist[idx], cv2.IMREAD_ANYDEPTH)
        lr = cv2.cvtColor(lr, cv2.COLOR_BGR2RGB)
        self.args.non_hdr = False
        self.args.rgb_range = np.max(lr)
    else:
        lr = imageio.imread(self.filelist[idx])
        self.args.non_hdr = True
    lr, = common.set_channel(lr, n_channels=self.args.n_colors)
    lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
    return lr_t, -1, filename
def __getitem__(self, idx):
    scale = self.scale[self.idx_scale]
    if self.train:
        # Resample until the LR image is large enough for a full training patch.
        new_idx = random.randint(0, self.end)
        lr, hr, filename = self._load_file(new_idx)
        while (lr.shape[0] < self.flags.batch_image_size
               or lr.shape[1] < self.flags.batch_image_size):
            new_idx = random.randint(0, self.end)
            lr, hr, filename = self._load_file(new_idx)
    else:
        lr, hr, filename = self._load_file(idx)
    if self.train:
        hr, lr = common.random_crop(
            hr, lr, self.flags.batch_image_size, scale)  # LR patch size
        lr, hr = common.random_flip_and_rotate(im1=lr, im2=hr)
        lr = util.resize_image_by_pil(hr, 1 / scale)
    lr, hr = common.set_channel(lr, hr, n_channels=self.flags.channels)
    # Normalize to [-0.5, 0.5] instead of using common.np2Tensor.
    lr_tensor = torch.from_numpy(lr / 255. - 0.5).float()
    hr_tensor = torch.from_numpy(hr / 255. - 0.5).float()
    lr_tensor = lr_tensor.view(
        1, self.flags.batch_image_size, self.flags.batch_image_size,
        self.flags.channels).squeeze()
    hr_tensor = hr_tensor.view(
        1, self.flags.batch_image_size * scale,
        self.flags.batch_image_size * scale, self.flags.channels).squeeze()
    return lr_tensor, hr_tensor, filename
def __getitem__(self, idx):
    img_input, img_tar = self._load_file(idx)
    img_input, img_tar = common.set_channel([img_input, img_tar],
                                            self.args.n_colors)
    img_input, img_tar = self._get_patch(img_input, img_tar)
    input_tensor, tar_tensor = common.np2Tensor([img_input, img_tar],
                                                self.args.rgb_range)
    return input_tensor, tar_tensor
def __getitem__(self, idx):
    filename = os.path.splitext(os.path.basename(self.filelist[idx]))[0]
    lr = imageio.imread(self.filelist[idx])
    lr, = common.set_channel(lr, n_channels=self.args.n_colors)
    lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
    return lr_t, -1, filename
def __getitem__(self, idx):
    # Load a window of five consecutive frames centred on idx when the whole
    # window stays inside the same 100-frame clip; otherwise repeat the centre frame.
    if 2 < idx % 100 < 99:
        lr1, hr1, _ = self._load_file(idx - 2)
        lr2, hr2, _ = self._load_file(idx - 1)
        lr3, hr3, filename = self._load_file(idx)
        lr4, hr4, _ = self._load_file(idx + 1)
        lr5, hr5, _ = self._load_file(idx + 2)
    else:
        lr1, hr1, _ = self._load_file(idx)
        lr2, hr2, _ = self._load_file(idx)
        lr3, hr3, filename = self._load_file(idx)
        lr4, hr4, _ = self._load_file(idx)
        lr5, hr5, _ = self._load_file(idx)
    lr1, hr1 = self._get_patch(lr1, hr1)
    lr2, hr2 = self._get_patch(lr2, hr2)
    lr3, hr3 = self._get_patch(lr3, hr3)
    lr4, hr4 = self._get_patch(lr4, hr4)
    lr5, hr5 = self._get_patch(lr5, hr5)
    # Stack the five LR frames along the channel axis; the HR target is the centre frame.
    lr = np.concatenate((lr1, lr2, lr3, lr4, lr5), axis=-1)
    hr = hr3
    lr, hr = common.set_channel([lr, hr], self.args.n_colors)
    lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
    return lr_tensor, hr_tensor, filename
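# Hedged illustration (not from the source) of the frame stacking above: five
# H x W x 3 patches concatenated along the channel axis form one H x W x 15 input.
import numpy as np

frames = [np.zeros((48, 48, 3), dtype=np.uint8) for _ in range(5)]
stacked = np.concatenate(frames, axis=-1)
assert stacked.shape == (48, 48, 15)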
def __getitem__(self, idx):
    filename = os.path.split(self.filelist[idx])[-1]
    filename, _ = os.path.splitext(filename)
    lr = imageio.imread(self.filelist[idx])
    lr = common.set_channel([lr], self.args.n_colors)[0]
    return common.np2Tensor([lr], self.args.rgb_range)[0], -1, filename
def __getitem__(self, idx):
    lr, hr, filename = self._load_file(idx)
    # Keep only the first channel of the LR input, crop a training pair,
    # then restore the channel axis before the usual conversion steps.
    lr = lr[:, :, 0]
    lr, hr = self.get_patch(lr, hr)
    lr = np.expand_dims(lr, 2)
    lr, hr = common.set_channel([lr, hr], self.args.n_colors)
    lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
    return lr_tensor, hr_tensor, filename
def __getitem__(self, idx):
    lr, hr, videoname, filename, offset = self._load_file(idx)
    pair = self.get_patch(lr, hr, offset)
    pair = common.set_channel(*pair, n_channels=self.args.n_colors)
    pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
    return pair_t[0], pair_t[1], videoname, filename
def __getitem__(self, idx):
    filename = os.path.split(self.filelist[idx])[-1]
    filename_hr = os.path.join(
        '../HR/{}/x{}'.format(self.args.testset, str(self.args.scale[0])),
        self.imnamelist[idx].replace('LRBI', 'HR'))
    filename, _ = os.path.splitext(filename)
    hr = misc.imread(filename_hr)
    hr = common.set_channel([hr], self.args.n_colors)[0]
    lr = misc.imread(self.filelist[idx])
    lr = common.set_channel([lr], self.args.n_colors)[0]
    return (common.np2Tensor([lr], self.args.rgb_range)[0],
            common.np2Tensor([hr], self.args.rgb_range)[0], filename)
def __getitem__(self, idx):
    hr, filename = self._load_file(idx)
    hr = self.get_patch(hr)
    # set_channel / np2Tensor return lists, so unwrap the single element per patch
    # before stacking the tensors.
    hr = [common.set_channel(img, n_channels=self.args.n_colors)[0] for img in hr]
    hr_tensor = [common.np2Tensor(img, rgb_range=self.args.rgb_range)[0] for img in hr]
    return torch.stack(hr_tensor, 0), filename
def __getitem__(self, idx):
    # idx selects which image to load.
    lr, hr, filename = self._load_file(idx)
    lr, hr = self._get_patch(lr, hr)
    lr, hr = common.set_channel([lr, hr], self.args.n_colors)
    lr, hr, degrade = self._sr_down(lr, hr)
    lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
    return lr_tensor, hr_tensor, filename, degrade
def __getitem__(self, idx):
    lr, labels, filename = self._load_file(idx)
    pair = self.get_patch(lr, labels)
    pair = common.set_channel(*pair, n_channels=self.args.n_colors)
    pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
    # pair_t holds lr followed by the label maps (hr, edge, diff).
    return pair_t[0], pair_t[1:], filename
def __getitem__(self, idx):
    filename = os.path.split(self.filelist[idx])[-1]
    filename, _ = os.path.splitext(filename)
    lr = misc.imread(self.filelist[idx])
    lr = self.input_transform(lr, self.args.std_lr_width,
                              self.args.std_lr_height, self.args.gauss_std)
    lr = common.set_channel([lr], self.args.n_colors)[0]
    return (common.np2Tensor([lr], self.args.rgb_range)[0], ""), filename
def __getitem__(self, idx):
    lr, hr = self._load_file(idx)
    lr, hr = self._get_patch(lr, hr)
    lr, hr = common.set_channel(lr, hr, n_channels=self.opt.n_colors)
    lr_tensor, hr_tensor = common.np2Tensor(lr, hr, rgb_range=self.opt.rgb_range)
    return lr_tensor, hr_tensor
def __getitem__(self, idx):
    lr, nl, mk, hr, filename = self._load_file(idx)
    lr, nl, mk, hr = self._get_patch(lr, nl, mk, hr)
    lr, hr = common.set_channel([lr, hr], self.args.n_colors)
    lr_tensor, nl_tensor, mk_tensor, hr_tensor = common.np2Tensor(
        [lr, nl, mk, hr], self.args.rgb_range)
    return lr_tensor, nl_tensor, mk_tensor, hr_tensor, filename
def __getitem__(self, idx):
    filename = os.path.split(self.filelist[idx])[-1]
    filename, _ = os.path.splitext(filename)
    lr = misc.imread(self.filelist[idx])
    if self.args.n_channel_in == self.args.n_channel_out:
        lr = common.set_channel([lr], self.args.n_channel_out)[0]
    return common.np2Tensor([lr], self.args.rgb_range)[0], -1, filename
def __getitem__(self, idx):
    lr, hr, filename, pts, facebb = self._load_file(idx)
    lr, hr, pts = self.get_patch(lr, hr, pts)
    # Generate facial-landmark heatmaps and the corresponding mask.
    height, width = hr.shape[0], hr.shape[1]
    pts = common.apply_bound(pts, width, height)
    heatmaps, mask = common.generate_label_map(
        pts, height // self.downsample, width // self.downsample, self.sigma,
        self.downsample, self.heatmap_type)  # H*W*C
    heatmaps = torch.from_numpy(heatmaps.transpose((2, 0, 1))).type(torch.FloatTensor)
    mask = torch.from_numpy(mask.transpose((2, 0, 1))).type(torch.ByteTensor)
    pair = [lr, hr]
    pair = common.set_channel(*pair, n_channels=self.args.n_colors)
    # pair: lr, hr, lr_large (a bicubically upscaled copy of the LR patch)
    pair.append(common.resize_bi(pair[0], self.scale[0], 'INTER_CUBIC'))
    pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
    # lr, hr, lr_large, heatmaps, mask
    return pair_t[0], pair_t[1], pair_t[2], heatmaps, mask, pts, filename, facebb
def __getitem__(self, idx):
    if self.args.process:
        lrs, hrs, filenames = self._load_file_from_loaded_data(idx)
    else:
        lrs, hrs, filenames = self._load_file(idx)
    patches = [self.get_patch(lr, hr) for lr, hr in zip(lrs, hrs)]
    lrs = np.array([patch[0] for patch in patches])
    hrs = np.array([patch[1] for patch in patches])
    lrs = np.array(common.set_channel(*lrs, n_channels=self.args.n_colors))
    hrs = np.array(common.set_channel(*hrs, n_channels=self.args.n_colors))
    lr_tensors = common.np2Tensor(
        *lrs, rgb_range=self.args.rgb_range, n_colors=self.args.n_colors)
    hr_tensors = common.np2Tensor(
        *hrs, rgb_range=self.args.rgb_range, n_colors=self.args.n_colors)
    return torch.stack(lr_tensors), torch.stack(hr_tensors), filenames
def __getitem__(self, idx):
    lr, hr, filename = self._load_file(idx)
    pair = self.get_patch(lr, hr)  # extract a patch and apply data augmentation
    # Choose RGB or the Y channel (via skimage); *pair unpacks the list into *args.
    pair = common.set_channel(*pair, n_channels=self.args.n_colors)
    pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
    return pair_t[0], pair_t[1], filename
def __getitem__(self, idx):
    # Load a new HR/LR file pair once the cached patch indices are used up (training only).
    if len(self.remain_idx) == 0 and self.train:
        filename_hr, filename_lr = self._load_file()
    lr, hr = self._get_patch()
    lr, hr = common.set_channel([lr, hr], self.args.n_colors)
    lr_tensor, hr_tensor = common.np2Tensor([lr, hr], self.args.rgb_range)
    return lr_tensor, hr_tensor, ''
def __getitem__(self, idx):
    filename = os.path.split(self.filelist[idx])[-1]
    filename, _ = os.path.splitext(filename)
    lr = imageio.imread(self.filelist[idx])
    lr, = common.set_channel(lr, n_channels=self.args.n_colors)
    lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
    return lr_t, -1, filename
def __getitem__(self, idx):
    scale = self.scale[self.idx_scale]
    idx = self._get_index(idx)
    img_in, img_tar = self._load_file(idx)
    img_in, img_tar, pi, ai = self._get_patch(img_in, img_tar)
    img_in, img_tar = common.set_channel(img_in, img_tar, self.args.n_colors)
    return common.np2Tensor(img_in, img_tar, self.args.rgb_range)
def test(self):
    torch.set_grad_enabled(False)
    self.model.eval()
    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        vidcap = cv2.VideoCapture(self.args.dir_demo)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        W = int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        H = int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        vidwri = cv2.VideoWriter(
            '{}/{}_x{}.avi'.format(self.args.save_dir, self.filename, scale),
            cv2.VideoWriter_fourcc(*'XVID'), vidcap.get(cv2.CAP_PROP_FPS),
            (W, H))
        tqdm_test = tqdm(range(total_frames), ncols=80)
        lr = torch.zeros([1, 3, H // scale, W // scale])
        estimate = torch.zeros(1, 3, H, W)
        for _ in tqdm_test:
            lr_ = lr
            success, lr = vidcap.read()
            if not success:
                break
            lr = common.set_channel([lr], n_channel=self.args.n_colors)
            lr = common.np2Tensor(np.array([lr]), rgb_range=self.args.rgb_range)
            lr, lr_, estimate = self.prepare([lr[0], lr_, estimate])
            with torch.no_grad():
                # The previous frame and the running estimate are fed back to the model.
                sr, lre = self.model(lr, lr_, estimate)
            estimate = sr
            sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)
            normalized = sr * 255 / self.args.rgb_range
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            vidwri.write(ndarr)
        vidcap.release()
        vidwri.release()
    # self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
    torch.set_grad_enabled(True)
def __getitem__(self, idx):
    folder = self.filelist[idx].split('/')[-2]
    filename = folder + '/' + os.path.splitext(
        os.path.basename(self.filelist[idx]))[0]
    lr = imageio.imread(self.filelist[idx])
    lr, = common.set_channel(lr, n_channels=self.args.n_colors)
    lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
    print("working on {}".format(filename))
    return lr_t, -1, filename
def __getitem__(self, idx):
    img_in = misc.imread(self.filelist[idx])
    if len(img_in.shape) == 2:
        img_in = np.expand_dims(img_in, 2)
    img_in, img_tar = common.set_channel(img_in, img_in, self.args.n_colors)
    img_tar = misc.imresize(
        img_tar, self.scale[self.idx_scale] * 100, interp='bicubic')
    return common.np2Tensor(img_in, img_tar, self.args.rgb_range)
def __getitem__(self, idx):
    lr, hr, filename = self._load_file(idx)
    pair = self.get_patch(lr, hr)
    pair = common.set_channel(*pair, n_channels=self.args.n_colors)
    pair = common.add_noise(*pair, noise_type=self.args.noise_type,
                            noise_param=self.args.noise_param)
    pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
    return pair_t[0], pair_t[1], filename
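# Hedged usage sketch (an assumption, not from the source): a dataset defining
# __getitem__ as above is typically wrapped in a torch.utils.data.DataLoader.
# `DemoSRDataset` and `args` are hypothetical stand-ins for whichever concrete
# dataset class and argument namespace are actually used.
from torch.utils.data import DataLoader

loader = DataLoader(DemoSRDataset(args), batch_size=16, shuffle=True, num_workers=4)
for lr, hr, filename in loader:
    # lr and hr arrive batched as [B, C, H, W] float tensors scaled to args.rgb_range.
    pass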