def read_img_seq(path, require_mod_crop=False, scale=1):
    """Read a sequence of images from a given folder path.

    Args:
        path (list[str] | str): List of image paths or image folder path.
        require_mod_crop (bool): Require mod crop for each image.
            Default: False.
        scale (int): Scale factor for mod_crop. Default: 1.

    Returns:
        Tensor: size (t, c, h, w), RGB, [0, 1].
    """
    if isinstance(path, list):
        img_paths = path
    else:
        # Sort so frames come back in deterministic (temporal) order.
        img_paths = sorted([osp.join(path, v) for v in mmcv.scandir(path)])
    # Decode each image (BGR, uint8) and scale to float32 in [0, 1].
    imgs = [mmcv.imread(v).astype(np.float32) / 255. for v in img_paths]
    if require_mod_crop:
        imgs = [mod_crop(img, scale) for img in imgs]
    imgs = totensor(imgs, bgr2rgb=True, float32=True)
    imgs = torch.stack(imgs, dim=0)
    return imgs
def calculate_lpips(img1, img2, crop_border, input_order='HWC'):
    """Calculate LPIPS (Learned Perceptual Image Patch Similarity).

    Uses the official AlexNet-based LPIPS model; lower values mean the
    two images are perceptually closer.

    Args:
        img1 (ndarray): First image, range [0, 255] with float/int type.
        img2 (ndarray): Second image with the same shape as ``img1``.
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the metric calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.

    Returns:
        float: LPIPS result.
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)

    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]

    img1 = img1.astype(np.float32)
    img2 = img2.astype(np.float32)

    # NOTE(review): bgr2rgb=False keeps the channel order as passed in,
    # but LPIPS expects RGB input — confirm callers provide RGB here.
    img1, img2 = totensor([img1, img2], bgr2rgb=False, float32=True)
    img1 = img1.unsqueeze(0)
    img2 = img2.unsqueeze(0)

    # IMPORTANT: LPIPS expects input normalized to [-1, 1].
    img1 = (img1 / 255. - 0.5) * 2
    img2 = (img2 / 255. - 0.5) * 2

    # Cache the network on the function object: constructing lpips.LPIPS
    # reloads pretrained weights, which is expensive to do per call.
    if not hasattr(calculate_lpips, '_loss_fn_alex'):
        # best forward scores
        calculate_lpips._loss_fn_alex = lpips.LPIPS(net='alex', verbose=False)
    loss_fn_alex = calculate_lpips._loss_fn_alex

    metric = loss_fn_alex(img1, img2).squeeze(0).float().detach().cpu().numpy()
    return metric.mean()
def __getitem__(self, index):
    """Return one sample: the neighboring LQ frames and the GT frame (im4).

    Returns:
        dict: ``lq`` (t, c, h, w) tensor, ``gt`` (c, h, w) tensor and the
        sample ``key`` string.
    """
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)

    # Randomly reverse temporal order (reverses the shared list in place).
    if self.random_reverse and random.random() < 0.5:
        self.neighbor_list.reverse()

    scale = self.opt['scale']
    gt_size = self.opt['gt_size']
    key = self.keys[index]
    clip, seq = key.split('/')  # key example: 00001/0001

    # Load the GT frame (im4.png).
    if self.is_lmdb:
        img_gt_path = f'{key}/im4'
    else:
        img_gt_path = self.gt_root / clip / seq / 'im4.png'
    gt_bytes = self.file_client.get(img_gt_path, 'gt')
    img_gt = mmcv.imfrombytes(gt_bytes).astype(np.float32) / 255.

    # Load each neighboring LQ frame.
    img_lqs = []
    for frame_idx in self.neighbor_list:
        if self.is_lmdb:
            lq_frame_path = f'{clip}/{seq}/im{frame_idx}'
        else:
            lq_frame_path = self.lq_root / clip / seq / f'im{frame_idx}.png'
        lq_bytes = self.file_client.get(lq_frame_path, 'lq')
        img_lqs.append(mmcv.imfrombytes(lq_bytes).astype(np.float32) / 255.)

    # Paired random crop keeps LQ/GT spatially aligned.
    img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
                                         img_gt_path)

    # Augment LQ frames and GT jointly (flip / rotate), then tensorize.
    img_results = augment(img_lqs + [img_gt], self.opt['use_flip'],
                          self.opt['use_rot'])
    img_results = totensor(img_results)
    img_lqs = torch.stack(img_results[:-1], dim=0)
    img_gt = img_results[-1]

    # img_lqs: (t, c, h, w); img_gt: (c, h, w); key: str
    return {'lq': img_lqs, 'gt': img_gt, 'key': key}
def __getitem__(self, index):
    """Load a single LQ image and return it as a CHW RGB tensor in [0, 1]."""
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)

    # Decode the LQ image bytes into a float32 array in [0, 1].
    lq_path = self.paths[index]
    raw_bytes = self.file_client.get(lq_path)
    img_lq = mmcv.imfrombytes(raw_bytes).astype(np.float32) / 255.

    # TODO: color space transform
    # BGR -> RGB, HWC -> CHW, ndarray -> tensor.
    img_lq = totensor(img_lq, bgr2rgb=True, float32=True)

    return {'lq': img_lq, 'lq_path': lq_path}
def __getitem__(self, index):
    """Load one LQ image, convert to an RGB tensor and optionally normalize."""
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)

    lq_path = self.paths[index]['lq_path']
    raw_bytes = self.file_client.get(lq_path, 'lq')
    img_lq = mmcv.imfrombytes(raw_bytes).astype(np.float32) / 255.

    # TODO: color space transform
    # BGR -> RGB, HWC -> CHW, ndarray -> tensor.
    img_lq = totensor(img_lq, bgr2rgb=True, float32=True)

    # In-place normalization, skipped when neither mean nor std is set.
    if self.mean is not None or self.std is not None:
        normalize(img_lq, self.mean, self.std, inplace=True)

    return {'lq': img_lq, 'lq_path': lq_path}
def __getitem__(self, index):
    """Load a GT image, random-hflip it, and return a normalized RGB tensor.

    Returns:
        dict: ``gt`` (c, h, w) tensor and its ``gt_path``.
    """
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)

    # load gt image
    gt_path = self.paths[index]
    img_bytes = self.file_client.get(gt_path)
    img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.

    # random horizontal flip
    img_gt = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False)

    # BGR to RGB, HWC to CHW, numpy to tensor
    img_gt = totensor(img_gt, bgr2rgb=True, float32=True)

    # Normalize only when mean/std are configured — matches the guard used
    # by the LQ-only dataset in this file; without it, unset mean/std would
    # be passed straight into normalize().
    if self.mean is not None or self.std is not None:
        normalize(img_gt, self.mean, self.std, inplace=True)

    return {'gt': img_gt, 'gt_path': gt_path}
def __getitem__(self, index):
    """Return a tone-mapped LQ/GT pair as CHW float tensors.

    Images are HWC float32 with channel order RGGB and HDR range
    [0, +inf] (see the original loader comments).
    """
    if self.file_client is None:
        self.file_client = FileClient(
            self.io_backend_opt.pop('type'), **self.io_backend_opt)

    opt = self.opt
    scale = opt['scale']

    # Fetch the raw gt/lq payloads for this sample.
    record = self.paths[index]
    gt_path = record['gt_path']
    lq_path = record['lq_path']
    img_gt = self.file_client.get(gt_path)
    img_lq = self.file_client.get(lq_path)

    # Tone-map each image with its configured operator.
    img_lq = self._tonemap(img_lq, type=opt['lq_map_type'])
    img_gt = self._tonemap(img_gt, type=opt['gt_map_type'])

    # Ensure a channel dimension is present.
    img_gt = self._expand_dim(img_gt)
    img_lq = self._expand_dim(img_lq)

    # Training-time augmentation: paired random crop, then flip/rotation.
    if opt['phase'] == 'train':
        img_gt, img_lq = paired_random_crop(img_gt, img_lq, opt['gt_size'],
                                            scale, gt_path)
        img_gt, img_lq = augment([img_gt, img_lq], opt['use_flip'],
                                 opt['use_rot'])

    # TODO: color space transform
    # HWC to CHW, numpy to tensor (channel order is kept as-is).
    img_gt, img_lq = totensor([img_gt, img_lq], bgr2rgb=False, float32=True)

    return {
        'lq': img_lq,
        'gt': img_gt,
        'lq_path': lq_path,
        'gt_path': gt_path
    }
def read_img_seq(path, require_mod_crop=False, scale=1):
    """Read a sequence of images from a given folder path.

    Args:
        path (list[str] | str): List of image paths or image folder path.
        require_mod_crop (bool): Require mod crop for each image.
            Default: False.
        scale (int): Scale factor for mod_crop. Default: 1.

    Returns:
        Tensor: size (t, c, h, w), RGB, [0, 1].
    """
    if isinstance(path, list):
        img_paths = path
    else:
        # Folder input: enumerate and sort for a deterministic order.
        img_paths = sorted(
            osp.join(path, name) for name in mmcv.scandir(path))

    # Decode each frame (BGR uint8) and scale to float32 in [0, 1].
    images = []
    for img_path in img_paths:
        images.append(mmcv.imread(img_path).astype(np.float32) / 255.)

    if require_mod_crop:
        images = [mod_crop(image, scale) for image in images]

    tensors = totensor(images, bgr2rgb=True, float32=True)
    return torch.stack(tensors, dim=0)
def __getitem__(self, index):
    """Return a paired LQ/GT sample where LQ is a raw float32 buffer.

    The GT image is decoded from encoded image bytes, while the LQ payload
    is interpreted directly as raw float32 data whose spatial size is the
    GT size divided by ``scale`` (channel count inferred via -1).
    """
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)
    scale = self.opt['scale']

    # Load gt and lq images. Dimension order: HWC; channel order: BGR;
    # image range: [0, 1], float32.
    gt_path = self.paths[index]['gt_path']
    img_bytes = self.file_client.get(gt_path, 'gt')
    img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
    # GT spatial size determines the LQ reshape below.
    img_gt_h = img_gt.shape[0]
    img_gt_w = img_gt.shape[1]

    lq_path = self.paths[index]['lq_path']
    img_bytes = self.file_client.get(lq_path, 'lq')
    # np.copy makes the read-only frombuffer view writable before reshaping.
    img_lq = np.copy(np.frombuffer(img_bytes, dtype='float32')).reshape(
        img_gt_h // scale, img_gt_w // scale, -1)

    # No augmentation for training
    # if self.opt['phase'] == 'train':
    #     gt_size = self.opt['gt_size']
    #     # random crop
    #     img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale,
    #                                         gt_path)
    #     # flip, rotation
    #     img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_flip'],
    #                              self.opt['use_rot'])

    # TODO: color space transform
    # BGR to RGB, HWC to CHW, numpy to tensor
    img_gt, img_lq = totensor([img_gt, img_lq], bgr2rgb=True, float32=True)
    return {
        'lq': img_lq,
        'gt': img_gt,
        'lq_path': lq_path,
        'gt_path': gt_path
    }
def __getitem__(self, index):
    """Return a REDS-style sample: LQ frames, optional flows, and GT.

    A random frame interval is chosen, and the neighboring-frame window is
    re-centered until it fits inside the 100-frame clip (indices 0-99).

    Returns:
        dict: ``lq`` (t, c, h, w), ``gt`` (c, h, w), ``key`` and, when
        ``self.flow_root`` is set, ``flow`` (t, 2, h, w).
    """
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)
    scale = self.opt['scale']
    gt_size = self.opt['gt_size']
    key = self.keys[index]
    clip_name, frame_name = key.split('/')  # key example: 000/00000000
    center_frame_idx = int(frame_name)

    # determine the neighboring frames
    interval = random.choice(self.interval_list)

    # ensure not exceeding the borders
    start_frame_idx = center_frame_idx - self.num_half_frames * interval
    end_frame_idx = center_frame_idx + self.num_half_frames * interval
    # each clip has 100 frames starting from 0 to 99; resample the center
    # until the whole window fits inside the clip
    while (start_frame_idx < 0) or (end_frame_idx > 99):
        center_frame_idx = random.randint(0, 99)
        start_frame_idx = (
            center_frame_idx - self.num_half_frames * interval)
        end_frame_idx = center_frame_idx + self.num_half_frames * interval
    frame_name = f'{center_frame_idx:08d}'
    neighbor_list = list(
        range(center_frame_idx - self.num_half_frames * interval,
              center_frame_idx + self.num_half_frames * interval + 1,
              interval))
    # random reverse
    if self.random_reverse and random.random() < 0.5:
        neighbor_list.reverse()

    assert len(neighbor_list) == self.num_frame, (
        f'Wrong length of neighbor list: {len(neighbor_list)}')

    # get the GT frame (as the center frame)
    if self.is_lmdb:
        img_gt_path = f'{clip_name}/{frame_name}'
    else:
        img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'
    img_bytes = self.file_client.get(img_gt_path, 'gt')
    img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.

    # get the neighboring LQ frames
    img_lqs = []
    for neighbor in neighbor_list:
        if self.is_lmdb:
            img_lq_path = f'{clip_name}/{neighbor:08d}'
        else:
            img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
        img_bytes = self.file_client.get(img_lq_path, 'lq')
        img_lq = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
        img_lqs.append(img_lq)

    # get flows
    if self.flow_root is not None:
        img_flows = []
        # read previous flows
        for i in range(self.num_half_frames, 0, -1):
            if self.is_lmdb:
                flow_path = f'{clip_name}/{frame_name}_p{i}'
            else:
                flow_path = (self.flow_root / clip_name /
                             f'{frame_name}_p{i}.png')
            img_bytes = self.file_client.get(flow_path, 'flow')
            cat_flow = mmcv.imfrombytes(
                img_bytes, flag='grayscale')  # uint8, [0, 255]
            # the quantized flow concatenates dx over dy along axis 0
            dx, dy = np.split(cat_flow, 2, axis=0)
            flow = mmcv.video.dequantize_flow(
                dx, dy, max_val=20, denorm=False)  # we use max_val 20 here.
            img_flows.append(flow)
        # read next flows
        for i in range(1, self.num_half_frames + 1):
            if self.is_lmdb:
                flow_path = f'{clip_name}/{frame_name}_n{i}'
            else:
                flow_path = (self.flow_root / clip_name /
                             f'{frame_name}_n{i}.png')
            img_bytes = self.file_client.get(flow_path, 'flow')
            cat_flow = mmcv.imfrombytes(img_bytes, flag='grayscale')
            dx, dy = np.split(cat_flow, 2, axis=0)
            flow = mmcv.video.dequantize_flow(
                dx, dy, max_val=20, denorm=False)  # we use max_val 20 here.
            img_flows.append(flow)

        # for random crop, here, img_flows and img_lqs have the same
        # spatial size
        img_lqs.extend(img_flows)

    # randomly crop
    img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
                                         img_gt_path)
    if self.flow_root is not None:
        # split the cropped list back into LQ frames and flows
        img_lqs, img_flows = img_lqs[:self.num_frame], img_lqs[self.
                                                               num_frame:]

    # augmentation - flip, rotate (GT appended so it is transformed with
    # the LQ frames; flows are transformed consistently when present)
    img_lqs.append(img_gt)
    if self.flow_root is not None:
        img_results, img_flows = augment(img_lqs, self.opt['use_flip'],
                                         self.opt['use_rot'], img_flows)
    else:
        img_results = augment(img_lqs, self.opt['use_flip'],
                              self.opt['use_rot'])

    img_results = totensor(img_results)
    img_lqs = torch.stack(img_results[0:-1], dim=0)
    img_gt = img_results[-1]

    if self.flow_root is not None:
        img_flows = totensor(img_flows)
        # add the zero center flow
        img_flows.insert(self.num_half_frames,
                         torch.zeros_like(img_flows[0]))
        img_flows = torch.stack(img_flows, dim=0)

    # img_lqs: (t, c, h, w)
    # img_flows: (t, 2, h, w)
    # img_gt: (c, h, w)
    # key: str
    if self.flow_root is not None:
        return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
    else:
        return {'lq': img_lqs, 'gt': img_gt, 'key': key}
def __getitem__(self, index):
    """Return LQ frames, the GT frame, and an extra HR reference frame.

    Besides the usual neighboring LQ frames and GT (im4.png), this loader
    reads an additional high-resolution reference image (im4_hr.png) that
    is cropped and augmented jointly with the GT.

    Returns:
        dict: ``lq`` (t, c, h, w), ``gt`` (c, h, w), ``hr_3d`` (c, h, w)
        and the sample ``key``.
    """
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)

    # random reverse (reverses the shared neighbor list in place)
    if self.random_reverse and random.random() < 0.5:
        self.neighbor_list.reverse()

    scale = self.opt['scale']
    gt_size = self.opt['gt_size']
    key = self.keys[index]
    clip, seq = key.split('/')  # key example: 00001/0001

    # get the GT frame (im4.png)
    if self.is_lmdb:
        img_gt_path = f'{key}/im4'
    else:
        img_gt_path = self.gt_root / clip / seq / 'im4.png'
    img_bytes = self.file_client.get(img_gt_path, 'gt')
    img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.

    # get the extra HR reference frame (im4_hr.png, read from the LQ root)
    img_160_path = self.lq_root / clip / seq / 'im4_hr.png'
    img_bytes_160 = self.file_client.get(img_160_path, 'gt')
    img_160 = mmcv.imfrombytes(img_bytes_160).astype(np.float32) / 255.

    # Crop GT and the HR reference together so they stay aligned.
    img_gt_160 = [img_gt, img_160]

    # get the neighboring LQ frames
    img_lqs = []
    for neighbor in self.neighbor_list:
        if self.is_lmdb:
            img_lq_path = f'{clip}/{seq}/im{neighbor}'
        else:
            img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
        img_bytes = self.file_client.get(img_lq_path, 'lq')
        img_lq = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
        img_lqs.append(img_lq)

    # randomly crop (GT list and LQ list are cropped consistently)
    img_gt, img_lqs = paired_random_crop(img_gt_160, img_lqs, gt_size,
                                         scale, img_gt_path)
    img_160_input = img_gt[1]
    img_gt = img_gt[0]

    # augmentation - flip, rotate; HR reference and GT are appended so all
    # images receive identical transforms
    img_lqs.append(img_160_input)
    img_lqs.append(img_gt)
    img_results = augment(img_lqs, self.opt['use_flip'],
                          self.opt['use_rot'])

    img_results = totensor(img_results)
    hr_3d = img_results[-2]
    img_lqs = torch.stack(img_results[0:-2], dim=0)
    img_gt = img_results[-1]

    # img_lqs: (t, c, h, w); img_gt: (c, h, w); key: str
    return {'lq': img_lqs, 'gt': img_gt, 'hr_3d': hr_3d, 'key': key}
def __getitem__(self, index):
    """Return one sample: stacked neighboring LQ frames and the GT frame.

    Returns:
        dict: ``lq`` (t, c, h, w) tensor, ``gt`` (c, h, w) tensor and the
        sample ``key`` string.
    """
    if self.file_client is None:
        self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                      **self.io_backend_opt)

    # random reverse (reverses the shared neighbor list in place)
    if self.random_reverse and random.random() < 0.5:
        self.neighbor_list.reverse()

    scale = self.opt['scale']
    gt_size = self.opt['gt_size']
    key = self.keys[index]
    clip, seq = key.split('/')  # key example: 00001/0001

    # get the GT frame (im4.png)
    if self.is_lmdb:
        img_gt_path = f'{key}/im4'
    else:
        img_gt_path = self.gt_root / clip / seq / 'im4.png'
    img_bytes = self.file_client.get(img_gt_path, 'gt')
    img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.

    # get the neighboring LQ frames
    img_lqs = []
    for neighbor in self.neighbor_list:
        if self.is_lmdb:
            img_lq_path = f'{clip}/{seq}/im{neighbor}'
        else:
            img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
        img_bytes = self.file_client.get(img_lq_path, 'lq')
        img_lq = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
        img_lqs.append(img_lq)

    # randomly crop
    img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
                                         img_gt_path)

    # augmentation - flip, rotate (GT appended so it receives the same
    # transform as the LQ frames)
    img_lqs.append(img_gt)
    img_results = augment(img_lqs, self.opt['use_flip'],
                          self.opt['use_rot'])

    img_results = totensor(img_results)
    img_lqs = torch.stack(img_results[0:-1], dim=0)
    img_gt = img_results[-1]

    # img_lqs: (t, c, h, w); img_gt: (c, h, w); key: str
    return {'lq': img_lqs, 'gt': img_gt, 'key': key}
def __getitem__(self, index):
    """Return a tone-mapped LQ/GT pair together with its PSF code.

    Images are HWC float32 with channel order RGGB and HDR range
    [0, +inf] (see the loader comments below).

    Returns:
        dict: ``lq``, ``gt`` tensors, ``psf_code`` shaped (..., 1, 1),
        plus the three source paths.
    """
    if self.file_client is None:
        self.file_client = FileClient(
            self.io_backend_opt.pop('type'), **self.io_backend_opt)

    scale = self.opt['scale']
    lq_map_type = self.opt['lq_map_type']
    gt_map_type = self.opt['gt_map_type']
    crop_scale = self.opt.get('crop_scale', None)

    # Load gt and lq images. Dimension order: HWC; channel order: RGGB;
    # HDR image range: [0, +inf], float32.
    gt_path = self.paths[index]['gt_path']
    lq_path = self.paths[index]['lq_path']
    psf_path = self.paths[index]['psf_path']
    img_gt = self.file_client.get(gt_path)
    img_lq = self.file_client.get(lq_path)
    psf_code = self.file_client.get(psf_path)

    # tone mapping
    img_lq = self._tonemap(img_lq, type=lq_map_type)
    img_gt = self._tonemap(img_gt, type=gt_map_type)

    # expand dimension
    img_gt = self._expand_dim(img_gt)
    img_lq = self._expand_dim(img_lq)

    # Rescale before the random crop (identity comparison with None, was
    # previously the non-idiomatic `!= None`).
    if crop_scale is not None:
        # NOTE(review): both resizes use img_lq's (h, w) — confirm GT and
        # LQ share spatial size here (e.g. scale == 1).
        h, w, _ = img_lq.shape
        img_lq = cv2.resize(img_lq,
                            (int(w * crop_scale), int(h * crop_scale)),
                            interpolation=cv2.INTER_LINEAR)
        img_gt = cv2.resize(img_gt,
                            (int(w * crop_scale), int(h * crop_scale)),
                            interpolation=cv2.INTER_LINEAR)

    # augmentation
    if self.opt['phase'] == 'train':
        gt_size = self.opt['gt_size']
        # random crop
        img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale,
                                            gt_path)
        # flip, rotation
        img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_flip'],
                                 self.opt['use_rot'])

    # TODO: color space transform
    # BGR to RGB, HWC to CHW, numpy to tensor
    img_gt, img_lq = totensor([img_gt, img_lq], bgr2rgb=False, float32=True)
    # Broadcastable PSF conditioning code: append two singleton spatial dims.
    psf_code = torch.from_numpy(psf_code)[..., None, None]

    return {
        'lq': img_lq,
        'gt': img_gt,
        'psf_code': psf_code,
        'lq_path': lq_path,
        'gt_path': gt_path,
        'psf_path': psf_path,
    }