def __getitem__(self, item):
    """Load one (GT, LR) frame-sequence pair for the given index.

    Returns a dict with:
        gt:      torch uint8 tensor, thwc|rgb
        lr:      torch float32 tensor in [0, 1], thwc|rgb
        seq_idx: the sequence key (sub-directory name)
        frm_idx: sorted file names under the GT sequence directory

    Raises:
        IOError: if any frame file cannot be decoded by cv2.
    """
    key = self.keys[item]

    # load gt frames (kept as uint8; any normalization happens downstream)
    gt_seq = []
    for frm_path in retrieve_files(osp.join(self.gt_seq_dir, key)):
        frm = cv2.imread(frm_path)
        # cv2.imread returns None on failure; fail fast with a clear
        # message instead of a cryptic TypeError on the slice below
        if frm is None:
            raise IOError(f'Failed to load frame: {frm_path}')
        gt_seq.append(frm[..., ::-1])  # bgr -> rgb
    gt_seq = np.stack(gt_seq)  # thwc|rgb|uint8

    # load lr frames, normalized to [0, 1]
    lr_seq = []
    for frm_path in retrieve_files(osp.join(self.lr_seq_dir, key)):
        frm = cv2.imread(frm_path)
        if frm is None:
            raise IOError(f'Failed to load frame: {frm_path}')
        lr_seq.append(frm[..., ::-1].astype(np.float32) / 255.0)
    lr_seq = np.stack(lr_seq)  # thwc|rgb|float32

    # convert to tensor; ascontiguousarray is required because the
    # channel-reversed view ([..., ::-1]) has negative strides
    gt_tsr = torch.from_numpy(np.ascontiguousarray(gt_seq))  # uint8
    lr_tsr = torch.from_numpy(np.ascontiguousarray(lr_seq))  # float32

    # gt: thwc|rgb|uint8 | lr: thwc|rgb|float32
    return {
        'gt': gt_tsr,
        'lr': lr_tsr,
        'seq_idx': key,
        'frm_idx': sorted(os.listdir(osp.join(self.gt_seq_dir, key)))
    }
def compute_sequence_metrics(self, seq, true_seq_dir, pred_seq_dir, pred_seq=None):
    """Compute per-frame metrics for one sequence.

    Args:
        seq: sequence identifier, used as the key into self.metric_dict.
        true_seq_dir: directory holding ground-truth jpg frames.
        pred_seq_dir: directory holding predicted jpg frames (used only
            when pred_seq is None).
        pred_seq: optional in-memory predicted frames (hwc|rgb|uint8,
            indexable by frame position); when given, frames are taken
            from it instead of being read from pred_seq_dir.
    """
    # clear per-sequence state
    self.reset_per_sequence()

    # initialize metric_dict for the current sequence
    self.seq_idx_curr = seq
    self.metric_dict[self.seq_idx_curr] = OrderedDict(
        {metric: [] for metric in self.metric_opt.keys()})

    # retrieve files
    true_img_lst = base_utils.retrieve_files(true_seq_dir, 'jpg')
    pred_img_lst = base_utils.retrieve_files(pred_seq_dir, 'jpg')

    # guard against mismatched sequence lengths: the original always
    # iterated len(true_img_lst) and could raise IndexError when the
    # prediction side had fewer frames
    if pred_seq is not None:
        num_frm = min(len(true_img_lst), len(pred_seq))
    else:
        num_frm = min(len(true_img_lst), len(pred_img_lst))

    # compute metrics for each frame
    for i in range(num_frm):
        self.true_img_cur = cv2.imread(true_img_lst[i])
        if self.true_img_cur is None:
            continue  # skip unreadable ground-truth frames
        self.true_img_cur = self.true_img_cur[..., ::-1]  # bgr2rgb

        # use a given pred_seq or load from disk
        if pred_seq is not None:
            self.pred_img_cur = pred_seq[i]  # hwc|rgb|uint8
        else:
            self.pred_img_cur = cv2.imread(pred_img_lst[i])
            if self.pred_img_cur is None:
                continue  # skip unreadable predicted frames
            self.pred_img_cur = self.pred_img_cur[..., ::-1]

        # pred_img and true_img may have different sizes;
        # crop both to their common top-left region
        true_h, true_w = self.true_img_cur.shape[:-1]
        pred_h, pred_w = self.pred_img_cur.shape[:-1]
        min_h, min_w = min(true_h, pred_h), min(true_w, pred_w)
        self.true_img_cur = self.true_img_cur[:min_h, :min_w, :]
        self.pred_img_cur = self.pred_img_cur[:min_h, :min_w, :]

        # compute metrics for the current frame
        self.compute_frame_metrics()

        # keep the current frames as "previous" — presumably consumed by
        # metrics that compare consecutive frames (verify in compute_frame_metrics)
        self.true_img_pre = self.true_img_cur
        self.pred_img_pre = self.pred_img_cur
def __getitem__(self, item):
    """Load one GT frame sequence as a tensor (no LR frames here; the
    low-resolution counterpart is generated elsewhere).

    Returns a dict with:
        gt:      torch uint8 tensor, thwc|rgb
        seq_idx: the sequence key (sub-directory name)
        frm_idx: sorted file names under the GT sequence directory

    Raises:
        IOError: if any frame file cannot be decoded by cv2.
    """
    key = self.keys[item]

    # load gt frames
    gt_seq = []
    for frm_path in retrieve_files(osp.join(self.gt_seq_dir, key)):
        frm = cv2.imread(frm_path)
        # cv2.imread returns None on failure; fail fast with a clear
        # message instead of a cryptic TypeError on the slice below
        if frm is None:
            raise IOError(f'Failed to load frame: {frm_path}')
        gt_seq.append(frm[..., ::-1])  # bgr -> rgb
    gt_seq = np.stack(gt_seq)  # thwc|rgb|uint8

    # convert to tensor; ascontiguousarray is required because the
    # channel-reversed view ([..., ::-1]) has negative strides
    gt_tsr = torch.from_numpy(np.ascontiguousarray(gt_seq))  # uint8

    return {
        'gt': gt_tsr,
        'seq_idx': key,
        'frm_idx': sorted(os.listdir(osp.join(self.gt_seq_dir, key)))
    }