def read(filenames, max_d, interval_scale):
    """Load one training sample: reference/source images, cameras, GT depth, masks.

    `filenames` is a dict with keys 'ref', 'ref_cam', 'srcs', 'srcs_cam',
    'gt', 'masks', 'skip'; `max_d` and `interval_scale` are forwarded to
    `load_cam`. Returns a dict with the loaded arrays plus the `skip` flag.
    """
    ref_name = filenames['ref']
    ref_cam_name = filenames['ref_cam']
    srcs_name = filenames['srcs']
    srcs_cam_name = filenames['srcs_cam']
    gt_name = filenames['gt']
    masks_name = filenames['masks']
    skip = filenames['skip']

    # Reference image first, then the source views, loaded with one pass.
    images = [cv2.imread(fn) for fn in [ref_name] + srcs_name]
    ref = images[0]
    srcs = images[1:]

    cams = [load_cam(fn, max_d, interval_scale)
            for fn in [ref_cam_name] + srcs_cam_name]
    ref_cam = cams[0]
    srcs_cam = cams[1:]

    # GT depth and masks get a trailing channel axis: (H, W) -> (H, W, 1).
    gt = np.expand_dims(load_pfm(gt_name), -1)
    masks = [np.expand_dims(cv2.imread(fn, cv2.IMREAD_GRAYSCALE), -1)
             for fn in masks_name]

    return {
        'ref': ref,
        'ref_cam': ref_cam,
        'srcs': srcs,
        'srcs_cam': srcs_cam,
        'gt': gt,
        'masks': masks,
        'skip': skip,
    }
def read(filenames):
    """Load one fusion sample: images, cameras, per-view depths and ref probs.

    `filenames` is a dict with keys 'ref', 'ref_cam', 'srcs', 'srcs_cam',
    'ref_depth', 'srcs_depth', 'ref_probs', plus pass-through 'skip' and 'id'.
    Cameras are loaded with max_d=0 and interval_scale=1.
    """
    ref_name = filenames['ref']
    ref_cam_name = filenames['ref_cam']
    srcs_name = filenames['srcs']
    srcs_cam_name = filenames['srcs_cam']
    ref_depth_name = filenames['ref_depth']
    srcs_depth_name = filenames['srcs_depth']
    ref_probs_name = filenames['ref_probs']

    images = [cv2.imread(fn) for fn in [ref_name] + srcs_name]
    ref = images[0]
    srcs = images[1:]

    cams = [load_cam(fn, 0, 1) for fn in [ref_cam_name] + srcs_cam_name]
    ref_cam = cams[0]
    srcs_cam = cams[1:]

    # Depth maps get a trailing channel axis: (H, W) -> (H, W, 1).
    depths = [np.expand_dims(load_pfm(fn), axis=-1)
              for fn in [ref_depth_name] + srcs_depth_name]
    ref_depth = depths[0]
    srcs_depth = depths[1:]

    ref_probs = [np.expand_dims(load_pfm(fn), axis=-1)
                 for fn in ref_probs_name]

    return {
        'ref': ref,
        'ref_cam': ref_cam,
        'ref_depth': ref_depth,
        'srcs': srcs,
        'srcs_cam': srcs_cam,
        'srcs_depth': srcs_depth,
        'ref_probs': ref_probs,
        'skip': filenames['skip'],
        'id': filenames['id'],
    }
def read(filenames, max_d, interval_scale):
    """Load one inference sample with placeholder ground truth.

    Same keys as the training loader minus 'gt'/'masks': the returned `gt`
    and `masks` are all-zero arrays shaped like the reference image, so the
    downstream pipeline can run without real ground-truth files.
    """
    ref_name = filenames['ref']
    ref_cam_name = filenames['ref_cam']
    srcs_name = filenames['srcs']
    srcs_cam_name = filenames['srcs_cam']
    skip = filenames['skip']

    images = [cv2.imread(fn) for fn in [ref_name] + srcs_name]
    ref = images[0]
    srcs = images[1:]

    cams = [load_cam(fn, max_d, interval_scale)
            for fn in [ref_cam_name] + srcs_cam_name]
    ref_cam = cams[0]
    srcs_cam = cams[1:]

    # Dummy GT depth and per-source masks: zeros of shape (H, W, 1).
    h, w = ref.shape[0], ref.shape[1]
    gt = np.zeros((h, w, 1))
    masks = [np.zeros((h, w, 1)) for _ in srcs]

    return {
        'ref': ref,
        'ref_cam': ref_cam,
        'srcs': srcs,
        'srcs_cam': srcs_cam,
        'gt': gt,
        'masks': masks,
        'skip': skip,
    }
def read(filenames, num_src):
    """Load one sample limited to the first `num_src` source views.

    Masks are filled with 255 (everything valid) since this loader has no
    per-view mask files; `skip` is always 0.
    """
    ref_name = filenames['ref']
    ref_cam_name = filenames['ref_cam']
    srcs_name = filenames['srcs']
    srcs_cam_name = filenames['srcs_cam']
    gt_name = filenames['gt']

    # Truncate source lists to the requested number of views.
    images = [cv2.imread(fn) for fn in [ref_name] + srcs_name[:num_src]]
    ref = images[0]
    srcs = images[1:]

    cams = [load_cam(fn) for fn in [ref_cam_name] + srcs_cam_name[:num_src]]
    ref_cam = cams[0]
    srcs_cam = cams[1:]

    gt = np.expand_dims(load_pfm(gt_name), -1)
    # All-valid uint8 masks (255 everywhere), one per source view.
    masks = [(np.ones_like(gt) * 255).astype(np.uint8) for _ in srcs]

    return {
        'ref': ref,
        'ref_cam': ref_cam,
        'srcs': srcs,
        'srcs_cam': srcs_cam,
        'gt': gt,
        'masks': masks,
        'skip': 0,
    }
def read(filenames, max_d, interval_scale):
    """Load one sample, tolerating 'dummy' placeholder source views.

    `filenames` is a dict with keys 'ref', 'ref_cam', 'srcs', 'srcs_cam',
    'gt', 'skip'. Any source image/camera path equal to the literal string
    'dummy' is replaced by an all-ones array shaped like the reference
    image/camera, so the batch keeps a fixed number of views. Samples whose
    reference camera has a non-positive depth start (entry [1, 3, 0] of the
    loaded cam array) are flagged with skip=1.

    Returns a dict with the loaded arrays, all-valid masks, and `skip`.
    """
    ref_name, ref_cam_name, srcs_name, srcs_cam_name, gt_name, skip = [
        filenames[attr]
        for attr in ['ref', 'ref_cam', 'srcs', 'srcs_cam', 'gt', 'skip']
    ]

    # 'dummy' entries load as None first; the ref is assumed real, so the
    # placeholders can be shaped like it afterwards.
    ref, *srcs = [
        cv2.imread(fn) if fn != 'dummy' else None
        for fn in [ref_name] + srcs_name
    ]
    srcs = [
        src if src is not None else np.ones_like(ref, dtype=np.uint8)
        for src in srcs
    ]

    ref_cam, *srcs_cam = [
        load_cam(fn, max_d, interval_scale) if fn != 'dummy' else None
        for fn in [ref_cam_name] + srcs_cam_name
    ]
    srcs_cam = [
        src_cam if src_cam is not None
        else np.ones_like(ref_cam, dtype=np.float32)
        for src_cam in srcs_cam
    ]

    # GT depth gets a trailing channel axis; masks are all-valid (255).
    gt = np.expand_dims(load_pfm(gt_name), -1)
    masks = [(np.ones_like(gt) * 255).astype(np.uint8) for _ in range(len(srcs))]

    # A non-positive depth-range start makes the sample unusable for
    # depth sampling, so mark it to be skipped downstream.
    if ref_cam[1, 3, 0] <= 0:
        skip = 1
        # Fixed F541: the original used an f-string with no placeholders.
        print('depth start <= 0')

    return {
        'ref': ref,
        'ref_cam': ref_cam,
        'srcs': srcs,
        'srcs_cam': srcs_cam,
        'gt': gt,
        'masks': masks,
        'skip': skip,
    }
parser.add_argument('--cam_scale', type=float, default=1) # parser.add_argument('--show_result', action='store_true', default=False) parser.add_argument('--downsample', type=float, default=None) args = parser.parse_args() pthresh = [float(v) for v in args.pthresh.split(',')] num_src = args.view pair = load_pair(args.pair, min_views=num_src) n_views = len(pair['id_list']) views = {} for i, id in tqdm(enumerate(pair['id_list']), 'load data', n_views): image = cv2.imread(f'{args.data}/{id.zfill(8)}.jpg').transpose(2,0,1)[::-1] cam = load_cam(f'{args.data}/cam_{id.zfill(8)}_flow3.txt', 256, 1) depth = np.expand_dims(load_pfm(f'{args.data}/{id.zfill(8)}_flow3.pfm'), axis=0) probs = np.stack([load_pfm(f'{args.data}/{id.zfill(8)}_flow{k+1}_prob.pfm') for k in range(3)], axis=0) views[id] = { 'image': image, # 13hw (after next step) 'cam': cam, # 1244 'depth': depth, # 11hw 'prob': probs, # 13hw } recursive_apply(views[id], lambda arr: torch.from_numpy(np.ascontiguousarray(arr)).float().unsqueeze(0)) for i, id in tqdm(enumerate(pair['id_list']), 'prob filter', n_views): views[id]['mask'] = prob_filter(views[id]['prob'].cuda(), pthresh).cpu() # 11hw bool views[id]['depth'] *= views[id]['mask'] update = {}