def extract_flow(args):
    # Run LiteFlowNet inference to produce .flo files for every frame pair.
    from tools.infer_liteflownet import infer

    output_file = infer(args)

    # Collect the generated flow files and find the first frame index.
    flow_list = [x for x in os.listdir(output_file) if '.flo' in x]
    flow_start_no = min([int(x[:5]) for x in flow_list])

    # Write an all-zero placeholder reverse flow (.rflo) for the starting frame.
    zero_flow = cvb.read_flow(os.path.join(output_file, flow_list[0]))
    cvb.write_flow(zero_flow * 0,
                   os.path.join(output_file, '%05d.rflo' % flow_start_no))

    args.DATA_ROOT = output_file

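# A minimal sketch (not from the original repo) of driving extract_flow() above
# with a plain namespace instead of a full argparse CLI; the attribute names
# other than DATA_ROOT are assumptions about what infer() expects.
from types import SimpleNamespace

args = SimpleNamespace(frame_dir='demo/frames')  # hypothetical attributes
extract_flow(args)
print('flow files written to', args.DATA_ROOT)   # DATA_ROOT is set by extract_flow
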
def test_read_flow():
    flow = cvb.read_flow(osp.join(osp.dirname(__file__), 'data/optflow.flo'))
    assert flow.ndim == 3 and flow.shape[-1] == 2
    flow_same = cvb.read_flow(flow)
    assert_array_equal(flow, flow_same)
    flow = cvb.read_flow(
        osp.join(osp.dirname(__file__), 'data/optflow.jpg'),
        quantize=True,
        denorm=True)
    assert flow.ndim == 3 and flow.shape[-1] == 2
    with pytest.raises(IOError):
        cvb.read_flow(osp.join(osp.dirname(__file__), 'data/color.jpg'))
    with pytest.raises(ValueError):
        cvb.read_flow(np.zeros((100, 100, 1)))
    with pytest.raises(TypeError):
        cvb.read_flow(1)

def flow(self):
    if self.i == -1:  # initialization
        self.args.data_list = infer_liteflownet.generate_flow_list(
            self.args.frame_dir)
        print('====> Loading', self.args.pretrained_model_liteflownet)
        self.Flownet = LiteFlowNet(self.args.pretrained_model_liteflownet)
        self.Flownet.to(self.args.device)
        self.Flownet.eval()

        dataset_ = FlowInfer.FlowInfer(self.args.data_list,
                                       size=self.args.img_size)
        self.flow_dataloader = iter(
            DataLoader(dataset_, batch_size=1, shuffle=False, num_workers=0))

    self.i += 1
    complete = False
    with torch.no_grad():
        try:
            # Estimate the flow for the next frame pair and save it as .flo.
            f1, f2, output_path_ = next(self.flow_dataloader)
            f1 = f1.to(self.args.device)
            f2 = f2.to(self.args.device)
            flow = infer_liteflownet.estimate(self.Flownet, f1, f2)

            output_path = output_path_[0]
            output_file = os.path.dirname(output_path)
            os.makedirs(output_file, exist_ok=True)
            flow_numpy = flow[0].permute(1, 2, 0).data.cpu().numpy()
            cvb.write_flow(flow_numpy, output_path)
        except StopIteration:
            complete = True

    if self.i == len(self.flow_dataloader) - 1 or complete:
        print('LiteFlowNet Inference has been finished!')
        flow_list = [
            x for x in os.listdir(self.args.flow_root) if '.flo' in x
        ]
        flow_start_no = min([int(x[:5]) for x in flow_list])
        del self.flow_dataloader, self.Flownet

        # Write an all-zero placeholder reverse flow (.rflo) for the first frame.
        zero_flow = cvb.read_flow(
            os.path.join(self.args.flow_root, flow_list[0]))
        cvb.write_flow(
            zero_flow * 0,
            os.path.join(self.args.flow_root, '%05d.rflo' % flow_start_no))
        self.args.DATA_ROOT = self.args.flow_root

        self.i = -1
        self.state += 1

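# A hedged sketch of how the incremental flow() step above might be driven.
# The wrapper class name (FlowPipeline) and its constructor are assumptions;
# what the code itself shows is that flow() resets self.i to -1 and advances
# self.state once every frame pair has been processed.
pipeline = FlowPipeline(args)      # hypothetical object holding args, i and state
start_state = pipeline.state
while pipeline.state == start_state:
    pipeline.flow()                # one LiteFlowNet forward pass per call
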
def test_write_flow():
    flow = np.random.rand(100, 100, 2).astype(np.float32)

    # write to a .flo file
    _, filename = tempfile.mkstemp()
    cvb.write_flow(flow, filename)
    flow_from_file = cvb.read_flow(filename)
    assert_array_equal(flow, flow_from_file)
    os.remove(filename)

    # write to two .jpg files
    tmp_dir = tempfile.gettempdir()
    cvb.write_flow(flow, osp.join(tmp_dir, 'test_flow.jpg'), quantize=True)
    assert osp.isfile(osp.join(tmp_dir, 'test_flow_dx.jpg'))
    assert osp.isfile(osp.join(tmp_dir, 'test_flow_dy.jpg'))
    os.remove(osp.join(tmp_dir, 'test_flow_dx.jpg'))
    os.remove(osp.join(tmp_dir, 'test_flow_dy.jpg'))

def __getitem__(self, idx):
    flow_dir = self.data_items[idx][0]
    video_class_no = self.data_items[idx][1]
    if self.config.get_mask:
        mask_dir = self.data_items[idx][2]
    if self.isTest:
        output_dirs = self.data_items[idx][-1]

    flow_set = []
    mask_set = []
    flow_mask_cat_set = []
    flow_masked_set = []

    # Pre-compute a mask that can be reused for every frame when FIX_MASK is set.
    if self.config.MASK_MODE == 'bbox':
        tmp_bbox = im.random_bbox(self.config)
        tmp_mask = im.bbox2mask(self.config, tmp_bbox)
        tmp_mask = tmp_mask[0, 0, :, :]
        fix_mask = np.expand_dims(tmp_mask, axis=2)
    elif self.config.MASK_MODE == 'mid-bbox':
        tmp_mask = im.mid_bbox_mask(self.config)
        tmp_mask = tmp_mask[0, 0, :, :]
        fix_mask = np.expand_dims(tmp_mask, axis=2)

    for i in range(11):
        tmp_flow = cvb.read_flow(flow_dir[i])
        if self.config.get_mask:
            tmp_mask = cv2.imread(mask_dir[i], cv2.IMREAD_UNCHANGED)
            tmp_mask = self._mask_tf(tmp_mask)
        else:
            if self.config.FIX_MASK:
                tmp_mask = fix_mask.copy()
            else:
                tmp_bbox = im.random_bbox(self.config)
                tmp_mask = im.bbox2mask(self.config, tmp_bbox)
                tmp_mask = tmp_mask[0, 0, :, :]
                tmp_mask = np.expand_dims(tmp_mask, axis=2)

        tmp_flow = self._flow_tf(tmp_flow)
        tmp_flow_masked = tmp_flow * (1. - tmp_mask)

        if self.config.INITIAL_HOLE:
            # Fill the hole with a smooth initial guess computed at half resolution.
            tmp_flow_resized = cv2.resize(
                tmp_flow, (self.size[1] // 2, self.size[0] // 2))
            tmp_mask_resized = cv2.resize(
                tmp_mask, (self.size[1] // 2, self.size[0] // 2),
                interpolation=cv2.INTER_NEAREST)
            tmp_flow_masked_small = tmp_flow_resized
            tmp_flow_masked_small[:, :, 0] = rf.regionfill(
                tmp_flow_resized[:, :, 0], tmp_mask_resized)
            tmp_flow_masked_small[:, :, 1] = rf.regionfill(
                tmp_flow_resized[:, :, 1], tmp_mask_resized)
            tmp_flow_masked = tmp_flow_masked + tmp_mask * cv2.resize(
                tmp_flow_masked_small, (self.size[1], self.size[0]))

        flow_masked_set.append(tmp_flow_masked)
        flow_set.append(tmp_flow)
        # The single-channel mask is appended twice so the stacked mask has the
        # same channel count as the two-channel flow.
        mask_set.append(tmp_mask)
        mask_set.append(tmp_mask)

        tmp_flow_mask_cat = np.concatenate((tmp_flow_masked, tmp_mask), axis=2)
        flow_mask_cat_set.append(tmp_flow_mask_cat)

    # Stack the 11 frames along the channel dimension and convert to tensors.
    flow_mask_cat = np.concatenate(flow_mask_cat_set, axis=2)
    flow_masked = np.concatenate(flow_masked_set, axis=2)
    gt_flow = np.concatenate(flow_set, axis=2)
    mask = np.concatenate(mask_set, axis=2)

    flow_mask_cat = torch.from_numpy(flow_mask_cat).permute(2, 0, 1).contiguous().float()
    flow_masked = torch.from_numpy(flow_masked).permute(2, 0, 1).contiguous().float()
    gt_flow = torch.from_numpy(gt_flow).permute(2, 0, 1).contiguous().float()
    mask = torch.from_numpy(mask).permute(2, 0, 1).contiguous().float()

    if self.isTest:
        return flow_mask_cat, flow_masked, gt_flow, mask, output_dirs
    return flow_mask_cat, flow_masked, gt_flow, mask

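# A hedged sketch of consuming a dataset whose __getitem__ is shown above.
# The class and config names (FlowInitial.FlowSeq, config) are assumptions for
# illustration; only the returned tensor layout follows from the code itself:
# flow_masked and gt_flow carry 22 channels (11 frames x 2), mask carries 22
# (the mask is duplicated per frame), and flow_mask_cat carries 33 (2 + 1 per frame).
from torch.utils.data import DataLoader

dataset = FlowInitial.FlowSeq(config, isTest=False)   # hypothetical constructor
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

flow_mask_cat, flow_masked, gt_flow, mask = next(iter(loader))
print(flow_mask_cat.shape, flow_masked.shape, gt_flow.shape, mask.shape)
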
def __getitem__(self, idx):
    flow_dir = self.data_items[idx][0]
    video_class_no = self.data_items[idx][1]
    gt_dir = self.data_items[idx][2]
    if self.config.get_mask:
        mask_dirs = self.data_items[idx][3]
    if self.isTest:
        output_dirs = self.data_items[idx][-1]

    mask_set = []
    flow_mask_cat_set = []
    flow_masked_set = []
    gt_flow_set = []

    # Only two of the ground-truth flows (indices 5 and 16) are kept.
    gt_dir_set = [gt_dir[5], gt_dir[16]]
    for p in gt_dir_set:
        tmp_flow = cvb.read_flow(p)
        tmp_flow = self._flow_tf(tmp_flow)
        gt_flow_set.append(tmp_flow)

    # Pre-compute a mask that can be reused for every frame when FIX_MASK is set.
    if self.config.MASK_MODE == 'bbox':
        tmp_bbox = im.random_bbox(self.config)
        tmp_mask = im.bbox2mask(self.config, tmp_bbox)
        tmp_mask = tmp_mask[0, 0, :, :]
        fix_mask = np.expand_dims(tmp_mask, axis=2)
    elif self.config.MASK_MODE == 'mid-bbox':
        tmp_mask = im.mid_bbox_mask(self.config)
        tmp_mask = tmp_mask[0, 0, :, :]
        fix_mask = np.expand_dims(tmp_mask, axis=2)

    # The first 11 entries are forward flows, the remaining 11 are reverse flows.
    f_flow_dir = flow_dir[:11]
    r_flow_dir = flow_dir[11:]

    for i in range(11):
        tmp_flow = cvb.read_flow(f_flow_dir[i])
        if self.config.get_mask:
            tmp_mask = cv2.imread(mask_dirs[i], cv2.IMREAD_UNCHANGED)
            tmp_mask = self._mask_tf(tmp_mask)
        else:
            if self.config.FIX_MASK:
                tmp_mask = fix_mask.copy()
            else:
                tmp_bbox = im.random_bbox(self.config)
                tmp_mask = im.bbox2mask(self.config, tmp_bbox)
                tmp_mask = tmp_mask[0, 0, :, :]
                tmp_mask = np.expand_dims(tmp_mask, axis=2)
        tmp_flow = self._flow_tf(tmp_flow)
        tmp_flow_masked = tmp_flow

        flow_masked_set.append(tmp_flow_masked)
        mask_set.append(tmp_mask)
        mask_set.append(tmp_mask)

        tmp_flow_mask_cat = np.concatenate((tmp_flow_masked, tmp_mask), axis=2)
        flow_mask_cat_set.append(tmp_flow_mask_cat)

    for i in range(11):
        tmp_flow = cvb.read_flow(r_flow_dir[i])
        tmp_flow = self._flow_tf(tmp_flow)
        if self.config.get_mask:
            tmp_mask = cv2.imread(mask_dirs[i + 11], cv2.IMREAD_UNCHANGED)
            tmp_mask = self._mask_tf(tmp_mask)
        else:
            if self.config.FIX_MASK:
                tmp_mask = fix_mask.copy()
            else:
                tmp_bbox = im.random_bbox(self.config)
                tmp_mask = im.bbox2mask(self.config, tmp_bbox)
                tmp_mask = tmp_mask[0, 0, :, :]
                tmp_mask = np.expand_dims(tmp_mask, axis=2)
        tmp_flow_masked = tmp_flow

        flow_masked_set.append(tmp_flow_masked)
        mask_set.append(tmp_mask)
        mask_set.append(tmp_mask)

        tmp_flow_mask_cat = np.concatenate((tmp_flow_masked, tmp_mask), axis=2)
        flow_mask_cat_set.append(tmp_flow_mask_cat)

    flow_mask_cat = np.concatenate(flow_mask_cat_set, axis=2)
    flow_masked = np.concatenate(flow_masked_set, axis=2)
    gt_flow = np.concatenate(gt_flow_set, axis=2)
    mask = np.concatenate(mask_set, axis=2)

    flow_mask_cat = torch.from_numpy(flow_mask_cat).permute(2, 0, 1).contiguous().float()
    flow_masked = torch.from_numpy(flow_masked).permute(2, 0, 1).contiguous().float()
    gt_flow = torch.from_numpy(gt_flow).permute(2, 0, 1).contiguous().float()
    mask = torch.from_numpy(mask).permute(2, 0, 1).contiguous().float()

    if self.isTest:
        return flow_mask_cat, flow_masked, gt_flow, mask, output_dirs
    else:
        return flow_mask_cat, flow_masked, gt_flow, mask

#!/usr/bin/env python
import argparse
import glob
import os

import cvbase as cvb
import matplotlib.pyplot as plt

parser = argparse.ArgumentParser()
parser.add_argument('arg1', type=str, help='Input dir')
args = parser.parse_args()

path = args.arg1
outdir = os.path.split(path)
print(outdir)
outdir = os.path.join(outdir[0], outdir[1] + '_vis')
if not os.path.exists(outdir):
    os.mkdir(outdir)

# Convert every .flo file in the input directory to an RGB visualization.
flos = glob.glob(os.path.join(path, '*flo'))
print(len(flos), 'flo files found.')
for flo in flos:
    flow = cvb.read_flow(flo)
    img = cvb.flow2rgb(flow)
    plt.imsave(os.path.join(outdir, os.path.basename(flo) + '.png'), img)

# convert .flo to rgb and visualize
import cvbase as cvb
from skimage.io import imsave

flow_name = 'out.flo'
flow = cvb.read_flow(flow_name)
a = cvb.flow2rgb(flow)
imsave('a.png', a)