def hookTrainData(self, sampleIdxs):
    assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
    input_list, flow_list = [], []
    for idx in sampleIdxs:
        # Each training sample is a list of self.time_step consecutive frames.
        img_list = self.trainList[idx]
        multi_input = []
        multi_flow = []
        for time_idx in range(self.time_step):
            imgData = cv2.imread(
                os.path.join(self.img_path, img_list[time_idx]),
                cv2.IMREAD_COLOR)
            multi_input.append(
                np.expand_dims(
                    cv2.resize(imgData,
                               (self.image_size[1], self.image_size[0])), 0))
            # We have self.time_step images, but self.time_step - 1 flows.
            if time_idx != self.time_step - 1:
                flow = utils.readFlow(
                    os.path.join(self.data_path, 'training', "flow",
                                 img_list[time_idx][:-4] + ".flo"))
                multi_flow.append(np.expand_dims(flow, 0))
        # Stack frames (and flows) along the channel axis, samples along the batch axis.
        input_list.append(np.concatenate(multi_input, axis=3))
        flow_list.append(np.concatenate(multi_flow, axis=3))
    return np.concatenate(input_list, axis=0), np.concatenate(flow_list, axis=0)
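
# Usage sketch (not from the original source): `loader` and `batch_idxs` are assumed
# names. With time_step frames per sample, the returned inputs stack the resized RGB
# frames on the channel axis (3 * time_step channels) and the flows stack the
# time_step - 1 ground-truth fields (2 * (time_step - 1) channels); note the flows
# keep their original resolution because they are not resized above.
batch_idxs = np.random.choice(len(loader.trainList), size=8, replace=False).tolist()
inputs, flows = loader.hookTrainData(batch_idxs)
print(inputs.shape, flows.shape)
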
def hookTrainData(self, sampleIdxs):
    assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
    source_list, target_list, flow_gt = [], [], []
    for idx in sampleIdxs:
        # Each entry names a frame pair and its ground-truth flow:
        # <frameID>_img1.ppm, <frameID>_img2.ppm, <frameID>_flow.flo.
        frameID = self.trainList[idx]
        prev_img = frameID + "_img1.ppm"
        next_img = frameID + "_img2.ppm"
        source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
        target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
        flow = utils.readFlow(os.path.join(self.img_path, frameID + "_flow.flo"))
        # Resize the images to the model input size; the flow keeps its original resolution.
        source = cv2.resize(source, (self.image_size[1], self.image_size[0]))
        source_list.append(np.expand_dims(source, 0))
        target = cv2.resize(target, (self.image_size[1], self.image_size[0]))
        target_list.append(np.expand_dims(target, 0))
        flow_gt.append(np.expand_dims(flow, 0))
    return (np.concatenate(source_list, axis=0),
            np.concatenate(target_list, axis=0),
            np.concatenate(flow_gt, axis=0))
def hookTrainData(self, sampleIdxs):
    assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
    source_list, target_list, flow_gt = [], [], []
    for idx in sampleIdxs:
        # Each entry is a (previous frame, next frame) pair; the ground-truth flow
        # lives under <data_path>/training/flow with the previous frame's basename.
        img_pair = self.trainList[idx]
        prev_img = img_pair[0]
        next_img = img_pair[1]
        source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
        target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
        flow = utils.readFlow(
            os.path.join(self.data_path, 'training', "flow",
                         prev_img[:-4] + ".flo"))
        if self.is_crop:
            source = cv2.resize(source, (self.crop_size[1], self.crop_size[0]))
            target = cv2.resize(target, (self.crop_size[1], self.crop_size[0]))
        source_list.append(np.expand_dims(source, 0))
        target_list.append(np.expand_dims(target, 0))
        flow_gt.append(np.expand_dims(flow, 0))
    return (np.concatenate(source_list, axis=0),
            np.concatenate(target_list, axis=0),
            np.concatenate(flow_gt, axis=0))
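
# All three loaders rely on utils.readFlow, which is not shown here. A minimal sketch,
# assuming the standard Middlebury .flo layout (float32 magic 202021.25, then int32
# width and height, then interleaved u/v components as float32); the function name is
# taken from the calls above, the body is an assumption.
import numpy as np

def readFlow(path):
    with open(path, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == 202021.25, 'invalid .flo file: bad magic number'
        width = int(np.fromfile(f, np.int32, count=1)[0])
        height = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * width * height)
    # Reshape to (height, width, 2): channel 0 is the horizontal (u) component,
    # channel 1 the vertical (v) component.
    return data.reshape(height, width, 2)
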
# sess.run(init)
saver.restore(sess, model_path)

test_dir = './middlebury_data/Grove2'
# img1 = read_img(test_dir + '/' + 'frame10.png')[0:mdl_height, 0:mdl_width, :]
# img2 = read_img(test_dir + '/' + 'frame11.png')[0:mdl_height, 0:mdl_width, :]
# edge = np.expand_dims(read_img(test_dir + '/' + 'frame10_edge.png')[0:mdl_height, 0:mdl_width], 2)
# miss = np.expand_dims(np.mean(np.abs(img1 - img2), axis=2), 2)
# flow_gt = readFlow(test_dir + '/' + 'flow10.flo')[0:mdl_height, 0:mdl_width, :]
img1 = read_img(test_dir + '/' + 'frame10.png')
img2 = read_img(test_dir + '/' + 'frame11.png')
edge = np.expand_dims(read_img(test_dir + '/' + 'frame10_edge.png'), 2)
miss = np.expand_dims(np.mean(np.abs(img1 - img2), axis=2), 2)
flow_gt = readFlow(test_dir + '/' + 'flow10.flo')

H = img1.shape[0]
W = img1.shape[1]
flow = np.zeros(shape=(H, W, 2), dtype=np.float32)

# The test image is larger than the model's fixed input size, so run the network
# tile by tile over non-overlapping mdl_height x mdl_width windows; any remainder
# at the right/bottom border is skipped by the floor division.
for i in range(int(floor(H / float(mdl_height)))):
    for j in range(int(floor(W / float(mdl_width)))):
        start_y = i * mdl_height
        start_x = j * mdl_width
        img1_tile = img1[start_y:start_y + mdl_height, start_x:start_x + mdl_width, :]
        img2_tile = img2[start_y:start_y + mdl_height, start_x:start_x + mdl_width, :]
        edge_tile = edge[start_y:start_y + mdl_height, start_x:start_x + mdl_width, :]
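        # Continuation sketch (not from the original source): the placeholder names
        # (img1_ph, img2_ph, edge_ph, miss_ph) and the output tensor (pred_flow) are
        # assumptions; substitute the graph's actual tensors. Each tile is fed through
        # the restored model and the predicted flow is written back into the matching
        # window of the full-resolution flow array.
        miss_tile = miss[start_y:start_y + mdl_height, start_x:start_x + mdl_width, :]
        flow_tile = sess.run(pred_flow, feed_dict={
            img1_ph: np.expand_dims(img1_tile, 0),
            img2_ph: np.expand_dims(img2_tile, 0),
            edge_ph: np.expand_dims(edge_tile, 0),
            miss_ph: np.expand_dims(miss_tile, 0)})
        flow[start_y:start_y + mdl_height, start_x:start_x + mdl_width, :] = flow_tile[0]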