def __getitem__(self, idx):
    """Return one training sample as a 5-tuple.

    Loads the RGB frame with cv2 and the three depth maps with the
    project's depth_read helper (the depth PNGs need special decoding,
    which is why plain cv2.imread is not used for them).

    Returns:
        (rgb_data, sparse_depth_data, continuous_depth_data,
         ground_data, identity) for sample ``idx``.
    """
    rgb_data = cv2.imread(self.rgb_list[idx])
    sparse_depth_data = depth_read(self.sparse_depth_list[idx])
    continuous_depth_data = depth_read(self.continuous_depth_list[idx])
    ground_data = depth_read(self.ground_list[idx])
    identity = self.identity[idx]
    return (rgb_data, sparse_depth_data, continuous_depth_data,
            ground_data, identity)
def __getitem__(self, idx):
    """Return a stereo sample dict for index ``idx``.

    Always contains 'img_left' and 'img_right' (optionally transformed).
    When ``self.use_depth`` is set, also includes the transformed
    'depth_left' map; otherwise includes the left image path under
    'img_left_dir' instead.
    """
    left_path = self.left_img_list[idx]
    img_left = Image.open(left_path)
    img_right = Image.open(self.right_img_list[idx])

    if self.data_transform:
        img_left = self.data_transform(img_left)
        img_right = self.data_transform(img_right)

    if not self.use_depth:
        # No ground truth requested: hand back the source path instead.
        return {
            'img_left': img_left,
            'img_right': img_right,
            'img_left_dir': left_path,
        }

    depth_left = depth_read(self.left_depth_list[idx])
    if self.depth_transform:
        depth_left = self.depth_transform(depth_left)
    return {
        'img_left': img_left,
        'img_right': img_right,
        'depth_left': depth_left,
    }
def showSampleDepth(self):
    """Display every ground-truth depth map under TRAIN_DEPTH_DIR.

    Walks each drive folder, decodes each depth PNG with rd.depth_read,
    and shows it with matplotlib (each plt.show() blocks until the
    window is closed).
    """
    # Fixed relative location of the ground-truth depth maps inside
    # each drive folder; invariant, so computed once outside the loop.
    SUB_DIR = "/proj_depth/groundtruth/image_02/"
    for folder in self.getSubDirectories(self.TRAIN_DEPTH_DIR):
        image_dir = self.TRAIN_DEPTH_DIR + folder + SUB_DIR
        for image_name in os.listdir(image_dir):
            # depth_read handles the dataset's depth PNG encoding.
            img = rd.depth_read(image_dir + image_name)
            plt.imshow(img)
            plt.show()
def get_batch(
    self,
    folder_list,
    fr_num,
):
    """Load one batch of (RGB, optical-flow, depth) samples.

    Args:
        folder_list: per-sample strings of the form
            "<prefix>image<suffix> <video_num>"; split on 'image' to
            rebuild the rgb/flow/depth directory paths.
        fr_num: per-sample frame offsets within the video split.

    Returns:
        Three parallel lists (rgb_set, opf_set, dep_set) of length
        ``opt.bs``; flow is truncated to its first two channels.
    """
    rgb_set = []
    opf_set = []
    dep_set = []
    for b in range(opt.bs):
        entry = folder_list[b].split()
        video = entry[0].split('image')
        video_num = int(entry[1])
        rgb_path = os.path.join(self.rgb_path, video[0],
                                'image' + video[1], 'data')
        opf_path = os.path.join(self.opf_path, video[0],
                                'image' + video[1], 'data')
        dep_path = os.path.join(self.dep_path, video[0],
                                'proj_depth/groundtruth/image' + video[1])

        # The frame index is identical for all three modalities;
        # compute it once instead of three times.
        frame = opt.video_split * video_num + fr_num[b] + 5

        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
        # this requires an old SciPy (or migration to imageio) — confirm
        # the pinned environment.
        rgb_data = scipy.misc.imread(os.path.join(rgb_path, '%010d.png' % frame))
        rgb_set.append(rgb_data)

        opf_data = readFLO(os.path.join(opf_path, '%010d.flo' % frame))
        opf_set.append(opf_data[:, :, 0:2])

        dep_data = depth_read(os.path.join(dep_path, '%010d.png' % frame))
        dep_set.append(dep_data)
    return rgb_set, opf_set, dep_set
load_path = './results/flonexwGRU/'
save_path = './results/flowGRU_median/'
for i in range(len(test_list)):
    print(i)
    #< ----------------------- Data Path ----------------------- >#
    full = test_list[i].split()[0]
    split = test_list[i].split()[0].split('/')
    pred_path = os.path.join(load_path, split[0], split[1], '%s.png' % split[4].split('.')[0])
    dep_path = os.path.join(DEP_path, split[0], split[1], '%s.png' % split[4].split('.')[0])
    #< ----------------------- Data Load & Normalization ----------------------- >#
    out = depth_read(pred_path)
    dep_data = depth_read(dep_path)
    # Keep an unmasked copy of the prediction for rescaling; avoids the
    # original second depth_read of the same file.
    net_out = out.copy()
    #< ----------------------- Median Scaling ----------------------- >#
    n = np.sum(dep_data != -1)  # count of valid ground-truth pixels
    # Invalid pixels are encoded as -1; mask them as NaN so nanmedian
    # ignores them in both prediction and ground truth.
    out[dep_data < 0] = np.nan
    dep_data[dep_data < 0] = np.nan
    # Classic median scaling: align the prediction's median depth to the
    # ground truth's, then clamp to the evaluation range [1e-3, 80].
    scale = np.nanmedian(dep_data) / np.nanmedian(out)
    scaled = net_out * scale
    scaled[scaled <= 1e-3] = 1e-3
    scaled[scaled >= 80] = 80
    #< ----------------------- Save ----------------------- >#
images = os.listdir(x_data_path)
depths = os.listdir(y_data_path)
print(len(images))
print(len(depths))
# Load images and depth maps into arrays.
for im_name in images:
    im = x_data_path + im_name
    img = cv.imread(im, cv.IMREAD_GRAYSCALE)
    if img is not None:
        img = cv.resize(img, dsize=(284, 75))
        training_images.append(img)
for depth in depths:
    # BUG FIX: depth maps were listed from y_data_path but previously
    # read from x_data_path; read them from y_data_path.
    dm = y_data_path + depth
    depth_map = rd.depth_read(dm)
    if depth_map is not None:
        depth_map = cv.resize(depth_map, dsize=(284, 75))
        training_depths.append(depth_map)
# Shapes of the last successfully loaded image / depth map.
input_shape = img.shape
output_shape = depth_map.shape
output_size = output_shape[0] * output_shape[1]
training_images = (np.asarray(training_images)) / 255
training_depths = np.asarray(training_depths)
# Using train_test_split for early debugging; will use specific driving scenes as test set eventually
train_images, test_images, train_depths, test_depths = train_test_split(training_images, training_depths, test_size=0.2)
# NOTE(review): cv.resize dsize is (width, height), so each array is
# 75 rows x 284 cols; reshaping to (284, 75, 1) keeps the element count
# but swaps the axis meaning — confirm this is intended.
train_images = train_images.reshape((len(train_images), 284, 75, 1))