def __init__(self, batchSize=1):
    tf.reset_default_graph()

    self.img_inp = tf.placeholder(tf.float32, shape=[batchSize, HEIGHT, WIDTH, 3], name='image')
    training_flag = tf.constant(False, tf.bool)

    self.options = parse_args()
    self.global_pred_dict, _, _ = build_graph(self.img_inp, self.img_inp, training_flag, self.options)
    var_to_restore = tf.global_variables()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    self.sess = tf.Session(config=config)
    self.sess.run(init_op)
    loader = tf.train.Saver(var_to_restore)
    path = os.path.dirname(os.path.realpath(__file__))
    checkpoint_dir = path + '/checkpoint/sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0'
    loader.restore(self.sess, "%s/checkpoint.ckpt" % checkpoint_dir)
    return
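# A minimal usage sketch for the constructor above (not part of the original file): the enclosing
# class header sits outside this excerpt, so the class name below is hypothetical, and the sketch
# reuses the module's existing cv2/numpy imports. Preprocessing mirrors getPredictionCustom.
#
# predictor = PlaneNetPredictor()          # hypothetical class name
# img_ori = cv2.imread('example.jpg')      # placeholder path
# img = cv2.resize(img_ori, (WIDTH, HEIGHT)).astype(np.float32) / 255 - 0.5
# global_pred = predictor.sess.run(predictor.global_pred_dict,
#                                  feed_dict={predictor.img_inp: np.expand_dims(img, 0)})
# # global_pred['plane'], global_pred['segmentation'], etc. follow the layout used below.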
def getPredictionCustom(options):
    tf.reset_default_graph()

    options.batchSize = 1

    img_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')
    training_flag = tf.constant(False, tf.bool)

    options.gpu_id = 0
    global_pred_dict, local_pred_dict, deep_pred_dicts = build_graph(img_inp, img_inp, training_flag, options)

    var_to_restore = tf.global_variables()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    width_high_res = 640
    height_high_res = 480

    #image_list = glob.glob('../my_images/*.jpg') + glob.glob('../my_images/*.png') + glob.glob('../my_images/*.JPG')
    #image_list = glob.glob('../my_images/TV/*.jpg') + glob.glob('../my_images/TV/*.png') + glob.glob('../my_images/TV/*.JPG')
    image_list = glob.glob(options.customImageFolder + '/*.jpg') + glob.glob(options.customImageFolder + '/*.png') + glob.glob(options.customImageFolder + '/*.JPG')

    options.visualizeImages = min(options.visualizeImages, len(image_list))

    pred_dict = {}
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        #var_to_restore = [v for v in var_to_restore if 'res4b22_relu_non_plane' not in v.name]
        loader = tf.train.Saver(var_to_restore)
        loader.restore(sess, "%s/checkpoint.ckpt" % options.checkpoint_dir)
        #loader.restore(sess, options.fineTuningCheckpoint)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            predDepths = []
            predPlanes = []
            predSegmentations = []
            predSemantics = []

            predNonPlaneDepths = []
            predNonPlaneNormals = []
            predNonPlaneMasks = []
            predBoundaries = []

            images = []
            infos = []
            for index in xrange(min(options.startIndex + options.numImages, len(image_list))):
                if index % 10 == 0:
                    print(('image', index))
                    pass
                t0 = time.time()

                print(('image', index))

                img_ori = cv2.imread(image_list[index])
                images.append(img_ori)

                img = cv2.resize(img_ori, (WIDTH, HEIGHT))
                img = img.astype(np.float32) / 255 - 0.5
                img = np.expand_dims(img, 0)
                global_pred = sess.run(global_pred_dict, feed_dict={img_inp: img})

                if index < options.startIndex:
                    continue

                pred_p = global_pred['plane'][0]
                pred_s = global_pred['segmentation'][0]

                pred_np_m = global_pred['non_plane_mask'][0]
                pred_np_d = global_pred['non_plane_depth'][0]
                pred_np_n = global_pred['non_plane_normal'][0]

                #if global_gt['info'][0][19] > 1 and global_gt['info'][0][19] < 4 and False:
                    #pred_np_n = calcNormal(pred_np_d.squeeze(), global_gt['info'][0])
                    #pass

                #pred_b = global_pred['boundary'][0]
                predNonPlaneMasks.append(pred_np_m)
                predNonPlaneDepths.append(pred_np_d)
                predNonPlaneNormals.append(pred_np_n)
                #predBoundaries.append(pred_b)

                all_segmentations = np.concatenate([pred_s, pred_np_m], axis=2)

                info = np.zeros(20)
                if options.estimateFocalLength:
                    focalLength = estimateFocalLength(img_ori)
                    info[0] = focalLength
                    info[5] = focalLength
                    info[2] = img_ori.shape[1] / 2
                    info[6] = img_ori.shape[0] / 2
                    info[16] = img_ori.shape[1]
                    info[17] = img_ori.shape[0]
                    info[10] = 1
                    info[15] = 1
                    info[18] = 1000
                    info[19] = 5
                else:
                    info[0] = 2800.71
                    info[2] = 1634.45
                    info[5] = 2814.01
                    info[6] = 1224.18
                    info[16] = img_ori.shape[1]
                    info[17] = img_ori.shape[0]
                    info[10] = 1
                    info[15] = 1
                    info[18] = 1000
                    info[19] = 5
                    pass

                #print(focalLength)
                #cv2.imwrite('test/image.png', ((img[0] + 0.5) * 255).astype(np.uint8))
                #cv2.imwrite('test/segmentation.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
                #exit(1)

                infos.append(info)
                width_high_res = img_ori.shape[1]
                height_high_res = img_ori.shape[0]

                plane_depths = calcPlaneDepths(pred_p, width_high_res, height_high_res, info)

                pred_np_d = np.expand_dims(cv2.resize(pred_np_d.squeeze(), (width_high_res, height_high_res)), -1)
                all_depths = np.concatenate([plane_depths, pred_np_d], axis=2)

                all_segmentations = np.stack([cv2.resize(all_segmentations[:, :, planeIndex], (width_high_res, height_high_res)) for planeIndex in xrange(all_segmentations.shape[-1])], axis=2)

                segmentation = np.argmax(all_segmentations, 2)
                pred_d = all_depths.reshape(-1, options.numOutputPlanes + 1)[np.arange(height_high_res * width_high_res), segmentation.reshape(-1)].reshape(height_high_res, width_high_res)

                if 'semantics' in global_pred:
                    #cv2.imwrite('test/semantics.png', drawSegmentationImage(np.argmax(global_pred['semantics'][0], axis=-1)))
                    #exit(1)
                    predSemantics.append(np.argmax(global_pred['semantics'][0], axis=-1))
                else:
                    predSemantics.append(np.zeros((HEIGHT, WIDTH)))
                    pass

                predDepths.append(pred_d)
                predPlanes.append(pred_p)
                predSegmentations.append(all_segmentations)
                continue

            pred_dict['plane'] = np.array(predPlanes)
            pred_dict['segmentation'] = np.array(predSegmentations)
            pred_dict['depth'] = np.array(predDepths)
            #pred_dict['semantics'] = np.array(predSemantics)
            pred_dict['np_depth'] = np.array(predNonPlaneDepths)
            #pred_dict['np_normal'] = np.array(predNonPlaneNormals)
            pred_dict['np_mask'] = np.array(predNonPlaneMasks)
            pred_dict['image'] = np.array(images)
            pred_dict['info'] = np.array(infos)
            #pred_dict['boundary'] = np.array(predBoundaries)
            pass
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            pass

        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
        pass
    return pred_dict
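# The 20-element `info` vector built in getPredictionCustom above appears to pack the camera
# parameters as a flattened row-major 4x4 intrinsics matrix (fx at index 0, cx at 2, fy at 5,
# cy at 6, ones at 10 and 15), followed by image width/height at 16/17, a depth shift at 18,
# and a dataset flag at 19 -- that reading is inferred from the assignments above rather than
# from documentation. A small helper sketch under that assumption (not part of the original file):
def makeCameraInfo(fx, fy, cx, cy, width, height, depthShift=1000, datasetFlag=5):
    info = np.zeros(20)
    info[0], info[5] = fx, fy            # focal lengths
    info[2], info[6] = cx, cy            # principal point
    info[10] = info[15] = 1              # diagonal ones of the 4x4 matrix
    info[16], info[17] = width, height   # image size
    info[18] = depthShift                # depth shift (1000 in the code above)
    info[19] = datasetFlag               # dataset/scene-type id used elsewhere in the code
    return info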
def getPredictionScanNet(options):
    tf.reset_default_graph()

    options.batchSize = 1
    min_after_dequeue = 1000

    reader = RecordReaderAll()
    if options.dataset == 'SUNCG':
        filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_SUNCG_val.tfrecords'], num_epochs=10000)
    elif options.dataset == 'NYU_RGBD':
        filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_nyu_rgbd_val.tfrecords'], num_epochs=1)
        options.deepSupervision = 0
        options.predictLocal = 0
    elif options.dataset == 'matterport':
        filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_matterport_val.tfrecords'], num_epochs=1)
    else:
        filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_scannet_val.tfrecords'], num_epochs=1)
        pass

    img_inp, global_gt_dict, local_gt_dict = reader.getBatch(filename_queue, numOutputPlanes=options.numOutputPlanes, batchSize=options.batchSize, min_after_dequeue=min_after_dequeue, getLocal=True, random=False)

    training_flag = tf.constant(False, tf.bool)

    options.gpu_id = 0
    global_pred_dict, local_pred_dict, deep_pred_dicts = build_graph(img_inp, img_inp, training_flag, options)

    var_to_restore = tf.global_variables()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    width_high_res = 640
    height_high_res = 480

    pred_dict = {}
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        #var_to_restore = [v for v in var_to_restore if 'res4b22_relu_non_plane' not in v.name]
        loader = tf.train.Saver(var_to_restore)
        loader.restore(sess, "%s/checkpoint.ckpt" % options.checkpoint_dir)
        #loader.restore(sess, options.fineTuningCheckpoint)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            predDepths = []
            predPlanes = []
            predSegmentations = []

            predNonPlaneDepths = []
            predNonPlaneNormals = []
            predNonPlaneMasks = []

            images = []
            infos = []
            for index in xrange(options.startIndex + options.numImages):
                if index % 10 == 0:
                    print(('image', index))
                    pass
                t0 = time.time()

                img, global_gt, global_pred = sess.run([img_inp, global_gt_dict, global_pred_dict])

                if index < options.startIndex:
                    continue

                image = cv2.resize(((img[0] + 0.5) * 255).astype(np.uint8), (width_high_res, height_high_res))
                images.append(image)
                infos.append(global_gt['info'][0])

                pred_p = global_pred['plane'][0]
                pred_s = global_pred['segmentation'][0]

                pred_np_m = global_pred['non_plane_mask'][0]
                pred_np_d = global_pred['non_plane_depth'][0]
                pred_np_n = global_pred['non_plane_normal'][0]

                if global_gt['info'][0][19] > 1 and global_gt['info'][0][19] < 4 and False:
                    pred_np_n = calcNormal(pred_np_d.squeeze(), global_gt['info'][0])
                    pass

                #pred_b = global_pred['boundary'][0]
                predNonPlaneMasks.append(pred_np_m)
                predNonPlaneDepths.append(pred_np_d)
                predNonPlaneNormals.append(pred_np_n)
                #predBoundaries.append(pred_b)

                all_segmentations = np.concatenate([pred_s, pred_np_m], axis=2)

                plane_depths = calcPlaneDepths(pred_p, width_high_res, height_high_res, global_gt['info'][0])

                pred_np_d = np.expand_dims(cv2.resize(pred_np_d.squeeze(), (width_high_res, height_high_res)), -1)
                all_depths = np.concatenate([plane_depths, pred_np_d], axis=2)

                all_segmentations = np.stack([cv2.resize(all_segmentations[:, :, planeIndex], (width_high_res, height_high_res)) for planeIndex in xrange(all_segmentations.shape[-1])], axis=2)

                segmentation = np.argmax(all_segmentations, 2)
                pred_d = all_depths.reshape(-1, options.numOutputPlanes + 1)[np.arange(height_high_res * width_high_res), segmentation.reshape(-1)].reshape(height_high_res, width_high_res)

                predDepths.append(pred_d)
                predPlanes.append(pred_p)
                predSegmentations.append(all_segmentations)
                continue

            pred_dict['plane'] = np.array(predPlanes)
            pred_dict['segmentation'] = np.array(predSegmentations)
            pred_dict['depth'] = np.array(predDepths)
            #pred_dict['semantics'] = np.array(predSemantics)
            pred_dict['np_depth'] = np.array(predNonPlaneDepths)
            #pred_dict['np_normal'] = np.array(predNonPlaneNormals)
            pred_dict['np_mask'] = np.array(predNonPlaneMasks)
            pred_dict['image'] = np.array(images)
            pred_dict['info'] = np.array(infos)
            pass
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            pass

        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
        pass
    return pred_dict
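# The reshape/arange indexing used in both prediction functions above selects, for every pixel,
# the depth from whichever channel won the segmentation argmax (the plane channels plus the
# non-planar channel). A small self-contained sketch of the same indexing trick, with assumed
# shape names (not part of the original file; it reuses the module's numpy import):
def selectDepthBySegmentation(all_depths, segmentation):
    """all_depths: (H, W, K) per-channel depth maps; segmentation: (H, W) winning channel index."""
    h, w, k = all_depths.shape
    flat = all_depths.reshape(-1, k)                              # (H*W, K)
    picked = flat[np.arange(h * w), segmentation.reshape(-1)]     # one depth value per pixel
    return picked.reshape(h, w)                                   # fused (H, W) depth map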
def getPrediction(options, layout_planes):
    print(options.test_dir)
    if not os.path.exists(options.test_dir):
        os.system("mkdir -p %s" % options.test_dir)
        pass

    indices, room_layouts = getGroundTruth(options)

    #image_list = glob.glob('/home/chenliu/Projects/Data/LSUN/images/*.jpg')
    #image_list = glob.glob('/mnt/vision/NYU_RGBD/images/*.png')
    if options.dataset == 'NYU_RGBD':
        image_list = ['/mnt/vision/NYU_RGBD/images/' + ('%08d' % (image_index + 1)) + '.png' for image_index in indices]
    else:
        image_list = [filename.replace('RoomLayout_Hedau', 'RoomLayout_Hedau/Images').replace('_labels.mat', '.jpg') for filename in indices]
        #image_list = glob.glob('/mnt/vision/RoomLayout_Hedau/Images/*.png') + glob.glob('/mnt/vision/RoomLayout_Hedau/Images/*.jpg')
        pass

    options.numImages = min(options.numImages, len(image_list))

    tf.reset_default_graph()

    training_flag = tf.constant(False, tf.bool)

    img_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')

    options.gpu_id = 0
    if 'sample' in options.checkpoint_dir or 'hybrid_' in options.checkpoint_dir:
        global_pred_dict, local_pred_dict, deep_pred_dicts = build_graph_sample(img_inp, img_inp, training_flag, options)
    else:
        global_pred_dict, local_pred_dict, deep_pred_dicts = build_graph(img_inp, img_inp, training_flag, options)
        pass

    var_to_restore = tf.global_variables()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    # Default camera parameters; overwritten per image for non-NYU datasets below.
    info = np.zeros(20)
    info[0] = 5.1885790117450188e+02
    info[2] = 3.2558244941119034e+02
    info[5] = 5.1946961112127485e+02
    info[6] = 2.5373616633400465e+02
    info[10] = 1
    info[15] = 1
    info[16] = 640
    info[17] = 480
    info[18] = 1000
    info[19] = 1

    pred_dict = {}

    print(np.concatenate([np.expand_dims(np.arange(22), 1), ColorPalette(22).getColorMap()], axis=1))

    planeAreaThresholds = [WIDTH * HEIGHT / 400, WIDTH * HEIGHT / 400, WIDTH * HEIGHT / 400]
    dotThreshold = np.cos(np.deg2rad(60))

    width_high_res = 640
    height_high_res = 480

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        #var_to_restore = [v for v in var_to_restore if 'res4b22_relu_non_plane' not in v.name]
        loader = tf.train.Saver(var_to_restore)
        loader.restore(sess, "%s/checkpoint.ckpt" % options.checkpoint_dir)
        #loader.restore(sess, options.fineTuningCheckpoint)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            total_accuracy = 0
            predSegmentations = []
            predPlaneDepths = []
            predAllSegmentations = []
            predNormals = []
            for index in xrange(options.startIndex + options.numImages):
                if index < options.startIndex:
                    continue
                if options.imageIndex >= 0 and index != options.imageIndex:
                    continue
                if index % 10 == 0:
                    print(('image', index))
                    pass

                img_ori = cv2.imread(image_list[index])

                img = cv2.resize(img_ori, (WIDTH, HEIGHT))
                img = img.astype(np.float32) / 255 - 0.5
                t0 = time.time()

                global_pred = sess.run(global_pred_dict, feed_dict={img_inp: np.expand_dims(img, 0)})

                pred_p = global_pred['plane'][0]
                pred_s = global_pred['segmentation'][0]

                pred_np_m = global_pred['non_plane_mask'][0]
                pred_np_d = global_pred['non_plane_depth'][0]
                pred_np_n = global_pred['non_plane_normal'][0]

                pred_b = global_pred['boundary'][0]

                if options.dataset != 'NYU_RGBD':
                    # Estimate the camera parameters for datasets without known intrinsics.
                    info = np.zeros(info.shape)
                    focalLength = estimateFocalLength(img_ori)
                    info[0] = focalLength
                    info[5] = focalLength
                    info[2] = img_ori.shape[1] / 2
                    info[6] = img_ori.shape[0] / 2
                    info[16] = img_ori.shape[1]
                    info[17] = img_ori.shape[0]
                    info[10] = 1
                    info[15] = 1
                    info[18] = 1000
                    info[19] = 5
                    width_high_res = img_ori.shape[1]
                    height_high_res = img_ori.shape[0]
                    pass

                #all_segmentations = np.concatenate([pred_s, pred_np_m], axis=2)
                all_segmentations = pred_s
                plane_depths = calcPlaneDepths(pred_p, width_high_res, height_high_res, info)

                all_segmentations = softmax(all_segmentations)
                #segmentation = np.argmax(all_segmentations[:, :, :pred_s.shape[-1]], 2)
                segmentation = np.argmax(all_segmentations, 2)

                planeNormals = pred_p / np.maximum(np.linalg.norm(pred_p, axis=-1, keepdims=True), 1e-4)

                predSegmentations.append(segmentation)
                predPlaneDepths.append(plane_depths)
                predAllSegmentations.append(all_segmentations)
                predNormals.append(planeNormals)
                # The `continue` below skips the per-image layout evaluation that follows;
                # predictions are only collected and saved to .npy files after the loop.
                continue

                #print(pred_p)
                if True:
                    #all_depths = np.concatenate([plane_depths, np.expand_dims(cv2.resize(pred_np_d.squeeze(), (width_high_res, height_high_res)), -1)], axis=2)
                    #pred_d = all_depths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), segmentation.reshape(-1)].reshape(HEIGHT, WIDTH)
                    #cv2.imwrite(options.test_dir + '/' + str(index) + '_depth_pred.png', drawDepthImage(pred_d))

                    if options.imageIndex >= 0:
                        for planeIndex in xrange(options.numOutputPlanes):
                            cv2.imwrite(options.test_dir + '/mask_' + str(planeIndex) + '.png', drawMaskImage(all_segmentations[:, :, planeIndex]))
                            #cv2.imwrite(options.test_dir + '/mask_' + str(planeIndex) + '_depth.png', drawDepthImage(plane_depths[:, :, planeIndex]))
                            continue
                        pass

                    cv2.imwrite(options.test_dir + '/' + str(index) + '_segmentation_pred.png', drawSegmentationImage(all_segmentations[:, :, :options.numOutputPlanes], blackIndex=options.numOutputPlanes))
                    cv2.imwrite(options.test_dir + '/' + str(index) + '_image.png', img_ori)

                    # For each of the first two layout categories, keep the candidate plane
                    # with the largest segmentation area (or -1 if none is large enough).
                    layout_plane_inds = []
                    for layoutIndex, planeInds in enumerate(layout_planes[:2]):
                        maxArea = 0
                        for planeIndex in planeInds:
                            area = (all_segmentations[:, :, planeIndex]).sum()
                            #area = (segmentation == planeIndex).sum()
                            if area > maxArea:
                                layout_plane_index = planeIndex
                                maxArea = area
                                pass
                            continue
                        if maxArea > planeAreaThresholds[layoutIndex]:
                            layout_plane_inds.append(layout_plane_index)
                        else:
                            layout_plane_inds.append(-1)
                            pass
                        continue

                    # Wall candidates: planes in the third layout category with enough area.
                    walls = []
                    for planeIndex in layout_planes[2]:
                        area = (all_segmentations[:, :, planeIndex]).sum()
                        #area = (segmentation == planeIndex).sum()
                        if area > planeAreaThresholds[2]:
                            walls.append(planeIndex)
                            pass
                        #print(planeIndex, area)
                        continue

                    # Search over subsets of up to three walls and keep the combination whose
                    # nearest-plane rendering overlaps the predicted segmentation the most.
                    best_layout_plane_inds = layout_plane_inds + walls
                    bestScore = 0
                    for numWalls in xrange(1, min(len(walls), 3) + 1):
                        for selectedWalls in itertools.combinations(walls, numWalls):
                            selected_plane_inds = np.array(layout_plane_inds + list(selectedWalls))
                            depths = []
                            for wall in selected_plane_inds:
                                depths.append(plane_depths[:, :, wall])
                                continue
                            depths.append(np.full((height_high_res, width_high_res), 10))
                            depths = np.stack(depths, axis=2)
                            selected_plane_segmentation = np.argmin(depths, 2)
                            emptyMask = selected_plane_segmentation == depths.shape[-1] - 1
                            selected_plane_segmentation = selected_plane_inds[np.minimum(selected_plane_segmentation.reshape(-1), selected_plane_inds.shape[0] - 1)].reshape(selected_plane_segmentation.shape)
                            selected_plane_segmentation[emptyMask] = -1
                            #overlap = (selected_plane_segmentation == segmentation).sum()
                            overlap = 0
                            for planeIndex in xrange(options.numOutputPlanes):
                                overlap += all_segmentations[:, :, planeIndex][selected_plane_segmentation == planeIndex].sum()
                                continue
                            if overlap > bestScore:
                                best_layout_plane_inds = selected_plane_inds
                                bestScore = overlap
                                pass
                            continue
                        continue
                    layout_plane_inds = best_layout_plane_inds

                    layout_plane_depths = []
                    for planeIndex in layout_plane_inds:
                        if planeIndex >= 0:
                            layout_plane_depths.append(plane_depths[:, :, planeIndex])
                        else:
                            layout_plane_depths.append(np.ones((height_high_res, width_high_res)) * 10)
                            pass
                        continue

                    layout_plane_depths = np.stack(layout_plane_depths, axis=2)
                    layout_pred = np.argmin(layout_plane_depths, axis=-1) + 1
                    layout_gt = room_layouts[index]
                    layout_pred_img = drawSegmentationImage(layout_pred)
                    #cv2.imwrite(options.test_dir + '/' + str(index) + '_layout_pred.png', layout_pred_img)
                    #cv2.imwrite(options.test_dir + '/' + str(index) + '_layout_pred.png', img_ori / 2 + layout_pred_img / 2)

                    layout_plane_inds = np.array(layout_plane_inds)
                    layout_segmentation_img = layout_plane_inds[layout_pred.reshape(-1) - 1].reshape(layout_pred.shape)
                    layout_segmentation_img[layout_segmentation_img == -1] = options.numOutputPlanes
                    cv2.imwrite(options.test_dir + '/' + str(index) + '_layout_pred.png', drawSegmentationImage(layout_segmentation_img, blackIndex=options.numOutputPlanes))
                    cv2.imwrite(options.test_dir + '/' + str(index) + '_layout_gt.png', drawSegmentationImage(layout_gt, blackIndex=0))

                    pred_d = plane_depths.reshape(-1, options.numOutputPlanes)[np.arange(width_high_res * height_high_res), cv2.resize(segmentation, (width_high_res, height_high_res), interpolation=cv2.INTER_NEAREST).reshape(-1)].reshape(height_high_res, width_high_res)
                    cv2.imwrite(options.test_dir + '/' + str(index) + '_depth_pred.png', drawDepthImage(pred_d))

                    # Shift predicted wall labels so they line up with the ground-truth
                    # middle-wall label before computing pixel accuracy.
                    numWalls = layout_plane_inds.shape[0] - 2
                    if numWalls == 2:
                        gtMiddleWallMask = layout_gt == 4
                        leftWallScore = np.logical_and(layout_pred == 3, gtMiddleWallMask).sum()
                        middleWallScore = np.logical_and(layout_pred == 4, gtMiddleWallMask).sum()
                        rightWallScore = np.logical_and(layout_pred == 5, gtMiddleWallMask).sum()
                        if leftWallScore > middleWallScore:
                            layout_pred[layout_pred >= 3] += 1
                            pass
                        if rightWallScore > middleWallScore:
                            layout_pred[layout_pred >= 3] -= 1
                            pass
                        pass
                    if numWalls == 1:
                        layout_pred[layout_pred == 3] += 1
                        pass

                    accuracy = float((layout_pred == layout_gt).sum()) / (width_high_res * height_high_res)
                    print((index, accuracy))
                    total_accuracy += accuracy
                    pass

                if options.imageIndex >= 0:
                    exit(1)
                    pass
                continue

            segmentations = np.array(predSegmentations)
            np.save('test/segmentation.npy', segmentations)
            planeDepths = np.array(predPlaneDepths)
            np.save('test/plane_depths.npy', planeDepths)
            predAllSegmentations = np.array(predAllSegmentations)
            np.save('test/all_segmentations.npy', predAllSegmentations)
            predNormals = np.array(predNormals)
            np.save('test/normals.npy', predNormals)
            print('accuracy', total_accuracy / options.numImages)
            pass
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            pass

        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
        pass
    return pred_dict
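# The layout prediction in getPrediction above follows a nearest-plane-wins rule: stack the depth
# maps of the selected layout planes and label each pixel with the closest one. A short
# illustrative sketch of that rule (not part of the original file; shapes are assumptions):
def renderLayout(layout_plane_depth_maps):
    """layout_plane_depth_maps: list of (H, W) depth maps, one per selected layout plane.
    Returns an (H, W) label map with values 1..len(list), matching layout_pred above."""
    depth_stack = np.stack(layout_plane_depth_maps, axis=2)   # (H, W, P)
    return np.argmin(depth_stack, axis=-1) + 1                # closest plane claims each pixel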
WIDTH = 256
HEIGHT = 192

ALL_TITLES = ['PlaneNet']
ALL_METHODS = [('sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0', '', 0, 2)]

batchSize = 1
tf.reset_default_graph()

img_inp = tf.placeholder(tf.float32, shape=[batchSize, HEIGHT, WIDTH, 3], name='image')
training_flag = tf.constant(False, tf.bool)

options = parse_args()
global_pred_dict, _, _ = build_graph(img_inp, img_inp, training_flag, options)
var_to_restore = tf.global_variables()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

sess = tf.Session(config=config)
sess.run(init_op)
loader = tf.train.Saver(var_to_restore)
path = os.path.dirname(os.path.realpath(__file__))
checkpoint_dir = path + '/checkpoint/sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0'