def evaluate_model(model_name, rgb_img, voxel_gt):
    # not running on any GPU, using only the CPU
    config = tf.ConfigProto(device_count={'GPU': 0})
    # run on GPU instead:
    # os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    # config = tf.ConfigProto(log_device_placement=False)
    # config.gpu_options.allow_growth = False
    # config.gpu_options.allocator_type = 'BFC'
    with tf.Graph().as_default() as graph:
        with tf.Session(config=config) as sess:
            _, input, model = load_model_with_structure(model_name, graph, sess)
            # confusion-matrix counts; obstacle is the positive class
            # (same convention as voxel_true_positive_error / voxel_false_positive_error)
            fn = tf.reduce_sum(tf.cast(losses.is_obstacle(voxel_gt) & losses.is_free(model), dtype=tf.float32))
            tp = tf.reduce_sum(tf.cast(losses.is_obstacle(voxel_gt) & losses.is_obstacle(model), dtype=tf.float32))
            tn = tf.reduce_sum(tf.cast(losses.is_free(voxel_gt) & losses.is_free(model), dtype=tf.float32))
            fp = tf.reduce_sum(tf.cast(losses.is_free(voxel_gt) & losses.is_obstacle(model), dtype=tf.float32))
            pred_voxels, fn_val, tn_val, tp_val, fp_val = sess.run(
                [model, fn, tn, tp, fp], feed_dict={input: rgb_img})
    return pred_voxels, fn_val, tn_val, tp_val, fp_val
def voxel_iou_error(voxel_gt, voxel_pred):
    # intersection over union of the occupied voxels, see https://arxiv.org/pdf/1604.00449.pdf
    obst_pred = is_obstacle(voxel_pred)
    obst_gt = is_obstacle(voxel_gt)
    intersection = tf.reduce_sum(tf.cast(obst_gt & obst_pred, dtype=tf.float32))
    union = tf.reduce_sum(tf.cast(obst_gt | obst_pred, dtype=tf.float32))
    return intersection / union
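# Hedged sketch: a NumPy-only equivalent of voxel_iou_error on a toy 2x2x2 occupancy grid,
# useful for sanity-checking the TF version by hand. It assumes occupancy is encoded as
# 0 = free, 1 = obstacle (matching the is_obstacle()/is_free() convention used above);
# the helper name and the toy grids are illustrative only.
def _voxel_iou_numpy_example():
    import numpy as np
    gt = np.array([[[1, 0], [1, 1]], [[0, 0], [1, 0]]])
    pred = np.array([[[1, 0], [0, 1]], [[0, 1], [1, 0]]])
    intersection = np.sum((gt == 1) & (pred == 1))  # 3 voxels occupied in both
    union = np.sum((gt == 1) | (pred == 1))         # 5 voxels occupied in either
    return intersection / union                     # 3 / 5 = 0.6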
def voxel_true_positive_error(voxel_gt, voxel_pred):
    # formula is true positive rate = TP / (FN + TP)
    # FN = false negative: voxel_gt == 1 (obstacle) & voxel_pred == 0 (free)
    # TP = true positive:  voxel_gt == 1 (obstacle) & voxel_pred == 1 (obstacle)
    fn = tf.reduce_sum(
        tf.cast(is_obstacle(voxel_gt) & is_free(voxel_pred), dtype=tf.float32))
    tp = tf.reduce_sum(
        tf.cast(is_obstacle(voxel_gt) & is_obstacle(voxel_pred), dtype=tf.float32))
    return tp / (fn + tp)
def predict_voxels_to_pointcloud(batch_rgb, batch_depths, model_names, batch=0):
    # save the original RGB inputs and ground-truth voxelmaps as pointclouds
    for i in range(Network.BATCH_SIZE):
        im = Image.fromarray(batch_rgb[i, :, :, :].astype(np.uint8))
        im.save("evaluate-voxel/orig-rgb-{}-batch-{}.png".format(i, batch))
        voxels = batch_depths[i, :, :, :]
        pcl = grid_voxelmap_to_pointcloud(voxels)
        save_pointcloud_csv(pcl.T[:, 0:3], "evaluate-voxel/orig-voxelmap-{}-batch-{}.csv".format(i, batch))

    for model_name in model_names:
        pred_voxels, _, _, _, _ = evaluate_model(model_name, batch_rgb, batch_depths)
        # metrics, pred_voxels = calculate_voxel_metrics(model_name, batch_rgb, batch_depths)
        # print('metrics', metrics)

        # saving predicted voxelmaps as images and pointclouds
        for i in range(Network.BATCH_SIZE):
            pred_voxelmap = pred_voxels[i, :, :, :]
            np.save("evaluate-voxel/pred-voxelmap-{}-{}-batch-{}.npy".format(i, model_name, batch), pred_voxelmap)
            pcl = grid_voxelmap_to_pointcloud(losses.is_obstacle(pred_voxelmap))
            pcl_values = grid_voxelmap_to_paraview_pointcloud(pred_voxelmap)
            save_pointcloud_csv(pcl.T[:, 0:3], "evaluate-voxel/pred-voxelmap-{}-{}-batch-{}.csv".format(i, model_name, batch))
            save_pointcloud_csv(pcl_values.T[:, 0:4], "evaluate-voxel/pred-voxelmap-paraview-{}-{}-batch-{}.csv".format(i, model_name, batch), True)
def voxel_false_positive_error(voxel_gt, voxel_pred):
    # formula is false positive rate = FP / (FP + TN)
    # TN = true negative:  voxel_gt == 0 (free) & voxel_pred == 0 (free)
    # FP = false positive: voxel_gt == 0 (free) & voxel_pred == 1 (obstacle)
    print('voxel_gt.shape', voxel_gt.shape)
    print('voxel_pred.shape', voxel_pred.shape)
    tn = tf.reduce_sum(
        tf.cast(is_free(voxel_gt) & is_free(voxel_pred), dtype=tf.float32))
    fp = tf.reduce_sum(
        tf.cast(is_free(voxel_gt) & is_obstacle(voxel_pred), dtype=tf.float32))
    return fp / (fp + tn)
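# Hedged sketch: the true/false positive rates from voxel_true_positive_error and
# voxel_false_positive_error reproduced with NumPy on toy flattened occupancy vectors,
# handy for checking the formulas by hand. Names and values are illustrative only.
def _voxel_rates_numpy_example():
    import numpy as np
    gt = np.array([1, 1, 1, 0, 0, 0, 0, 1])   # 1 = obstacle, 0 = free
    pred = np.array([1, 0, 1, 0, 1, 0, 0, 1])
    tp = np.sum((gt == 1) & (pred == 1))       # 3
    fn = np.sum((gt == 1) & (pred == 0))       # 1
    fp = np.sum((gt == 0) & (pred == 1))       # 1
    tn = np.sum((gt == 0) & (pred == 0))       # 3
    tpr = tp / (tp + fn)                       # 3 / 4 = 0.75
    fpr = fp / (fp + tn)                       # 1 / 4 = 0.25
    return tpr, fpr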
def predict_voxels_to_pointcloud_multibatch(len_images, imgs, depths, model_names):
    # imgs = tf.Print(imgs, [imgs, depths], 'running input rgb batch')
    # for CPU
    # config = tf.ConfigProto(device_count={'GPU': 0})
    # for GPU
    config = tf.ConfigProto(log_device_placement=False)
    config.gpu_options.allow_growth = False
    config.gpu_options.allocator_type = 'BFC'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # first pass: dump the original RGB inputs and ground-truth voxelmaps
    with tf.Session(config=config) as sess:
        for batch in range(int(len_images / Network.BATCH_SIZE)):
            batch_rgb, batch_depths = sess.run([imgs, depths])
            for i in range(Network.BATCH_SIZE):
                im = Image.fromarray(batch_rgb[i, :, :, :].astype(np.uint8))
                im.save("evaluate-voxel/orig-rgb-{}-batch-{}.png".format(i, batch))
                voxels = batch_depths[i, :, :, :]
                pcl = grid_voxelmap_to_pointcloud(voxels)
                save_pointcloud_csv(pcl.T[:, 0:3], "evaluate-voxel/orig-voxelmap-{}-batch-{}.csv".format(i, batch))

    print('evaluation loaded, going to evaluation on dataset')

    # second pass: run each model over all batches and dump the predictions
    for model_name in model_names:
        print('going to evaluate model {}'.format(model_name))
        graph = tf.get_default_graph()
        with tf.Session(config=config) as sess:
            _, input, model = load_model_with_structure(model_name, graph, sess)
            for batch in range(int(len_images / Network.BATCH_SIZE)):
                batch_rgb, batch_depths = sess.run([imgs, depths])
                pred_voxels = inference(model, input, batch_rgb, sess)
                # saving predicted voxelmaps as images and pointclouds
                for i in range(Network.BATCH_SIZE):
                    pred_voxelmap = pred_voxels[i, :, :, :]
                    np.save("evaluate-voxel/pred-voxelmap-{}-{}-batch-{}.npy".format(i, model_name, batch), pred_voxelmap)
                    pcl = grid_voxelmap_to_pointcloud(losses.is_obstacle(pred_voxelmap))
                    pcl_values = grid_voxelmap_to_paraview_pointcloud(pred_voxelmap)
                    save_pointcloud_csv(pcl.T[:, 0:3], "evaluate-voxel/pred-voxelmap-{}-{}-batch-{}.csv".format(i, model_name, batch))
                    save_pointcloud_csv(pcl_values.T[:, 0:4], "evaluate-voxel/pred-voxelmap-paraview-{}-{}-batch-{}.csv".format(i, model_name, batch), True)
def voxelmap_to_depth(voxels):
    # visualizes a voxelmap as a depth image
    depth_size = voxels.shape[3].value
    # based on https://stackoverflow.com/questions/45115650/how-to-find-tensorflow-max-value-index-but-the-value-is-repeat
    # indices start at 1 so the index side never multiplies by 0;
    # only a 0 (free voxel) in the voxelmap forces the product to 0
    indices = tf.range(1, depth_size + 1)
    indices = tf.expand_dims(indices, 0)
    indices = tf.expand_dims(indices, 0)
    indices = tf.expand_dims(indices, 0)
    depth = tf.argmax(
        tf.multiply(
            tf.cast(losses.is_obstacle(voxels), dtype=tf.int32),
            tf.tile(indices, [BATCH_SIZE, dataset.TARGET_HEIGHT, dataset.TARGET_WIDTH, 1])),
        axis=3, output_type=tf.int32)
    depth = tf.scalar_mul(
        tf.constant(255 / depth_size, dtype=tf.float32),
        tf.cast(depth, dtype=tf.float32))  # normalize to use the full range of PNG values
    return depth
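# Hedged sketch: the index-multiplication trick from voxelmap_to_depth shown with NumPy on a
# single ray of 5 depth cells (names and values are illustrative only). Multiplying occupancy
# by 1-based depth indices and taking argmax picks the deepest occupied cell along the ray;
# an all-free ray yields index 0.
def _depth_from_occupancy_example():
    import numpy as np
    occupancy = np.array([0, 1, 0, 1, 0])         # 1 = obstacle along one camera ray
    indices = np.arange(1, occupancy.size + 1)    # [1, 2, 3, 4, 5], 1-based on purpose
    depth_index = np.argmax(occupancy * indices)  # argmax of [0, 2, 0, 4, 0] -> 3 (1-based depth 4)
    return depth_index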
def voxel_l1_dist_with_unknown(voxel_gt, voxel_pred):
    # simple L1 distance, but masked to known voxels only, see https://arxiv.org/pdf/1612.00101.pdf
    known_mask = tf.cast(losses.get_known_mask(voxel_gt), dtype=tf.float32)
    obst_pred = tf.cast(is_obstacle(voxel_pred), dtype=tf.float32)
    return tf.reduce_mean(
        tf.reduce_sum(known_mask * tf.abs(voxel_gt - obst_pred), [1, 2, 3]))
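# Hedged sketch: the masked L1 distance from voxel_l1_dist_with_unknown reproduced with NumPy
# on a toy batch of two 1x1x3 voxelmaps. It assumes unknown voxels are marked by a mask value
# of 0 (whatever losses.get_known_mask produces); names and values are illustrative only.
def _masked_l1_numpy_example():
    import numpy as np
    gt = np.array([[[[1., 0., 1.]]], [[[0., 1., 0.]]]])          # shape (2, 1, 1, 3)
    pred_obstacle = np.array([[[[1., 1., 0.]]], [[[0., 1., 1.]]]])
    known = np.array([[[[1., 1., 0.]]], [[[1., 1., 1.]]]])        # 0 = unknown voxel, ignored
    per_sample = np.sum(known * np.abs(gt - pred_obstacle), axis=(1, 2, 3))  # [1., 1.]
    return per_sample.mean()                                      # 1.0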