Example #1
import os
import json

import numpy as np
import torch
import trimesh

# helpers assumed from the surrounding repo (not standard libraries):
# transforms, SceneDataset, TSDF, TSDFFusion, Renderer, eval_depth,
# eval_tsdf, eval_mesh, project_to_mesh

def process(info_file, save_path, total_scenes_index, total_scenes_count):
    # gt depth data loader
    width, height = 640, 480
    transform = transforms.Compose([
        transforms.ResizeImage((width, height)),
        transforms.ToTensor(),
    ])
    dataset = SceneDataset(info_file, transform, frame_types=['depth'])
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=None,
                                             batch_sampler=None, num_workers=2)
    scene = dataset.info['scene']

    # get info about tsdf
    file_tsdf_pred = os.path.join(save_path, '%s.npz' % scene)
    temp = TSDF.load(file_tsdf_pred)
    voxel_size = int(temp.voxel_size * 100)
    
    # re-fuse to remove hole filling since filled holes are penalized in 
    # mesh metrics
    vol_dim = list(temp.tsdf_vol.shape)
    origin = temp.origin
    tsdf_fusion = TSDFFusion(vol_dim, float(voxel_size) / 100, origin, color=False)
    device = tsdf_fusion.device

    # mesh renderer
    renderer = Renderer()
    mesh_file = os.path.join(save_path, '%s.ply' % scene)
    mesh = trimesh.load(mesh_file, process=False)
    mesh_opengl = renderer.mesh_opengl(mesh)

    for i, d in enumerate(dataloader):
        if i % 25 == 0:
            print(total_scenes_index, total_scenes_count, scene, i, len(dataloader))

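        # compare ground-truth sensor depth to depth rendered from the
        # predicted mesh at the same camera pose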
        depth_trgt = d['depth'].numpy()
        _, depth_pred = renderer(height, width, d['intrinsics'], d['pose'], mesh_opengl)

        temp = eval_depth(depth_pred, depth_trgt)
        if i==0:
            metrics_depth = temp
        else:
            metrics_depth = {key: value + temp[key]
                             for key, value in metrics_depth.items()}

        # # play video visualizations of depth
        # viz1 = (np.clip((depth_trgt-.5)/5,0,1)*255).astype(np.uint8)
        # viz2 = (np.clip((depth_pred-.5)/5,0,1)*255).astype(np.uint8)
        # viz1 = cv2.applyColorMap(viz1, cv2.COLORMAP_JET)
        # viz2 = cv2.applyColorMap(viz2, cv2.COLORMAP_JET)
        # viz1[depth_trgt==0]=0
        # viz2[depth_pred==0]=0
        # viz = np.hstack((viz1,viz2))
        # cv2.imshow('test', viz)
        # cv2.waitKey(1)

        tsdf_fusion.integrate((d['intrinsics'] @ d['pose'].inverse()[:3, :]).to(device),
                              torch.as_tensor(depth_pred).to(device))


    metrics_depth = {key: value / len(dataloader)
                     for key, value in metrics_depth.items()}

    # save trimmed mesh
    file_mesh_trim = os.path.join(save_path, '%s_trim.ply' % scene)
    tsdf_fusion.get_tsdf().get_mesh().export(file_mesh_trim)

    # eval tsdf
    file_tsdf_trgt = dataset.info['file_name_vol_%02d' % voxel_size]
    metrics_tsdf = eval_tsdf(file_tsdf_pred, file_tsdf_trgt)

    # eval trimmed mesh
    file_mesh_trgt = dataset.info['file_name_mesh_gt']
    metrics_mesh = eval_mesh(file_mesh_trim, file_mesh_trgt)

    # transfer labels from pred mesh to gt mesh using nearest neighbors
    file_attributes = os.path.join(save_path, '%s_attributes.npz' % scene)
    if os.path.exists(file_attributes):
        mesh.vertex_attributes = np.load(file_attributes)
        print(mesh.vertex_attributes)
        mesh_trgt = trimesh.load(file_mesh_trgt, process=False)
        mesh_transfer = project_to_mesh(mesh, mesh_trgt, 'semseg')
        semseg = mesh_transfer.vertex_attributes['semseg']
        # save as txt for benchmark evaluation
        np.savetxt(os.path.join(save_path, '%s.txt' % scene), semseg, fmt='%d')
        mesh_transfer.export(os.path.join(save_path, '%s_transfer.ply' % scene))

        # TODO: semseg val evaluation

    metrics = {**metrics_depth, **metrics_mesh, **metrics_tsdf}
    print(metrics)

    rslt_file = os.path.join(save_path, '%s_metrics.json' % scene)
    with open(rslt_file, 'w') as f:
        json.dump(metrics, f)

    return scene, metrics
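
For context, here is a minimal driver sketch showing how process() might be
invoked over a set of scenes. The info-file glob pattern and directory names
are assumptions for illustration, not part of the original code.

def evaluate_all_scenes(info_dir, save_path):
    # hypothetical layout: one info.json per scene under info_dir
    import glob
    info_files = sorted(glob.glob(os.path.join(info_dir, '*', 'info.json')))
    results = {}
    for idx, info_file in enumerate(info_files):
        scene, metrics = process(info_file, save_path, idx, len(info_files))
        results[scene] = metrics
    return results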
Example #2
import os
import json

import imageio

# assumed helpers (not shown here): read_array comes from COLMAP's
# python scripts for reading .geometric.bin depth maps; resize is
# assumed to be skimage.transform.resize; eval_depth, eval_mesh, and
# load_info_json come from this repo's evaluation utilities.

def eval_scene(info_file, pathout):
    """ Evaluates COLMAP inference compared to ground truth

    Args:
        info_file: path to info_json file for the scene
        pathout: path where intermediate and final results are stored
    """

    info = load_info_json(info_file)
    dataset = info['dataset']
    scene = info['scene']
    frames = info['frames']

    fnames = os.listdir(
        os.path.join(pathout, dataset, scene, 'stereo', 'depth_maps'))
    # keep only the frames for which COLMAP produced a depth map
    frames = [
        frame for frame in frames
        if os.path.split(frame['file_name_image'])[1] + '.geometric.bin' in fnames
    ]

    # 2d depth metrics
    for i, frame in enumerate(frames):
        if i % 25 == 0:
            print(scene, i, len(frames))

        fname_trgt = frame['file_name_depth']
        fname_pred = os.path.join(
            pathout, dataset, scene, 'stereo', 'depth_maps',
            os.path.split(frame['file_name_image'])[1] + '.geometric.bin')
        depth_trgt = imageio.imread(fname_trgt).astype('float32') / 1000  # mm -> m
        depth_pred = read_array(fname_pred)
        # ignore depth beyond 5 meters as it is probably wrong
        depth_pred[depth_pred > 5] = 0
        depth_pred = resize(depth_pred, depth_trgt.shape)

        temp = eval_depth(depth_pred, depth_trgt)
        if i == 0:
            metrics_depth = temp
        else:
            metrics_depth = {key: value + temp[key]
                             for key, value in metrics_depth.items()}

    metrics_depth = {key: value / len(frames)
                     for key, value in metrics_depth.items()}

    # 3d point metrics
    fname_pred = os.path.join(pathout, dataset, scene, 'fused.ply')
    fname_trgt = info['file_name_mesh_gt']
    metrics_mesh = eval_mesh(fname_pred, fname_trgt)

    metrics = {**metrics_depth, **metrics_mesh}
    print(metrics)

    rslt_file = os.path.join(pathout, dataset, scene, 'metrics.json')
    with open(rslt_file, 'w') as f:
        json.dump(metrics, f)

    return metrics
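
A hypothetical aggregation sketch: run eval_scene() over every scene's info
file and average the per-scene metrics. The simple mean over scenes is an
assumption for illustration, not part of the original code.

def eval_all_scenes(info_files, pathout):
    # info_files: list of paths to per-scene info.json files
    per_scene = [eval_scene(f, pathout) for f in info_files]
    keys = per_scene[0].keys()
    return {key: sum(m[key] for m in per_scene) / len(per_scene)
            for key in keys}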