def get_symbolic_model(load_pickle=True):
    filename = "../data/omnistereo_symbolic.pkl"
    filename_jacobian_func = "../data/omnistereo_jacobian_marshalled_func.pkl"

    if load_pickle:
        omnistereo_symbolic = common_tools.load_obj_from_pickle(filename)
    else:
        omnistereo_symbolic = HyperCataStereoSymbolic()
        omnistereo_symbolic.compute_Jacobian_matrix_for_cov(use_chain=False)
        common_tools.save_obj_in_pickle(omnistereo_symbolic, filename, locals())

    jacobian_marshalled = omnistereo_symbolic.lambdify_jacobian_as_code_string()
    common_tools.save_obj_in_pickle(jacobian_marshalled, filename_jacobian_func, locals())

    return omnistereo_symbolic
def load_marshalled_func_from_file(filename):
    marshalled_func = common_tools.load_obj_from_pickle(filename)
    return load_function(marshalled_func)
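# A minimal usage sketch (an assumption, not part of the original listing): the
# pickled symbolic model and the marshalled Jacobian produced by
# get_symbolic_model() can be reloaded in a later session without re-deriving
# the symbolic expressions. The pickle path below is the same hard-coded one
# used in get_symbolic_model().
def example_reload_symbolic_model():
    # Derive and pickle everything once (slow), ...
    omnistereo_symbolic = get_symbolic_model(load_pickle=False)
    # ... then on subsequent runs just load the cached model (fast):
    omnistereo_symbolic = get_symbolic_model(load_pickle=True)
    # The marshalled Jacobian function can also be restored on its own:
    jacobian_func = load_marshalled_func_from_file(
        "../data/omnistereo_jacobian_marshalled_func.pkl")
    return omnistereo_symbolic, jacobian_func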
def main_sos_vo():
    scene_path = osp.realpath(osp.expanduser(args.sequence_path[0]))
    gums_calibrated_filename = osp.realpath(
        osp.expanduser(args.calibrated_gums_file))
    visualize_VO = args.visualize_VO

    # HARD-CODED flags:
    # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
    use_perfect_model_VO = False  # WARNING: only valid when using SYNTHETIC images
    only_visualize_frames_from_existing_VO_poses_file = False  # <<< SETME: if True, VO is not run; poses are just reloaded from an existing experiment's file
    # NOTE: For DEBUG, set use_multithreads_for_VO <-- False
    use_multithreads_for_VO = True  # <<< The 3D visualization is more responsive when the VO runs in its own thread
    step_for_scene_images = 1
    first_image_index = 0
    last_image_index = -1  # -1 for up to the last one
    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    scene_prefix_filename = "image-*.png"
    scene_path_omni = osp.join(scene_path, "omni")
    scene_img_filename_template = osp.join(scene_path_omni,
                                           scene_prefix_filename)
    num_scene_images = len(
        fnmatch.filter(listdir(scene_path_omni), scene_prefix_filename))
    scene_path_vo_results = osp.join(scene_path,
                                     "results-omni")  # VO Results path
    make_sure_path_exists(scene_path_vo_results)
    path_to_scene_name, scene_name = osp.split(scene_path)

    vis_name = "%s-%s" % (scene_name, "SOS")
    if only_visualize_frames_from_existing_VO_poses_file:
        from omnistereo.common_plot import replay_VO_visualization
        replay_VO_visualization(scene_path_vo_results=scene_path_vo_results,
                                first_image_index=first_image_index,
                                last_image_index=last_image_index,
                                step_for_poses=step_for_scene_images,
                                vis_name=vis_name)
    else:
        from omnistereo.pose_est_tools import driver_VO
        # Use the thread_name parameter to name the VO Visualization window:
        if use_perfect_model_VO:
            print("Using 'theoretical' model for", vis_name)
            from omnistereo.cata_hyper_model import get_theoretical_OmniStereo
            from omnistereo.common_cv import get_images
            calib_img_filename_template = ""  # SETME: filename template of the calibration images
            radial_bounds_filename = ""  # SETME: path to the radial bounds file
            theoretical_params_filename = ""  # SETME: path to the theoretical parameters file
            model_version = "new"
            is_synthetic = False
            mirror_images = get_images(calib_img_filename_template,
                                       indices_list=[0],
                                       show_images=False,
                                       return_names_only=False)
            use_existing_radial_bounds = True
            hyperbolic_model_theoretical = get_theoretical_OmniStereo(
                omni_img=mirror_images,
                radial_bounds_filename=radial_bounds_filename,
                theoretical_params_filename=theoretical_params_filename,
                model_version=model_version,
                is_synthetic=is_synthetic,
                use_existing_radial_bounds=use_existing_radial_bounds)
            driver_VO(camera_model=hyperbolic_model_theoretical,
                      scene_path=scene_path_omni,
                      scene_path_vo_results=scene_path_vo_results,
                      scene_img_filename_template=scene_img_filename_template,
                      depth_filename_template=None,
                      num_scene_images=num_scene_images,
                      visualize_VO=visualize_VO,
                      use_multithreads_for_VO=use_multithreads_for_VO,
                      step_for_scene_images=step_for_scene_images,
                      first_image_index=first_image_index,
                      last_image_index=last_image_index,
                      thread_name=vis_name)
        else:
            # Attempting to just load the calibrated GUMS model
            gums_calibrated = load_obj_from_pickle(gums_calibrated_filename)
            driver_VO(camera_model=gums_calibrated,
                      scene_path=scene_path_omni,
                      scene_path_vo_results=scene_path_vo_results,
                      scene_img_filename_template=scene_img_filename_template,
                      depth_filename_template=None,
                      num_scene_images=num_scene_images,
                      visualize_VO=visualize_VO,
                      use_multithreads_for_VO=use_multithreads_for_VO,
                      step_for_scene_images=step_for_scene_images,
                      first_image_index=first_image_index,
                      last_image_index=last_image_index,
                      thread_name=vis_name)

    from omnistereo.common_cv import clean_up
    clean_up(wait_key_time=1)
    print("GOODBYE!")
def main_demo():
    is_synthetic = True
    model_version = "old"  # Set to "old" for the PUBLISHED params, or "new" for the new one
    experiment_name = "simple"  # "simple", "VICON", "CVPR", "with_misalignment-4", etc.  # <<<<<- SET For example, "VICON" uses ground truth data, otherwise use "simple"

    # HYPERBOLIC Parameters (Used in Publication):
    #===========================================================================
    # k1 = 5.7319  # Unitless
    # k2 = 9.7443  # Unitless
    # Using millimeters
    # r_sys = 37.0
    # r_reflex = 17.226
    # r_cam = 7.25
    # c1 = 123.488
    # c2 = 241.803
    # d = 233.684
    #===========================================================================
    # vvvvvvvvvvvvvvvvvvvvvvv OPTIONS vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
    load_model_from_file = True  # <<< SETME: to load omnistereo model from a pickle or start anew
    show_panoramic_img = True
    show_3D_model = False
    get_pointclouds = True
    compute_new_3D_points = True
    dense_cloud = True
    manual_point_selection = False
    save_pcd_point_cloud = False

    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    # vvvvvvvvvvvvvvvvvvvvvvv SETUP vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
    # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
    data_root = "data"  # The root folder for all data
    if is_synthetic:
        model_type = "synthetic"
    else:
        model_type = "real"
    data_path_prefix = osp.join(data_root, model_type, model_version,
                                experiment_name)
    # For SHOWING OFF: virtual office
    scene_name = "scene"
    scene_path = osp.join(
        data_path_prefix, scene_name
    )  # Pose estimation experiment: Translation on x only by 0, 25 cm and 75 cm (wrt init)
    scene_img_filename_template = osp.join(
        scene_path, "office-*.png")  # With PUBLISHED parameters
    # scene_img_filename_template = osp.join(data_path_prefix, scene_path, "office" + model_version + "-*.png")  # NEW design
    img_indices = []  # A predefined subset of image indices to work with out of the set
    img_index = 0  # <<<<<------ An arbitrary image index to work with out of the set

    omnistereo_model_filename = osp.join(data_path_prefix,
                                         "omnistereo-hyperbolic.pkl")
    # ------------------------------------------------
    radial_bounds_filename = osp.join(data_path_prefix, "radial_bounds.pkl")
    # ------------------------------------------------
    points_3D_filename_template = osp.join(
        scene_path, "3d_points-" + model_version + "-*.pkl")

    if get_pointclouds:
        points_3D_filename_template = "3d_points-*.pkl"
        features_detected_filename_template = "sparse_correspondences-*.pkl"
        if dense_cloud:
            points_3D_path = osp.join(scene_path, "cloud_dense")
        else:
            points_3D_path = osp.join(scene_path, "cloud_sparse")
        make_sure_path_exists(points_3D_path)
        stereo_tuner_filename = osp.join(scene_path, "stereo_tuner.pkl")

    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    from omnistereo.common_cv import has_opencv, get_images
    opencv_exists = has_opencv()

    omni_images_list = get_images(scene_img_filename_template,
                                  indices_list=img_indices,
                                  show_images=True)
    # Read params from file and scale the dimensional params from [cm] to [mm] units
    theoretical_params_filename = osp.join(
        data_root, "parameters-%s.txt" % (model_version))

    if load_model_from_file:
        omnistereo_model = load_obj_from_pickle(omnistereo_model_filename)
    else:
        # omni_img_filename = scene_img_filename_template.replace("*", str(img_index), 1)
        # omni_img = cv2.imread(omni_img_filename, 1)
        omni_img = omni_images_list[img_index]
        omnistereo_model = init_omnistereo_theoretical(
            omni_img,
            radial_bounds_filename,
            theoretical_params_filename,
            model_version,
            is_synthetic=is_synthetic)
        pano_width = np.pi * np.linalg.norm(
            omnistereo_model.bot_model.lowest_img_point -
            omnistereo_model.bot_model.precalib_params.center_point)
        omnistereo_model.set_current_omni_image(
            omni_img,
            pano_width_in_pixels=pano_width,
            generate_panoramas=True,
            idx=img_index,
            view=True)
        save_obj_in_pickle(omnistereo_model, omnistereo_model_filename,
                           locals())

    #===========================================================================
    # sanity_check(omnistereo_model)
    #===========================================================================

    # Get pixel from pano test
    u, v, m_homo = omnistereo_model.top_model.panorama.get_panorama_pixel_coords_from_direction_angles(
        theta=np.deg2rad([10., 11, 80, -10]),
        psi=np.deg2rad([1, 12., 360, 60]))

    if show_panoramic_img and opencv_exists:
        pano_win_name_prefix = "DEMO - "
        omnistereo_model.view_all_panoramas(
            scene_img_filename_template,
            img_indices,
            win_name_modifier=pano_win_name_prefix,
            use_mask=True,
            mask_color_RGB=(0, 255, 0))

    if show_3D_model:  # Figure 4 (MDPI Sensors journal article)
        try:
            # Drawing forward projection from 3D points:
            xw, yw, zw = 80, 10, 100
            # Pw = [(xw, yw, zw), (-xw, yw, zw), (xw, -yw, zw), (-xw, -yw, zw)]
            Pw = [(xw, yw, zw)]
            from omnistereo.common_plot import draw_fwd_projection_omnistereo
            draw_fwd_projection_omnistereo(omnistereo_model,
                                           Pw,
                                           verbose=True,
                                           fig_size=None)
            plt.show()  # Show both figures in separate windows
        except ImportError:
            print("MPLOT3D could not be imported for 3D visualization!")

            try:
                # NOTE: drawing with visvis and PyQt4 is troublesome when OpenCV is displaying windows that are using Qt5!!!
                # Drawing just the model:
                from omnistereo.common_plot import draw_omnistereo_model_visvis
                draw_omnistereo_model_visvis(omnistereo_model,
                                             finish_drawing=True,
                                             show_grid_box=False,
                                             mirror_transparency=0.5,
                                             show_reference_frame=True)
                # common_plot.draw_model_mono_visvis(omnistereo_model.top_model.theoretical_model, finish_drawing=True, show_grid_box=False, show_reference_frame=True)

            except ImportError:
                print("VISVIS could not be imported for 3D visualization!")
                try:
                    # USING Vispy:
                    from omnistereo.common_plot import draw_omnistereo_model_vispy
                    draw_omnistereo_model_vispy(omnistereo_model,
                                                show_grid=True,
                                                backend='pyqt4')
                except ImportError:
                    print("VISPY could not be imported for 3D visualization!")

    # UNCOMMENT THE FOLLOWING CODE BLOCKS AS DESIRED:
    # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

    #===========================================================================
    # Draw a single mirror's profile in 2D
    # from omnistereo.common_plot import draw_fwd_projection
    # draw_fwd_projection(omnistereo_model.top_model)
    #===========================================================================

    # Drawing backprojected pixels AND covariance ellipsoids
    #===========================================================================
    # # Warning: Don't use anything less than 10 because it will be hard to visualize
    # pixels_to_skew = 0
    # #     delta_pixel = np.array([[65, 40, 20, 10]])
    # delta_pixel = np.array([[75., 47.4, 23.7]])
    # # For example, 1 pixel disparity produces a convergence about 50 m away
    # # whereas, with a disparity of 10 pixels, the direction rays converge around 2.5 m away (horizontal range).
    # #     m1 = np.array([[[920, cam_mirror1.v_center, 1], [940, cam_mirror1.v_center, 1], [950, cam_mirror1.v_center, 1], [960, cam_mirror1.v_center, 1]]])
    # m1 = np.array([[[920, omnistereo_model.top_model.precalib_params.v_center, 1], [930, omnistereo_model.top_model.precalib_params.v_center, 1], [950, omnistereo_model.top_model.precalib_params.v_center, 1]]])
    # az1, el1 = omnistereo_model.top_model.get_direction_angles_from_pixel(m1)
    # u2, v2, m2_same_el1 = omnistereo_model.bot_model.get_pixel_from_direction_angles(az1, el1)
    # m2 = np.dstack((u2 - delta_pixel, v2 - pixels_to_skew))  # Needs to decrease delta_pixel pixels (disparity) on u2 (only for this example on the u-axis) so the elevation on mirror 2 increases for convergence
    # from omnistereo.common_plot import draw_bak_projection_visvis
    # draw_bak_projection_visvis(omnistereo_model, m1, m2, number_of_std_deviations=1, draw_covariance=True, line_thickness=2, show_grid_box=True, show_labels=False, plot_density_function=False)
    #===========================================================================

    # Drawing a single point backprojected AND its covariance ellipsoids
    #===========================================================================
    #===========================================================================
    # pixels_to_skew = 0
    # delta_pixel = np.array([[150]])
    # m1 = np.array([[[920, omnistereo_model.top_model.precalib_params.v_center, 1]]])
    # az1, el1 = omnistereo_model.top_model.get_direction_angles_from_pixel(m1)
    # m2 = np.dstack((m1[..., 0] - delta_pixel * np.cos(az1), m1[..., 1] - delta_pixel * np.sin(az1) - pixels_to_skew))  # Needs to decrease delta_pixel pixels (disparity) on u2 (only for this example on the u-axis) so the elevation on mirror 2 increases for convergence
    #===========================================================================
    #===========================================================================
    # Using visvis:
    # from omnistereo.common_plot import draw_bak_projection_visvis
    # draw_bak_projection_visvis(omnistereo_model, m1, m2, number_of_std_deviations=1, draw_covariance=True, plot_density_function=True)
    #===========================================================================
    #===========================================================================
    # Using matplotlib only:
    # from omnistereo.common_plot import draw_bak_projection
    # draw_bak_projection(omnistereo_model, m1, m2)
    #===========================================================================

    #===========================================================================
    # # Figure 9 (Sensors Journal article)
    # from omnistereo.common_plot import plot_k_vs_rsys_for_vFOV
    # plot_k_vs_rsys_for_vFOV(omnistereo_model.top_model, fig_size=None)
    #===========================================================================

    #===========================================================================
    # # Figure 10 (Sensors Journal article)
    # from omnistereo.common_plot import plot_k_vs_baseline_for_vFOV
    # plot_k_vs_baseline_for_vFOV(omnistereo_model, fig_size=None)
    #===========================================================================

    #===========================================================================
    # from omnistereo.common_plot import plot_mirror_profiles
    # plot_mirror_profiles(omnistereo_model)
    #===========================================================================

    #===========================================================================
    # # Figure 11 (Sensors Journal article)
    # from omnistereo.common_plot import plot_catadioptric_spatial_resolution_vs_k
    # plot_catadioptric_spatial_resolution_vs_k(omnistereo_model, fig_size=None, legend_location=None)
    #===========================================================================

    # Plotting spatial resolution

    #===========================================================================
    # from omnistereo.common_plot import plot_perspective_camera_spatial_resolution
    # plot_perspective_camera_spatial_resolution(omnistereo_model.top_model.precalib_params, in_2D=True)
    #===========================================================================

    #===========================================================================
    # from omnistereo.common_plot import plot_catadioptric_spatial_resolution_by_BakerNayar
    # plot_catadioptric_spatial_resolution_by_BakerNayar(omnistereo_model)
    #===========================================================================

    #===========================================================================
    # # Figure 12 (Sensors Journal article)
    # from omnistereo.common_plot import plot_catadioptric_spatial_resolution
    # plot_catadioptric_spatial_resolution(omnistereo_model, in_2D=True, eta_max=18, fig_size=None)
    #===========================================================================

    # Range variation:
    #===========================================================================
    # Pns_high = omnistereo_model.get_triangulated_point_wrt_Oc(omnistereo_model.top_model.highest_elevation_angle, omnistereo_model.bot_model.highest_elevation_angle, 0)
    # Pns_mid = omnistereo_model.get_triangulated_point_wrt_Oc(omnistereo_model.top_model.lowest_elevation_angle, omnistereo_model.bot_model.highest_elevation_angle, 0)
    # Pns_low = omnistereo_model.get_triangulated_point_wrt_Oc(omnistereo_model.top_model.lowest_elevation_angle, omnistereo_model.bot_model.lowest_elevation_angle, 0)
    # print(Pns_high, Pns_mid, Pns_low)
    # hor_range_min_for_plot = min(Pns_low[0, 0, 0], Pns_high[0, 0, 0])
    # vert_range_min_for_plot = min(Pns_low[0, 0, 2], Pns_high[0, 0, 2])
    # vert_range_max_for_plot = max(Pns_low[0, 0, 2], Pns_high[0, 0, 2])
    # delta_z_mirror1, z_level_1 = omnistereo_model.top_model.get_vertical_range_variation(hor_range_min_for_plot)
    # delta_rho_mirror1, rho_level_1 = omnistereo_model.top_model.get_horizontal_range_variation(vert_range_min_for_plot)
    # delta_phi_mirror1, phi_level_1 = omnistereo_model.top_model.get_angular_range_variation(150)
    # delta_z_mirror2, z_level_2 = omnistereo_model.bot_model.get_vertical_range_variation(hor_range_min_for_plot)
    # delta_rho_mirror2, rho_level_2 = omnistereo_model.bot_model.get_horizontal_range_variation(vert_range_min_for_plot)
    # delta_phi_mirror2, phi_level_2 = omnistereo_model.bot_model.get_angular_range_variation(150)
    #===========================================================================

    #===========================================================================
    # # Figure 17 (Sensors Journal article)
    # from omnistereo.common_plot import plot_range_variation_due_to_pixel_disparity
    # plot_range_variation_due_to_pixel_disparity(omnistereo_model, disp_min=1, disp_max=100, fig_size=None)
    #===========================================================================

    #===========================================================================
    # from omnistereo.common_plot import plot_effect_of_pixel_disparity_on_range
    # plot_effect_of_pixel_disparity_on_range(omnistereo_model, disp_min=1, disp_max=100, disp_nums=5, use_log=True, plot_zoom=True, fig_size=None)
    #===========================================================================

    #===========================================================================
    # from omnistereo.common_plot import plot_vertical_range_variation
    # plot_vertical_range_variation(omnistereo_model, hor_range_max=30, depth_nums=5, use_meters=True, fig_size=None)
    #===========================================================================

    #===========================================================================
    # from omnistereo.common_plot import plot_horizontal_range_variation
    # plot_horizontal_range_variation(omnistereo_model, vertical_range_min=-500, vertical_range_max=500, depth_nums=5, use_meters=False, fig_size=None)
    #===========================================================================

    plt.show()  # Show both figures in separate windows

    if get_pointclouds:
        stereo_tuner_filename = osp.join(scene_path, "stereo_tuner.pkl")
        from omnistereo.common_plot import compute_pointclouds_simple
        compute_pointclouds_simple(
            omnistereo_model,
            omni_img_filename_template=None,
            img_indices=[img_index],
            compute_new_3D_points=compute_new_3D_points,
            dense_cloud=dense_cloud,
            manual_point_selection=manual_point_selection,
            load_stereo_tuner_from_pickle=True,
            save_pcl=save_pcd_point_cloud,
            points_3D_path=points_3D_path,
            stereo_tuner_filename=stereo_tuner_filename,
            tune_live=False,
            save_sparse_features=False,
            load_sparse_features_from_file=False)

    from omnistereo.common_cv import clean_up
    clean_up(wait_key_time=0)
def main_sos_vo():
    scene_path = osp.realpath(osp.expanduser(args.results_path[0]))
    gums_calibrated_filename = osp.realpath(
        osp.expanduser(args.calibrated_gums_file))
    visualize_VO = args.visualize_VO

    # HARD-CODED flags:
    # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
    use_perfect_model_VO = False  # WARNING: only valid when using SYNTHETIC images
    only_visualize_frames_from_existing_VO_poses_file = False  # <<< SETME: if True, VO is not run; poses are just reloaded from an existing experiment's file
    # NOTE: For DEBUG, set use_multithreads_for_VO <-- False
    use_multithreads_for_VO = True  # <<< The 3D visualization is more responsive when the VO runs in its own thread
    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    scene_path_vo_results = osp.join(scene_path,
                                     "results-omni")  # VO Results path
    make_sure_path_exists(scene_path_vo_results)
    vis_name = "LIVE-SOS"
    if only_visualize_frames_from_existing_VO_poses_file:
        from omnistereo.common_plot import replay_VO_visualization
        replay_VO_visualization(scene_path_vo_results=scene_path_vo_results,
                                first_image_index=0,
                                last_image_index=-1,
                                step_for_poses=1,
                                vis_name=vis_name)
    else:
        from omnistereo.pose_est_tools import driver_VO_live
        from omnistereo.webcam_live import WebcamLive, CamAsWorkingThread
        cam = WebcamLive(cam_index=0,
                         mirror_image=False,
                         file_name="",
                         cam_model="BLACKFLY",
                         show_img=True)
        success, omni_frame = cam.get_single_frame()
        # Use camera as a working thread in order to obtain the most recent image from the buffer:
        cam_working_thread = CamAsWorkingThread(cam=cam)

        # Use the thread_name parameter to name the VO Visualization window:
        if use_perfect_model_VO:
            print("Using 'theoretical' model for", vis_name)
            from omnistereo.cata_hyper_model import get_theoretical_OmniStereo
            from omnistereo.common_cv import get_images
            mirror_images = get_images(calib_img_filename_template,
                                       indices_list=[0],
                                       show_images=False,
                                       return_names_only=False)
            use_existing_radial_bounds = True
            hyperbolic_model_theoretical = get_theoretical_OmniStereo(
                omni_img=mirror_images,
                radial_bounds_filename=radial_bounds_filename,
                theoretical_params_filename=theoretical_params_filename,
                model_version=model_version,
                is_synthetic=is_synthetic,
                use_existing_radial_bounds=use_existing_radial_bounds)
            driver_VO_live(camera_model=hyperbolic_model_theoretical,
                           scene_path_vo_results=scene_path_vo_results,
                           cam_working_thread=cam_working_thread,
                           visualize_VO=visualize_VO,
                           use_multithreads_for_VO=use_multithreads_for_VO,
                           thread_name=vis_name)
        else:
            # Attempting to just load the calibrated GUMS model
            gums_calibrated = load_obj_from_pickle(gums_calibrated_filename)
            driver_VO_live(camera_model=gums_calibrated,
                           scene_path_vo_results=scene_path_vo_results,
                           cam_working_thread=cam_working_thread,
                           visualize_VO=visualize_VO,
                           use_multithreads_for_VO=use_multithreads_for_VO,
                           thread_name=vis_name)

    from omnistereo.common_cv import clean_up
    clean_up(wait_key_time=1)
    print("GOODBYE!")