Example #1
def test_optimize_fast():
    b_true, b_init = create_test_problem(noise=0)

    ba = BundleAdjuster(b_init)
    ba.optimize(max_steps=50)

    report_bundle(b_init, 'Initial')
    report_bundle(ba.bundle, 'Estimated')
    report_bundle(b_true, 'True')

    #print '\nError in ts:'
    #print abs(np.array(b_true.ts) - bcur.ts)
    #print 'Error in Rs:'
    #print abs(asarray(b_true.Rs) - bcur.Rs)
    #print 'Error in points:'
    #print sum(square(abs(array(b_true.pts) - bcur.pts)), axis=1)

    #print '\nOffset in Rs:'
    #for Rtrue,Rinit in zip(b_true.Rs, b_init.Rs):
    #    print dots(Rtrue, Rinit.T)

    print '\nCost (initial -> estimated -> true)'
    print '  %f -> %f -> %f' % (b_init.complete_cost(), ba.bundle.complete_cost(), b_true.complete_cost())

    draw_bundle.output_views(b_init, 'out/init.pdf')
    draw_bundle.output_views(b_true, 'out/true.pdf')
    draw_bundle.output_views(ba.bundle, 'out/estimated.pdf')
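The commented-out diagnostics above reference a bcur variable that the snippet never defines; it presumably stands for the current estimate, i.e. ba.bundle. A minimal sketch of that error report, assuming the bundle objects expose ts, Rs and pts as array-likes the way the comments imply:

import numpy as np

def report_errors(b_true, b_est):
    # Per-camera translation error.
    print('Error in ts:')
    print(np.abs(np.asarray(b_true.ts) - np.asarray(b_est.ts)))
    # Elementwise rotation-matrix error, as in the original comments.
    print('Error in Rs:')
    print(np.abs(np.asarray(b_true.Rs) - np.asarray(b_est.Rs)))
    # Squared distance between true and estimated 3D points.
    print('Error in points:')
    print(np.sum(np.square(np.asarray(b_true.pts) - np.asarray(b_est.pts)), axis=1))

# e.g. report_errors(b_true, ba.bundle)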
Example #3
def run(complete_bundle, window_size, num_to_freeze=2, pdf_pattern=None):
    NUM_TRACKS = 100

    # Determine the projection so that we always draw the bundle on a fixed subspace
    visualization_subspace = compute_subspace(complete_bundle)
    
    # Create binary mask for frozen cameras
    camera_mask = arange(window_size) < num_to_freeze

    track_ids = range(NUM_TRACKS)
    
    # Start optimizing
    cur_bundle = complete_bundle
    for i in range(0, len(complete_bundle.cameras)-window_size+1):
        print '\n\n==============\nWINDOW: [%d..%d]\n' % (i, i+window_size)

        prev_bundle = deepcopy(cur_bundle)

        # Adjust this window
        camera_ids = range(i, i+window_size)
        ba = BundleAdjuster()
        ba.set_bundle(cur_bundle, camera_ids=camera_ids, track_ids=track_ids)
        ba.optimize()
        cur_bundle = ba.bundle

        # Propagate the pose update of camera i to the next, not-yet-optimized camera
        next_camera_id = i + window_size
        if next_camera_id < len(cur_bundle.cameras):
            geometry.propagate_pose_update_inplace(prev_bundle.cameras[i],
                                                   cur_bundle.cameras[i],
                                                   cur_bundle.cameras[next_camera_id])
        
        # Draw this window
        print 'Drawing bundle...'
        if pdf_pattern is not None:
            pose_colors = ['b'] * len(ba.bundle.cameras)
            for j in range(i):
                pose_colors[j] = 'g'
            for j in range(i,i+window_size):
                pose_colors[j] = 'r'
            pdf_path = pdf_pattern % i

            print 'Writing to ',pdf_path
            draw_bundle(ba.bundle, visualization_subspace, pose_colors)
            if next_camera_id < len(prev_bundle.cameras):
                draw_pose(prev_bundle.cameras[next_camera_id].R,
                          prev_bundle.cameras[next_camera_id].t,
                          visualization_subspace,
                          'c')
            plt.savefig(pdf_path)
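Note that camera_mask above is built but never handed to the adjuster in this excerpt. If the intent is to freeze the first num_to_freeze cameras of each window, one way to express that is a per-parameter mask like the one used in Example #5 (ba.optimize(param_mask, ...)). The sketch below assumes camera parameters come first in the parameter vector, six per camera, followed by three per point; none of that layout is confirmed by these snippets.

import numpy as np

def window_param_mask(num_cameras, num_points, num_to_freeze,
                      params_per_camera=6, params_per_point=3):
    # True = free to optimize, False = frozen.
    camera_free = np.repeat(np.arange(num_cameras) >= num_to_freeze, params_per_camera)
    point_free = np.ones(num_points * params_per_point, dtype=bool)
    return np.concatenate([camera_free, point_free])

# e.g. ba.optimize(window_param_mask(window_size, NUM_TRACKS, num_to_freeze))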
Example #5
    for track in bundle.tracks:
        track.reconstruction = bundle.triangulate(track)

    NUM_CAMERAS = 100
    NUM_TRACKS = 100

    bundle.cameras = bundle.cameras[:NUM_CAMERAS]
    bundle.tracks = bundle.tracks[:NUM_TRACKS]
    for track in bundle.tracks:
        track.measurements = {i: track.get_measurement(i) for i in range(NUM_CAMERAS)}

    # Freeze the first camera's six pose parameters plus one extra parameter,
    # presumably to pin down the gauge (global rotation, translation and scale).
    param_mask = np.ones(bundle.num_params(), bool)
    param_mask[:6] = False
    param_mask[9] = False

    b_init = deepcopy(bundle)
    ba = BundleAdjuster(bundle)
    ba.optimize(param_mask, max_steps=10)

    # Write results
    with open('out/adjusted_poses.txt', 'w') as fd:
        for camera in ba.bundle.cameras:
            P = camera.projection_matrix()
            fd.write(''.join(['%f '%v for v in P.flatten()]))
            fd.write('\n')
    

    #draw_bundle.output_views(b_init, 'out/init.pdf')
    #draw_bundle.output_views(ba.bundle, 'out/estimated.pdf')
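The loop above writes one flattened projection matrix per line to out/adjusted_poses.txt. A minimal sketch of reading those poses back, assuming the usual 3x4 projection matrices (twelve values per line):

import numpy as np

def load_adjusted_poses(path='out/adjusted_poses.txt'):
    # Each line holds one projection matrix, flattened row-major by P.flatten().
    values = np.loadtxt(path)
    return values.reshape(-1, 3, 4)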
Example #6
    # print(points3D.shape)
    # Generate Bundle file

    # Draw Optical Flow
    klt_tracker.draw_optical_flow()

    # Generate Initialization Point Cloud
    klt_tracker.generate_initial_point_cloud(config.INITIAL_POINT_CLOUD)

    # Generate bundle adjustment input file
    klt_tracker.generate_bundle_file('../output/bundle.out')

    # Bundle Adjustment
    ceres_params = config.CERES_PARAMS
    bundle_adjuster = BundleAdjuster(config.INITIAL_POINT_CLOUD,
                                     config.FINAL_POINT_CLOUD,
                                     config.BUNDLE_FILE, config.CERES_PARAMS)

    bundle_adjuster.bundle_adjust()

    # Read the point cloud generated after Bundle Adjustment
    pcd = o3d.io.read_point_cloud(config.FINAL_POINT_CLOUD)

    # Extract depth from the point cloud
    depth_map = point_cloud_2_depth_map(pcd)

    # run plane sweep and CRF optimization

    folder = config.IMAGE_DIR
    parser = argparse.ArgumentParser()
    # General Params
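point_cloud_2_depth_map is not shown in this excerpt. A rough sketch of what such a helper could look like, assuming the point cloud is already expressed in the camera frame and that the intrinsic matrix K and image size are available; all of these are assumptions, since the real helper and the config module are not part of the snippet:

import numpy as np

def point_cloud_2_depth_map(pcd, K, width, height):
    # Pinhole projection of each point, keeping the nearest depth per pixel (z-buffer).
    pts = np.asarray(pcd.points)
    pts = pts[pts[:, 2] > 0]                      # keep points in front of the camera
    uvw = (K @ pts.T).T                           # homogeneous pixel coordinates
    u = np.round(uvw[:, 0] / uvw[:, 2]).astype(int)
    v = np.round(uvw[:, 1] / uvw[:, 2]).astype(int)
    depth = np.full((height, width), np.inf)
    inside = (u >= 0) & (u < width) & (v >= 0) & (v < height)
    for ui, vi, zi in zip(u[inside], v[inside], pts[inside, 2]):
        depth[vi, ui] = min(depth[vi, ui], zi)
    depth[np.isinf(depth)] = 0.0                  # unobserved pixels get zero depth
    return depth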
Example #7
    def _bundle_adjustment(self, add_order):
        '''
        Iteratively add each match to the bundle adjuster.
        '''

        # #-------start-----------
        # matches_to_add = self._matches.copy()
        # added_cameras = set()
        # ba = BundleAdjuster()

        # # Intialise the first camera that will be used as reference frame
        # first_cam = self._matches[0].cam_from
        # first_cam.R = np.identity(3)
        # first_cam.ppx, first_cam.ppy = 0, 0
        # added_cameras.add(first_cam)

        # while (len(matches_to_add) > 0):
        #   match = matches_to_add.pop(0)

        #   # Find which camera R needs to be estimated for
        #   if (match.cam_from in added_cameras):
        #     # cam_to_R = np.linalg.pinv(match.cam_to.K) @ match.H @ match.cam_from.K @ match.cam_from.R
        #     cam_to_R = (np.linalg.pinv(match.cam_from.R) @ (np.linalg.pinv(match.cam_from.K) @ match.H @ match.cam_to.K)).T
        #     match.cam_to.R = cam_to_R
        #     match.cam_to.ppx, match.cam_to.ppy = 0, 0
        #     print(f'{match.cam_from.image.filename} to {match.cam_to.image.filename}:\n {match.cam_to.R}\n\n')
        #   # elif (match.cam_to in added_cameras):
        #   #   print('to -> from match found')
        #   #   # print(f'np.linalg.pinv(match.cam_from.K): {np.linalg.pinv(match.cam_from.K)}')
        #   #   # print(f'np.linalg.pinv(match.H): {np.linalg.pinv(match.H)}')
        #   #   # print(f'match.cam_from.K: {match.cam_from.K}')
        #   #   # print(f'match.cam_from.R: {match.cam_from.R}')
        #   #   cam_from_R = np.linalg.pinv(match.cam_from.K) @ np.linalg.pinv(match.H) @ match.cam_to.K @ match.cam_to.R
        #   #   match.cam_from.R = cam_from_R
        #   #   match.cam_from.ppx, match.cam_from.ppy = 0, 0
        #   #   print(f'{match.cam_from.image.filename} - {match.cam_from.R}')

        #   # ba.add(match)
        #   added_cameras.update(match.cams())

        # for (i, match) in enumerate(matches_to_add):
        #   # If both cameras already added, add the match to BA
        #   if (match.cam_from in added_cameras and match.cam_to in added_cameras):
        #     ba.add(matches_to_add.pop(i))

        #   #ba.run()
        #   #-----end---------

        ba = BundleAdjuster()

        other_matches = set(self._matches) - set(add_order)

        # print(f'Total matches: {len(self._matches)}')
        # print(f'add_order count: {len(add_order)}')
        # print(f'other_matches count: {len(other_matches)}')

        identity_cam = add_order[0].cam_from
        identity_cam.R = np.identity(3)
        identity_cam.ppx, identity_cam.ppy = 0, 0

        print(f'Identity cam: {identity_cam.image.filename}')

        print('Original match params:')
        for match in add_order:
            print(
                f'{match.cam_from.image.filename} to {match.cam_to.image.filename}:\n {match.cam_to.R}\n'
            )
        print('------------------')

        for match in add_order:
            # result = warp_two_images(match.cam_from.image.image, match.cam_to.image.image, match.H)
            # cv.imshow('Original H from RANSAC', result)
            print(f'match.cam_from.R: {match.cam_from.R}')
            print(f'match.cam_from.K: {match.cam_from.K}')
            print(f'match.H: {match.H}')
            print(f'match.cam_to.K: {match.cam_to.K}')

            match.cam_to.R = (match.cam_from.R.T @ (
                np.linalg.pinv(match.cam_from.K) @ match.H @ match.cam_to.K)).T
            match.cam_to.ppx, match.cam_to.ppy = 0, 0
            print(
                f'{match.cam_from.image.filename} to {match.cam_to.image.filename}:\n {match.cam_to.R}\n'
            )

            # reconstructed_H = match.cam_from.K @ match.cam_from.R @ match.cam_to.R.T @ np.linalg.pinv(match.cam_to.K)
            # result = warp_two_images(match.cam_from.image.image, match.cam_to.image.image, reconstructed_H)
            # cv.imshow('Result with reconstructed H', result)

            # # Matrix -> rotvec -> matrix
            # converted_R = match.cam_to.rotvec_to_matrix(match.cam_to.angle_parameterisation())
            # print(f'homography:\n{match.H}')
            # print(f'converted_R matrix:\n{converted_R}')
            # reconstructed_from_converted_R_H = match.cam_from.K @ match.cam_from.R @ converted_R.T @ np.linalg.pinv(match.cam_to.K)
            # result = warp_two_images(match.cam_from.image.image, match.cam_to.image.image, reconstructed_from_converted_R_H)
            # cv.imshow('Result with reconstructed H from converted R', result)
            # cv.waitKey(0)
            # return

            ba.add(match)

            added_cams = ba.added_cameras()
            to_add = set()
            for other_match in other_matches:
                # If both cameras already added, add the match to BA
                if (other_match.cam_from in added_cams
                        and other_match.cam_to in added_cams):
                    to_add.add(other_match)
            for ready_match in to_add:
                # self._reverse_match(ready_match)
                ba.add(ready_match)
                other_matches.remove(ready_match)
        # return

        all_cameras = None
        try:
            all_cameras = pickle.load(
                open(f'all_cameras_{len(self._all_cameras())}.p', 'rb'))

            for match in self._matches:
                for cam in all_cameras:
                    if (match.cam_to.image.filename == cam.image.filename):
                        match.cam_to = cam
                    elif (match.cam_from.image.filename == cam.image.filename):
                        match.cam_from = cam

        except (OSError, IOError):
            ba.run()
            all_cameras = self._all_cameras()
            pickle.dump(
                all_cameras,
                open(f'all_cameras_{len(self._all_cameras())}.p', 'wb'))

        print('BA complete.')

        return
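The rotation update inside the loop above inverts the rotation-only homography model that the commented-out reconstructed_H line also uses, H = K_from * R_from * R_to^T * K_to^(-1). A small numpy sketch of that round trip, where K_from, K_to, R_from and H stand in for the per-camera values on the match objects:

import numpy as np

def estimate_R_to(K_from, R_from, H, K_to):
    # H = K_from @ R_from @ R_to.T @ inv(K_to)
    # => R_from.T @ inv(K_from) @ H @ K_to = R_to.T, then transpose.
    return (R_from.T @ (np.linalg.pinv(K_from) @ H @ K_to)).T

def reconstruct_H(K_from, R_from, R_to, K_to):
    # Inverse mapping, matching the commented-out reconstructed_H line.
    return K_from @ R_from @ R_to.T @ np.linalg.pinv(K_to)

# Because a RANSAC homography is only defined up to scale, the recovered R_to is
# generally only approximately orthogonal.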