# Example #1
# Load the project, its image info/features, and the original and
# optimized match sets saved by earlier pipeline stages.
proj = project.ProjectMgr(args.project)
proj.load_images_info()
proj.load_features()
proj.undistort_keypoints()

source = 'matches_direct'
print("Loading matches:", source)
# Use context managers so the pickle files are closed promptly
# (the original code leaked the open file handles).
# NOTE(review): pickle.load trusts its input — fine for project-local
# files, do not point it at untrusted data.
with open(os.path.join(args.project, source), "rb") as f:
    matches_orig = pickle.load(f)
print('Number of original features:', len(matches_orig))
print("Loading optimized matches: matches_opt")
with open(os.path.join(args.project, "matches_opt"), "rb") as f:
    matches_opt = pickle.load(f)
print('Number of optimized features:', len(matches_opt))

# load the group connections within the image set
group_list = groups.load(args.project)
print('Main group size:', len(group_list[0]))


# compute the depth of each feature for each image
def compute_feature_depths(image_list, group, matches):
    """Collect per-image depth (z) values for every matched feature.

    Resets ``image.z_list`` to an empty list on each image, then walks
    ``matches`` accumulating distances per feature.

    NOTE(review): the function body continues past this chunk — only the
    visible portion is documented here.
    """
    print("Computing depths for all match points...")

    # init structures
    for image in image_list:
        image.z_list = []

    # make a list of distances for each feature of each image
    for match in matches:
        feat_ned = match[0]  # first element of a match record is its NED position
        count = 0
# Example #2
import transformations

# Radian/degree conversion factors.
r2d = 180.0 / math.pi
d2r = math.pi / 180.0

# Command-line interface: only the project directory is required.
parser = argparse.ArgumentParser(
    description='Set the aircraft poses from flight data.')
parser.add_argument('--project', required=True, help='project directory')

args = parser.parse_args()

# Load the project and its per-image metadata.
proj = project.ProjectMgr(args.project)
print("Loading image info...")
proj.load_images_info()

# group_list[0] is treated below as the main (largest) connected group.
group_list = groups.load(proj.analysis_dir)

# compute an average transform between original camera attitude estimate
# and optimized camera attitude estimate
quats = []
for i, image in enumerate(proj.image_list):
    # Only images belonging to the main group contribute.
    if image.name in group_list[0]:
        print(image.name)
        # ned/ypr are re-bound by the second call; only the quaternions
        # q0 (original) and q1 (optimized) are used below.
        ned, ypr, q0 = image.get_camera_pose(opt=False)
        ned, ypr, q1 = image.get_camera_pose(opt=True)
        # rx = q1 * conj(q0)
        conj_q0 = transformations.quaternion_conjugate(q0)
        rx = transformations.quaternion_multiply(q1, conj_q0)
        rx /= transformations.vector_norm(rx)  # re-normalize to a unit quaternion
        print(' ', rx)
        # NOTE(review): statement truncated at the end of this chunk.
        (yaw_rad, pitch_rad,