Example #1
proj.save()

ref_node = getNode('/config/ned_reference', True)
ref = [
    ref_node.getFloat('lat_deg'),
    ref_node.getFloat('lon_deg'),
    ref_node.getFloat('alt_m')
]
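# ref is [lat (deg), lon (deg), altitude (m)]: the origin of the local
# NED (north-east-down) frame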
log("NED reference location:", ref)
# local surface approximation
srtm.initialize(ref, 6000, 6000, 30)
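# (assumption: 6000 x 6000 is the local grid extent in meters and 30 the
# post spacing)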
surface.update_srtm_elevations(proj)
proj.save_images_info()

# camera calibration
K = camera.get_K()
print("K:", K)

# fire up the matcher
matcher.configure()
matcher.find_matches(proj.image_list,
                     K,
                     transform=args.filter,
                     sort=True,
                     review=False)

feature_count = 0
image_count = 0
for image in proj.image_list:
    feature_count += image.num_features
    image_count += 1
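
# sketch (log wording assumed, not from the original): report the totals
log("Total images:", image_count)
log("Total features:", feature_count)
if image_count > 0:
    log("Average features per image: %.1f" % (feature_count / float(image_count)))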
Example #2
#         image.z_avg = image.sum_values / float(image.sum_count)
#         print(image.name, 'avg elev:', image.z_avg)
#     else:
#         image.z_avg = 0

# compute the uv grid for each image and project each point out into
# ned space, then intersect each vector with the srtm / ground /
# Delaunay surface.
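#
# per-corner math, sketched (illustrative names, not the actual code):
#   ray_cam = IK.dot([u, v, 1.0])                  # pixel -> camera-frame ray
#   ray_ned = body2ned.dot(cam2body).dot(ray_cam)  # rotate into NED
#   then march from the camera position along ray_ned until it meets
#   the surface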

name_list = []
for image in proj.image_list:
    name_list.append(image.name)
    #print(image.name, image.z_avg)
    width, height = camera.get_image_params()
    # scale the K matrix if we have scaled the images
    K = camera.get_K(optimized=True)
    IK = np.linalg.inv(K)

    grid_list = []
    u_list = np.linspace(0, width, ac3d_steps + 1)
    v_list = np.linspace(0, height, ac3d_steps + 1)
    #print "u_list:", u_list
    #print "v_list:", v_list
    for v in v_list:
        for u in u_list:
            grid_list.append([u, v])
    #print("grid_list:", grid_list)
    image.distorted_uv = proj.redistort(grid_list, optimized=True)
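    # redistort presumably pushes the ideal grid back through the camera's
    # lens model so the grid lines up with the raw (distorted) image pixels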

    image_node = smart.smart_node.getChild(image.name, True)
    surface_m = image_node.getFloat("tri_surface_m")
Example #3
proj = project.ProjectMgr(args.project)
proj.load_images_info()

source = 'matches_grouped'
print("Loading source matches:", source)
matches = pickle.load(open(os.path.join(proj.analysis_dir, source), 'rb'))

# load the group connections within the image set
group_list = groups.load(proj.analysis_dir)
print('Group sizes:', end=" ")
for group in group_list:
    print(len(group), end=" ")
print()

if args.method == 'triangulate':
    K = camera.get_K(optimized=True)
    dist_coeffs = np.array(proj.cam.get_dist_coeffs(optimized=True))
else:
    K = camera.get_K(optimized=False)
    # keep dist_coeffs defined either way; undistort() below relies on it
    dist_coeffs = np.array(proj.cam.get_dist_coeffs(optimized=False))
IK = np.linalg.inv(K)
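# IK maps homogeneous pixel coords [u, v, 1] to normalized camera-frame rays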

do_sanity_check = False


# assume global K and dist_coeffs set earlier
def undistort(uv_orig):
    # convert the point into the proper format for opencv
    uv_raw = np.zeros((1, 1, 2), dtype=np.float32)
    uv_raw[0][0] = (uv_orig[0], uv_orig[1])
    # do the actual undistort
    uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
    return uv_new[0][0]
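
# usage sketch (coordinates made up):
#   uv = undistort([320.0, 240.0])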
Example #4
    matcher_node.setFloat('match_ratio', args.match_ratio)
    matcher_node.setString('filter', args.filter)
    matcher_node.setInt('min_pairs', args.min_pairs)
    if args.min_dist:
        matcher_node.setFloat('min_dist', args.min_dist)
    if args.max_dist:
        matcher_node.setFloat('max_dist', args.max_dist)
    matcher_node.setInt('min_chain_len', args.min_chain_length)
    if args.ground:
        matcher_node.setFloat('ground_m', args.ground)
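    # (likely meanings, judging from the names: match_ratio is a Lowe-style
    # ratio-test threshold, min/max_dist bound the camera separation in
    # meters, and min_chain_len is the shortest match chain kept)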

    # save any config changes
    proj.save()

    # camera calibration
    K = camera.get_K()
    # print("K:", K)

    log("Matching features")

    # fire up the matcher
    matcher.configure()
    matcher.find_matches(proj,
                         K,
                         strategy=args.match_strategy,
                         transform=args.filter,
                         sort=True,
                         review=False)

    feature_count = 0
    image_count = 0
Example #5
def find_essential(i1, i2):
    # quick sanity checks
    if i1 == i2:
        return None
    if i2.name not in i1.match_list:
        return None
    if len(i1.match_list[i2.name]) == 0:
        return None

    if not i1.kp_list:
        i1.load_features()
    if not i2.kp_list:
        i2.load_features()

    # camera calibration
    K = camera.get_K()
    IK = np.linalg.inv(K)

    # set up data structures for the cv2 call
    uv1 = []
    uv2 = []
    indices = []
    for pair in i1.match_list[i2.name]:
        uv1.append(i1.kp_list[pair[0]].pt)
        uv2.append(i2.kp_list[pair[1]].pt)
    uv1 = np.float32(uv1)
    uv2 = np.float32(uv2)
    E, mask = cv2.findEssentialMat(uv1, uv2, K, method=method)
    print(i1.name, 'vs', i2.name)
    print("E:\n", E)
    print()
    (n, R, tvec, mask) = cv2.recoverPose(E, uv1, uv2, K)
    print('  inliers:', n, 'of', len(uv1))
    print('  R:', R)
    print('  tvec:', tvec)
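
    # note: the translation recovered from an essential matrix is only
    # defined up to scale, so only its direction is meaningful; that is
    # why the comparison below uses headings, not distances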

    # convert R to homogeneous
    #Rh = np.concatenate((R, np.zeros((3,1))), axis=1)
    #Rh = np.concatenate((Rh, np.zeros((1,4))), axis=0)
    #Rh[3,3] = 1
    # extract the equivalent quaternion, and invert
    q = transformations.quaternion_from_matrix(R)
    q_inv = transformations.quaternion_inverse(q)

    (ned1, ypr1, quat1) = i1.get_camera_pose()
    (ned2, ypr2, quat2) = i2.get_camera_pose()
    diff = np.array(ned2) - np.array(ned1)
    dist = np.linalg.norm(diff)
    dir = diff / dist
    print('dist:', dist, 'ned dir:', dir[0], dir[1], dir[2])
    crs_gps = 90 - math.atan2(dir[0], dir[1]) * r2d
    if crs_gps < 0: crs_gps += 360
    if crs_gps > 360: crs_gps -= 360
    print('crs_gps: %.1f' % crs_gps)
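    # e.g. dir = [1, 0] (due north): atan2(1, 0) = 90 deg -> crs_gps = 0;
    # dir = [0, 1] (due east): atan2(0, 1) = 0 -> crs_gps = 90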

    Rbody2ned = i1.get_body2ned()
    cam2body = i1.get_cam2body()
    body2cam = i1.get_body2cam()
    est_dir = Rbody2ned.dot(cam2body).dot(R).dot(tvec)
    est_dir = est_dir / np.linalg.norm(est_dir)  # normalize
    print('est dir:', est_dir.tolist())
    crs_fit = 90 - math.atan2(-est_dir[0], -est_dir[1]) * r2d
    if crs_fit < 0: crs_fit += 360
    if crs_fit > 360: crs_fit -= 360
    print('est crs_fit: %.1f' % crs_fit)
    print("est yaw error: %.1f" % (crs_fit - crs_gps))