default=1.0,
                    help='max feature angle')
args = parser.parse_args()
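# (Note: these excerpts omit their import blocks; each assumes the parent
# project's usual imports -- argparse, math, os, pickle, cv2, numpy as np,
# scipy, tqdm -- plus the project-local modules referenced below, e.g.
# ProjectMgr, Groups, SRTM, Optimizer, LineSolver, Panda3d, cull,
# transformations, and getNode from the property-tree module.)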

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_area_info(args.area)

area_dir = os.path.join(args.project, args.area)
#source = 'matches_direct'
source = 'matches_grouped'
print("Loading matches:", source)
matches = pickle.load(open(os.path.join(area_dir, source), "rb"))
print('Number of original features:', len(matches))

# load the group connections within the image set
groups = Groups.load(area_dir)
print('Group sizes:', end=" ")
for group in groups:
    print(len(group), end=" ")
print()


def compute_angle(ned1, ned2, ned3):
    vec1 = np.array(ned3) - np.array(ned1)
    vec2 = np.array(ned3) - np.array(ned2)
    n1 = np.linalg.norm(vec1)
    n2 = np.linalg.norm(vec2)
    denom = n1 * n2
    if denom > 0.000001:
        try:
            tmp = np.dot(vec1, vec2) / denom
            if tmp > 1.0: tmp = 1.0
            if tmp < -1.0: tmp = -1.0
            return math.acos(tmp)
        except ValueError:
            print('vec1:', vec1, 'vec2', vec2, 'dot:', np.dot(vec1, vec2))
            print('denom:', denom)
            return 0
    else:
        return 0
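
# A quick sanity check of compute_angle() with hypothetical values: a
# feature at the NED origin seen from two cameras 20 m apart at 100 m
# depth subtends acos(9900 / 10100) ~ 0.199 rad (~11.4 deg):
#   compute_angle([0, -10, -100], [0, 10, -100], [0, 0, 0])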
Example #2
ref_node = getNode("/config/ned_reference", True)
ref = [
    ref_node.getFloat('lat_deg'),
    ref_node.getFloat('lon_deg'),
    ref_node.getFloat('alt_m')
]

# setup SRTM ground interpolator
sss = SRTM.NEDGround(ref, 6000, 6000, 30)
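# (Inferred from usage in these excerpts: NEDGround appears to build a local
# ground-elevation interpolator in the NED frame around the lat/lon/alt
# reference, with the 6000 x 6000 arguments looking like the sampled extent
# in meters and 30 the post spacing; sss.interp([n, e]) returns the ground
# elevation at a north/east offset.)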

print("Loading optimized match points ...")
matches = pickle.load(
    open(os.path.join(proj.analysis_dir, "matches_grouped"), "rb"))

# load the group connections within the image set
groups = Groups.load(proj.analysis_dir)

# initialize temporary structures for vanity stats
for image in proj.image_list:
    image.sum_values = 0.0
    image.sum_count = 0.0
    image.max_z = -9999.0
    image.min_z = 9999.0

# elevation stats
print("Computing stats...")
ned_list = []
for match in matches:
    if match[1] == args.group:  # used by current group
        ned_list.append(match[0])
avg = -np.mean(np.array(ned_list)[:, 2])
Example #3
def render(project_dir, render_options):

    group_id = render_options[0]
    texture_resolution = render_options[1]
    srtm = render_options[2]
    ground = render_options[3]
    direct = render_options[4]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # lookup ned reference
    ref_node = getNode("/config/ned_reference", True)
    ref = [
        ref_node.getFloat('lat_deg'),
        ref_node.getFloat('lon_deg'),
        ref_node.getFloat('alt_m')
    ]

    # setup SRTM ground interpolator
    sss = SRTM.NEDGround(ref, 6000, 6000, 30)

    print("Loading optimized match points ...")
    matches = pickle.load(
        open(os.path.join(proj.analysis_dir, "matches_grouped"), "rb"))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)

    # initialize temporary structures for vanity stats
    for image in proj.image_list:
        image.sum_values = 0.0
        image.sum_count = 0.0
        image.max_z = -9999.0
        image.min_z = 9999.0

    # elevation stats
    print("Computing stats...")
    ned_list = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned_list.append(match[0])
    avg = -np.mean(np.array(ned_list)[:, 2])
    std = np.std(np.array(ned_list)[:, 2])
    print("Average elevation: %.2f" % avg)
    print("Standard deviation: %.2f" % std)

    # sort through points
    print('Reading feature locations from optimized match points ...')
    raw_points = []
    raw_values = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned = match[0]
            diff = abs(-ned[2] - avg)
            if diff < 10 * std:
                raw_points.append([ned[1], ned[0]])
                raw_values.append(ned[2])
                for m in match[2:]:
                    if proj.image_list[m[0]].name in groups[group_id]:
                        image = proj.image_list[m[0]]
                        z = -ned[2]
                        image.sum_values += z
                        image.sum_count += 1
                        if z < image.min_z:
                            image.min_z = z
                            #print(min_z, match)
                        if z > image.max_z:
                            image.max_z = z
                            #print(max_z, match)
            else:
                print("Discarding match with excessive altitude:", match)

    # save the surface definition as a separate file
    models_dir = os.path.join(proj.analysis_dir, 'models')
    if not os.path.exists(models_dir):
        print("Notice: creating models directory =", models_dir)
        os.makedirs(models_dir)
    surface = {'points': raw_points, 'values': raw_values}
    pickle.dump(
        surface,
        open(os.path.join(proj.analysis_dir, 'models', 'surface.bin'), "wb"))

    print('Generating Delaunay mesh and interpolator ...')
    global_tri_list = scipy.spatial.Delaunay(np.array(raw_points))
    interp = scipy.interpolate.LinearNDInterpolator(global_tri_list,
                                                    raw_values)
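    # Note: LinearNDInterpolator returns np.nan for query points outside the
    # convex hull of raw_points; intersect2d() below relies on that to detect
    # when a projected ray leaves the surveyed area.  A minimal standalone
    # sketch of the same pattern:
    #   tri = scipy.spatial.Delaunay(np.array([[0, 0], [1, 0], [0, 1]]))
    #   f = scipy.interpolate.LinearNDInterpolator(tri, [0.0, 1.0, 2.0])
    #   f([[0.25, 0.25]])   # -> array([0.75]); nan outside the triangle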

    no_extrapolate = True

    def intersect2d(ned, v, avg_ground):
        p = ned[:]  # copy

        # sanity check (always assume camera pose is above ground!)
        if v[2] <= 0.0:
            return p

        eps = 0.01
        count = 0
        #print("start:", p)
        #print("vec:", v)
        #print("ned:", ned)
        tmp = interp([p[1], p[0]])[0]
        if no_extrapolate or not np.isnan(tmp):
            surface = tmp
        else:
            surface = avg_ground
        error = abs(p[2] - surface)
        #print("p=%s surface=%s error=%s" % (p, surface, error))
        while error > eps and count < 25:
            d_proj = -(ned[2] - surface)
            factor = d_proj / v[2]
            n_proj = v[0] * factor
            e_proj = v[1] * factor
            #print(" proj = %s %s" % (n_proj, e_proj))
            p = [ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj]
            #print(" new p:", p)
            tmp = interp([p[1], p[0]])[0]
            if no_extrapolate or not np.isnan(tmp):
                surface = tmp
            error = abs(p[2] - surface)
            #print("  p=%s surface=%.2f error = %.3f" % (p, surface, error))
            count += 1
        #print("surface:", surface)
        #if np.isnan(surface):
        #    #print(" returning nans")
        #    return [np.nan, np.nan, np.nan]
        dy = ned[0] - p[0]
        dx = ned[1] - p[1]
        dz = ned[2] - p[2]
        dist = math.sqrt(dx * dx + dy * dy)
        angle = math.atan2(-dz, dist) * r2d  # relative to horizon
        if angle < 30:
            print(" returning high angle nans:", angle)
            return [np.nan, np.nan, np.nan]
        else:
            return p
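
    # (intersect2d() walks the ray down to the surface by fixed-point
    # iteration: drop the ray to the current surface-height estimate,
    # re-sample the surface at the new footprint, and repeat until the
    # height error falls below eps.  r2d, used above, is assumed to be a
    # module-level radians-to-degrees constant in the original script.)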

    def intersect_vectors(ned, v_list, avg_ground):
        pt_list = []
        for v in v_list:
            p = intersect2d(ned, v.flatten(), avg_ground)
            pt_list.append(p)
        return pt_list

    for image in proj.image_list:
        if image.sum_count > 0:
            image.z_avg = image.sum_values / float(image.sum_count)
            print(image.name, 'avg elev:', image.z_avg)
        else:
            image.z_avg = 0

    # compute the uv grid for each image and project each point out into
    # ned space, then intersect each vector with the srtm / ground /
    # delaunay surface.

    #for group in groups:
    if True:
        group = groups[group_id]
        #if len(group) < 3:
        #    continue
        for name in group:
            image = proj.findImageByName(name)
            print(image.name, image.z_avg)
            width, height = proj.cam.get_image_params()
            # scale the K matrix if we have scaled the images
            K = proj.cam.get_K(optimized=True)
            IK = np.linalg.inv(K)

            grid_list = []
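            # ac3d_steps is assumed to be a module-level constant in the
            # original script (grid subdivisions along each image edge)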
            u_list = np.linspace(0, width, ac3d_steps + 1)
            v_list = np.linspace(0, height, ac3d_steps + 1)
            #print "u_list:", u_list
            #print "v_list:", v_list
            for v in v_list:
                for u in u_list:
                    grid_list.append([u, v])
            #print 'grid_list:', grid_list
            image.distorted_uv = proj.redistort(grid_list, optimized=True)

            if direct:
                proj_list = proj.projectVectors(IK, image.get_body2ned(),
                                                image.get_cam2body(),
                                                grid_list)
            else:
                #print(image.get_body2ned(opt=True))
                proj_list = proj.projectVectors(IK,
                                                image.get_body2ned(opt=True),
                                                image.get_cam2body(),
                                                grid_list)
            #print 'proj_list:', proj_list

            if direct:
                ned, ypr, quat = image.get_camera_pose()
            else:
                ned, ypr, quat = image.get_camera_pose(opt=True)
            #print('cam orig:', image.camera_pose['ned'], 'optimized:', ned)
            if ground:
                pts_ned = proj.intersectVectorsWithGroundPlane(
                    ned, ground, proj_list)
            elif srtm:
                pts_ned = sss.interpolate_vectors(ned, proj_list)
            elif False:
                # this never seemed that productive
                print(image.name, image.z_avg)
                pts_ned = proj.intersectVectorsWithGroundPlane(
                    ned, image.z_avg, proj_list)
            elif True:
                # intersect with our polygon surface approximation
                pts_ned = intersect_vectors(ned, proj_list, -image.z_avg)
            elif False:
                # (moving away from the binned surface approach in this
                # script towards the delaunay interpolation approach
                # above)
                # intersect with 2d binned surface approximation
                pts_ned = bin2d.intersect_vectors(ned, proj_list, -image.z_avg)

            #print(image.name, "pts_3d (ned):\n", pts_ned)

            # convert ned to xyz and stash the result for each image
            image.grid_list = []
            for p in pts_ned:
                image.grid_list.append([p[1], p[0], -p[2]])

    # generate the panda3d egg models
    dir_node = getNode('/config/directories', True)
    img_src_dir = dir_node.getString('images_source')
    Panda3d.generate_from_grid(proj,
                               groups[group_id],
                               src_dir=img_src_dir,
                               analysis_dir=proj.analysis_dir,
                               resolution=texture_resolution)
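
# A hypothetical invocation of render(), matching the positional
# render_options list unpacked at the top of the function (group id,
# texture resolution, then the srtm / ground / direct options):
#   render('/path/to/project', [0, 512, False, None, False])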
Example #4
def match_trig(project_dir, match_trig_options):

    group = match_trig_options[0]
    method = match_trig_options[1]
    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    source = 'matches_grouped'
    print("Loading source matches:", source)
    matches = pickle.load(open(os.path.join(proj.analysis_dir, source), 'rb'))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    print('Group sizes:', end=" ")
    for group in groups:
        print(len(group), end=" ")
    print()

    if method == 'triangulate':
        K = proj.cam.get_K(optimized=True)
        dist_coeffs = np.array(proj.cam.get_dist_coeffs(optimized=True))
    else:
        K = proj.cam.get_K(optimized=False)
    IK = np.linalg.inv(K)

    do_sanity_check = False

    # assume global K and distcoeff set earlier
    def undistort(uv_orig):
        # convert the point into the proper format for opencv
        uv_raw = np.zeros((1, 1, 2), dtype=np.float32)
        uv_raw[0][0] = (uv_orig[0], uv_orig[1])
        # do the actual undistort
        uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
        # print(uv_orig, type(uv_new), uv_new)
        return uv_new[0][0]
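
    # (Passing P=K makes cv2.undistortPoints re-project the normalized,
    # undistorted coordinates back through the camera matrix, so undistort()
    # returns pixel coordinates rather than normalized image coordinates.)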

    if method == 'srtm':
        # lookup ned reference
        ref_node = getNode("/config/ned_reference", True)
        ref = [
            ref_node.getFloat('lat_deg'),
            ref_node.getFloat('lon_deg'),
            ref_node.getFloat('alt_m')
        ]

        # setup SRTM ground interpolator
        sss = SRTM.NEDGround(ref, 3000, 3000, 30)

        # for each image lookup the SRTM elevation under the camera
        print("Looking up SRTM base elevation for each image location...")
        for image in proj.image_list:
            ned, ypr, quat = image.get_camera_pose()
            image.base_elev = sss.interp([ned[0], ned[1]])[0]
            # print(image.name, image.base_elev)

        print("Estimating initial projection for each feature...")
        bad_count = 0
        bad_indices = []
        for i, match in enumerate(tqdm(matches)):
            psum = np.zeros(3)
            array = []  # list of successful projections
            for m in match[2:]:
                image = proj.image_list[m[0]]
                cam2body = image.get_cam2body()
                body2ned = image.get_body2ned()
                ned, ypr, quat = image.get_camera_pose()
                uv_list = [m[1]]  # just one uv element
                vec_list = proj.projectVectors(IK, body2ned, cam2body, uv_list)
                v = vec_list[0]
                if v[2] > 0.0:
                    d_proj = -(ned[2] + image.base_elev)
                    factor = d_proj / v[2]
                    n_proj = v[0] * factor
                    e_proj = v[1] * factor
                    p = [ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj]
                    # print('  ', p)
                    psum += np.array(p)
                    array.append(p)
                else:
                    print('vector projected above horizon.')
            # average only the successful projections
            if len(array):
                match[0] = (psum / len(array)).tolist()
            # print(match[0])
            if do_sanity_check:
                # crude sanity check
                ok = True
                for p in array:
                    dist = np.linalg.norm(np.array(match[0]) - np.array(p))
                    if dist > 100:
                        ok = False
                if not ok:
                    bad_count += 1
                    bad_indices.append(i)
                    print('match:', i, match[0])
                    for p in array:
                        dist = np.linalg.norm(np.array(match[0]) - np.array(p))
                        print(' ', dist, p)
        if do_sanity_check:
            print('bad count:', bad_count)
            print('deleting bad matches...')
            bad_indices.reverse()
            for i in bad_indices:
                del matches[i]
    elif method == 'triangulate':
        for i, match in enumerate(matches):
            if match[1] == group:  # used in current group
                # print(match)
                points = []
                vectors = []
                for m in match[2:]:
                    if proj.image_list[m[0]].name in groups[group]:
                        # print(m)
                        image = proj.image_list[m[0]]
                        cam2body = image.get_cam2body()
                        body2ned = image.get_body2ned()
                        ned, ypr, quat = image.get_camera_pose(opt=True)
                        uv_list = [undistort(m[1])]  # just one uv element
                        vec_list = proj.projectVectors(IK, body2ned, cam2body,
                                                       uv_list)
                        points.append(ned)
                        vectors.append(vec_list[0])
                        # print(' ', image.name)
                        # print(' ', uv_list)
                        # print('  ', vec_list)
                if len(points) >= 2:
                    # print('points:', points)
                    # print('vectors:', vectors)
                    p = LineSolver.ls_lines_intersection(
                        points, vectors, transpose=True).tolist()
                    # print('result:',  p, p[0])
                    print(i, match[0], '>>>', end=" ")
                    match[0] = [p[0][0], p[1][0], p[2][0]]
                    if p[2][0] > 0:
                        print("WHOA!")
                    print(match[0])

    print("Writing:", source)
    pickle.dump(matches, open(os.path.join(proj.analysis_dir, source), "wb"))
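
# A hypothetical invocation, matching the positional match_trig_options list
# (group id, then the method, 'srtm' or 'triangulate'):
#   match_trig('/path/to/project', [0, 'triangulate'])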
Example #5
def mre(project_dir, mre_options):

    group_id = mre_options[0]
    stddev = mre_options[1]
    initial_pose = mre_options[2]
    strong = mre_options[3]
    interactive = mre_options[4]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # a value of 2 lets pairs exist, which can be trouble ...
    matcher_node = getNode('/config/matcher', True)
    min_chain_len = matcher_node.getInt("min_chain_len")
    if min_chain_len == 0:
        min_chain_len = 3
    print("Notice: min_chain_len is:", min_chain_len)

    source = 'matches_grouped'
    print("Loading matches:", source)
    matches = pickle.load(open(os.path.join(proj.analysis_dir, source), "rb"))
    print('Number of original features:', len(matches))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    print('Group sizes:', end=" ")
    for group in groups:
        print(len(group), end=" ")
    print()

    opt = Optimizer.Optimizer(project_dir)
    if initial_pose:
        opt.setup(proj, groups, group_id, matches, optimized=False)
    else:
        opt.setup(proj, groups, group_id, matches, optimized=True)
    x0 = np.hstack((opt.camera_params.ravel(), opt.points_3d.ravel(),
                    opt.K[0, 0], opt.K[0, 2], opt.K[1, 2], opt.distCoeffs))
    error = opt.fun(x0, opt.n_cameras, opt.n_points,
                    opt.by_camera_point_indices, opt.by_camera_points_2d)

    print('cameras:', opt.n_cameras)

    print('error terms:', len(error))
    mre = np.mean(np.abs(error))
    std = np.std(error)
    max_err = np.amax(np.abs(error))
    print('mre: %.3f std: %.3f max: %.2f' % (mre, std, max_err))

    print('Tabulating results...')
    results = []
    results_by_cam = []
    count = 0
    for i, cam in enumerate(opt.camera_params.reshape(
        (opt.n_cameras, opt.ncp))):
        # print(i, opt.camera_map_fwd[i])
        orig_cam_index = opt.camera_map_fwd[i]
        cam_errors = []
        # print(count, opt.by_camera_point_indices[i])
        for j in opt.by_camera_point_indices[i]:
            match = matches[opt.feat_map_rev[j]]
            match_index = 0
            #print(orig_cam_index, match)
            for k, p in enumerate(match[2:]):
                if p[0] == orig_cam_index:
                    match_index = k
            # print(match[0], opt.points_3d[j*3:j*3+3])
            e = error[count * 2:count * 2 + 2]
            #print(count, e, np.linalg.norm(e))
            #if abs(e[0]) > 5*std or abs(e[1]) > 5*std:
            #    print("big")
            cam_errors.append(np.linalg.norm(e))
            results.append(
                [np.linalg.norm(e), opt.feat_map_rev[j], match_index])
            count += 1
        if len(cam_errors):
            results_by_cam.append([
                np.mean(np.abs(np.array(cam_errors))),
                np.amax(np.abs(np.array(cam_errors))),
                proj.image_list[orig_cam_index].name
            ])
        else:
            results_by_cam.append(
                [9999.0, 9999.0, proj.image_list[orig_cam_index].name])

        #print(proj.image_list[orig_cam_index].name, ':',
        #      np.mean(np.abs(np.array(cam_errors))))

    print("Report of images that aren't fitting well:")
    results_by_cam = sorted(results_by_cam,
                            key=lambda fields: fields[0],
                            reverse=True)
    for line in results_by_cam:
        if line[0] > mre + 3 * std:
            print("%s - mean: %.3f max: %.3f" % (line[2], line[0], line[1]))
    for line in results_by_cam:
        if line[0] > mre + 3 * std:
            print(line[2], end=" ")
    print()

    error_list = sorted(results, key=lambda fields: fields[0], reverse=True)

    def mark_outliers(error_list, trim_stddev):
        print("Marking outliers...")
        sum = 0.0
        count = len(error_list)

        # numerically it is better to sum a list of floating point
        # numbers from smallest to biggest (error_list is sorted from
        # biggest to smallest)
        for line in reversed(error_list):
            sum += line[0]
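
        # (math.fsum(line[0] for line in error_list) would give the same
        # total with exact partial sums, independent of summation order.)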

        # stats on error values
        print(" computing stats...")
        mre = sum / count
        stddev_sum = 0.0
        for line in error_list:
            error = line[0]
            stddev_sum += (mre - error) * (mre - error)
        stddev = math.sqrt(stddev_sum / count)
        print("mre = %.4f stddev = %.4f" % (mre, stddev))

        # mark match items to delete
        print(" marking outliers...")
        mark_count = 0
        for line in error_list:
            # print "line:", line
            if line[0] > mre + stddev * trim_stddev:
                cull.mark_feature(matches, line[1], line[2], line[0])
                mark_count += 1

        return mark_count

    if interactive:
        # interactively pick outliers
        mark_list = cull.show_outliers(error_list, matches, proj.image_list)

        # mark selection
        cull.mark_using_list(mark_list, matches)
        mark_sum = len(mark_list)
    else:
        # trim outliers by some # of standard deviations high
        mark_sum = mark_outliers(error_list, stddev)

    # after marking the bad matches, now count how many remaining features
    # show up in each image
    for i in proj.image_list:
        i.feature_count = 0
    for i, match in enumerate(matches):
        for j, p in enumerate(match[2:]):
            if p[1] != [-1, -1]:
                image = proj.image_list[p[0]]
                image.feature_count += 1

    purge_weak_images = False
    if purge_weak_images:
        # make a dict of all images with less than 25 feature matches
        weak_dict = {}
        for i, img in enumerate(proj.image_list):
            # print img.name, img.feature_count
            if 0 < img.feature_count < 25:
                weak_dict[i] = True
        print('weak images:', weak_dict)

        # mark any features in the weak images list
        for i, match in enumerate(matches):
            #print 'before:', match
            for j, p in enumerate(match[2:]):
                if p[0] in weak_dict:
                    # p is match[j + 2]; mark its uv slot (cf. the
                    # p[1] != [-1, -1] test above)
                    match[j + 2][1] = [-1, -1]
                    mark_sum += 1

    if mark_sum > 0:
        print('Outliers removed from match lists:', mark_sum)
        result = input('Save these changes? (y/n):')
        if result == 'y' or result == 'Y':
            cull.delete_marked_features(matches, min_chain_len, strong=strong)
            # write out the updated match dictionaries
            print("Writing:", source)
            pickle.dump(matches,
                        open(os.path.join(proj.analysis_dir, source), "wb"))
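
# A hypothetical invocation, matching the positional mre_options list (group
# id, trim stddev, then the initial-pose / strong / interactive flags):
#   mre('/path/to/project', [0, 5, False, False, False])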
Example #6
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_images_info()

source = 'matches_grouped'
print("Loading source matches:", source)
matches = pickle.load(open(os.path.join(proj.analysis_dir, source), 'rb'))

print("features:", len(matches))

# compute the group connections within the image set.
groups = Groups.compute(proj.image_list, matches)
Groups.save(proj.analysis_dir, groups)

print('Total images:', len(proj.image_list))
print('Group sizes:', end=" ")
for g in groups:
    print(len(g), end=" ")
print()

# debug
print("Counting allocated features...")
count = 0
for i, match in enumerate(matches):
    if match[1] >= 0:
        count += 1
Example #7
def colocated(project_dir, colocated_options):

    group_id = colocated_options[0]
    min_angle = colocated_options[1]

    r2d = 180.0 / math.pi

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # a value of 2 lets pairs exist, which can be trouble ...
    matcher_node = getNode('/config/matcher', True)
    min_chain_len = matcher_node.getInt("min_chain_len")
    if min_chain_len == 0:
        min_chain_len = 3
    print("Notice: min_chain_len is:", min_chain_len)

    #source = 'matches_direct'
    source = 'matches_grouped'
    print("Loading matches:", source)
    matches = pickle.load(open(os.path.join(proj.analysis_dir, source), "rb"))
    print('Number of original features:', len(matches))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    print('Group sizes:', end=" ")
    for group in groups:
        print(len(group), end=" ")
    print()

    def compute_angle(ned1, ned2, ned3):
        vec1 = np.array(ned3) - np.array(ned1)
        vec2 = np.array(ned3) - np.array(ned2)
        n1 = np.linalg.norm(vec1)
        n2 = np.linalg.norm(vec2)
        denom = n1 * n2
        if denom > 0.000001:
            try:
                tmp = np.dot(vec1, vec2) / denom
                if tmp > 1.0: tmp = 1.0
                if tmp < -1.0: tmp = -1.0
                return math.acos(tmp)
            except ValueError:
                print('vec1:', vec1, 'vec2', vec2, 'dot:', np.dot(vec1, vec2))
                print('denom:', denom)
                return 0
        else:
            return 0

    print("Scanning match pair angles:")
    mark_list = []
    for k, match in enumerate(tqdm(matches)):
        if match[1] == group_id:  # used by current group
            for i, m1 in enumerate(match[2:]):
                for j, m2 in enumerate(match[2:]):
                    if i < j:
                        i1 = proj.image_list[m1[0]]
                        i2 = proj.image_list[m2[0]]
                        if i1.name in groups[group_id] and i2.name in groups[
                                group_id]:
                            ned1, ypr1, q1 = i1.get_camera_pose(opt=True)
                            ned2, ypr2, q2 = i2.get_camera_pose(opt=True)
                            quick_approx = False
                            if quick_approx:
                                # quick hack angle approximation
                                avg = (np.array(ned1) + np.array(ned2)) * 0.5
                                y = np.linalg.norm(
                                    np.array(ned2) - np.array(ned1))
                                x = np.linalg.norm(avg - np.array(match[0]))
                                angle_deg = math.atan2(y, x) * r2d
                            else:
                                angle_deg = compute_angle(
                                    ned1, ned2, match[0]) * r2d
                            if angle_deg < min_angle:
                                mark_list.append([k, i])

    # Pairs with a very small angle between each feature and the two camera
    # locations indicate nearly coincident camera poses; these cause
    # problems because tiny changes in camera pose produce very large
    # changes in triangulated feature location.
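    # For scale: two cameras 1 m apart viewing a feature 100 m away subtend
    # roughly atan(1 / 100) ~ 0.57 deg, so a min_angle on the order of a
    # degree rejects near-coincident pairs.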

    # mark selection
    cull.mark_using_list(mark_list, matches)
    mark_sum = len(mark_list)

    if mark_sum > 0:
        print('Outliers to remove from match lists:', mark_sum)
        result = input('Save these changes? (y/n):')
        if result == 'y' or result == 'Y':
            cull.delete_marked_features(matches, min_chain_len)
            # write out the updated match dictionaries
            print("Writing original matches:", source)
            pickle.dump(matches,
                        open(os.path.join(proj.analysis_dir, source), "wb"))
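
# A hypothetical invocation, matching the positional colocated_options list
# (group id, then the minimum pair angle in degrees):
#   colocated('/path/to/project', [0, 1.0])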
Example #8
def optimizer(project_dir, optimize_options):

    group_id = optimize_options[0]
    refine = optimize_options[1]
    cam_calibration = optimize_options[2]

    d2r = math.pi / 180.0
    r2d = 180.0 / math.pi

    # return a 3d affine transformation between the current camera
    # locations and the original camera locations.
    def get_recenter_affine(src_list, dst_list):
        print('get_recenter_affine():')
        src = [[], [], [], []]  # current camera locations
        dst = [[], [], [], []]  # original camera locations
        for i in range(len(src_list)):
            src_ned = src_list[i]
            src[0].append(src_ned[0])
            src[1].append(src_ned[1])
            src[2].append(src_ned[2])
            src[3].append(1.0)
            dst_ned = dst_list[i]
            dst[0].append(dst_ned[0])
            dst[1].append(dst_ned[1])
            dst[2].append(dst_ned[2])
            dst[3].append(1.0)
            # print("{} <-- {}".format(dst_ned, src_ned))
        A = transformations.superimposition_matrix(src, dst, scale=True)
        print("A:\n", A)
        return A
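
    # (superimposition_matrix() expects points as columns, hence the 4 x N
    # src/dst layout above; with scale=True it fits a full similarity
    # transform: rotation, translation, and uniform scale.)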

    # transform a point list given an affine transform matrix
    def transform_points(A, pts_list):
        src = [[], [], [], []]
        for p in pts_list:
            src[0].append(p[0])
            src[1].append(p[1])
            src[2].append(p[2])
            src[3].append(1.0)
        dst = A.dot(np.array(src))
        result = []
        for i in range(len(pts_list)):
            result.append(
                [float(dst[0][i]),
                 float(dst[1][i]),
                 float(dst[2][i])])
        return result
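
    # Equivalent vectorized form for reference (the bottom row of A is
    # [0, 0, 0, 1] for these similarity fits):
    #   np.asarray(pts_list) @ A[:3, :3].T + A[:3, 3]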

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    source_file = os.path.join(proj.analysis_dir, 'matches_grouped')
    print('Match file:', source_file)
    matches = pickle.load(open(source_file, "rb"))
    print('Match features:', len(matches))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    # sort from smallest to largest: groups.sort(key=len)

    opt = Optimizer.Optimizer(project_dir)
    opt.setup(proj,
              groups,
              group_id,
              matches,
              optimized=refine,
              cam_calib=cam_calibration)
    (cameras, features, cam_index_map, feat_index_map, fx_opt, fy_opt,
     cu_opt, cv_opt, distCoeffs_opt) = opt.run()

    # mark all the optimized poses as invalid
    for image in proj.image_list:
        opt_cam_node = image.node.getChild('camera_pose_opt', True)
        opt_cam_node.setBool('valid', False)

    for i, cam in enumerate(cameras):
        image_index = cam_index_map[i]
        image = proj.image_list[image_index]
        ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
        print('optimized cam:', cam)
        rvec = cam[0:3]
        tvec = cam[3:6]
        Rned2cam, jac = cv2.Rodrigues(rvec)
        cam2body = image.get_cam2body()
        Rned2body = cam2body.dot(Rned2cam)
        Rbody2ned = np.matrix(Rned2body).T
        (yaw, pitch,
         roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
        #print "orig ypr =", image.camera_pose['ypr']
        #print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
        pos = -np.matrix(Rned2cam).T * np.matrix(tvec).T
        newned = pos.T[0].tolist()[0]
        print(image.name, ned_orig, '->', newned, 'dist:',
              np.linalg.norm(np.array(ned_orig) - np.array(newned)))
        image.set_camera_pose(newned,
                              yaw * r2d,
                              pitch * r2d,
                              roll * r2d,
                              opt=True)
        image.placed = True
    proj.save_images_info()
    print('Updated the optimized camera poses.')

    # update and save the optimized camera calibration
    proj.cam.set_K(fx_opt, fy_opt, cu_opt, cv_opt, optimized=True)
    proj.cam.set_dist_coeffs(distCoeffs_opt.tolist(), optimized=True)
    proj.save()

    # compare original camera locations with optimized camera locations and
    # derive a transform matrix to 'best fit' the new camera locations
    # over the original ... trusting the original group gps solution as
    # our best absolute truth for positioning the system in world
    # coordinates.
    #
    # each optimized group needs a separate/unique fit

    matches_opt = list(matches)  # shallow copy
    refit_group_orientations = True
    if refit_group_orientations:
        group = groups[group_id]
        print('refitting group size:', len(group))
        src_list = []
        dst_list = []
        # only consider images that are in the current group
        for name in group:
            image = proj.findImageByName(name)
            ned, ypr, quat = image.get_camera_pose(opt=True)
            src_list.append(ned)
            ned, ypr, quat = image.get_camera_pose()
            dst_list.append(ned)
        A = get_recenter_affine(src_list, dst_list)

        # extract the rotation matrix (R) from the affine transform
        scale, shear, angles, trans, persp = transformations.decompose_matrix(
            A)
        print('  scale:', scale)
        print('  shear:', shear)
        print('  angles:', angles)
        print('  translate:', trans)
        print('  perspective:', persp)
        R = transformations.euler_matrix(*angles)
        print("R:\n{}".format(R))

        # fixme (just group):

        # update the optimized camera locations based on best fit
        camera_list = []
        # load optimized poses
        for image in proj.image_list:
            if image.name in group:
                ned, ypr, quat = image.get_camera_pose(opt=True)
            else:
                # this is just fodder to match size/index of the lists
                ned, ypr, quat = image.get_camera_pose()
            camera_list.append(ned)

        # refit
        new_cams = transform_points(A, camera_list)

        # update position
        for i, image in enumerate(proj.image_list):
            if image.name not in group:
                continue
            ned, [y, p, r], quat = image.get_camera_pose(opt=True)
            image.set_camera_pose(new_cams[i], y, p, r, opt=True)
        proj.save_images_info()

        if True:
            # update optimized pose orientation.
            dist_report = []
            for i, image in enumerate(proj.image_list):
                if image.name not in group:
                    continue
                ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
                ned, ypr, quat = image.get_camera_pose(opt=True)
                Rbody2ned = image.get_body2ned(opt=True)
                # update the orientation with the same transform to keep
                # everything in proper consistent alignment

                newRbody2ned = R[:3, :3].dot(Rbody2ned)
                (yaw, pitch, roll) = transformations.euler_from_matrix(
                    newRbody2ned, 'rzyx')
                image.set_camera_pose(new_cams[i],
                                      yaw * r2d,
                                      pitch * r2d,
                                      roll * r2d,
                                      opt=True)
                dist = np.linalg.norm(
                    np.array(ned_orig) - np.array(new_cams[i]))
                print('image: {}'.format(image.name))
                print('  orig pos: {}'.format(ned_orig))
                print('  fit pos: {}'.format(new_cams[i]))
                print('  dist moved: {}'.format(dist))
                dist_report.append((dist, image.name))
            proj.save_images_info()

            dist_report = sorted(dist_report,
                                 key=lambda fields: fields[0],
                                 reverse=False)
            print('Image movement sorted lowest to highest:')
            for report in dist_report:
                print('{} dist: {}'.format(report[1], report[0]))

        # transform the optimized point locations using the same best-fit
        # transform used for the camera locations.
        new_feats = transform_points(A, features)

        # update any of the transformed feature locations that have
        # membership in the currently processing group back to the
        # master match structure.  Note we process groups in order of
        # little to big so if a match is in more than one group it
        # follows the larger group.
        for i, feat in enumerate(new_feats):
            match_index = feat_index_map[i]
            match = matches_opt[match_index]
            in_group = False
            for m in match[2:]:
                if proj.image_list[m[0]].name in group:
                    in_group = True
                    break
            if in_group:
                #print(' before:', match)
                match[0] = feat
                #print(' after:', match)
    else:
        # not refitting group orientations, just copy over optimized
        # coordinates
        for i, feat in enumerate(features):
            match_index = feat_index_map[i]
            match = matches_opt[match_index]
            match[0] = feat

    # write out the updated match_dict
    print('Updating matches file:', len(matches_opt), 'features')
    pickle.dump(matches_opt, open(source_file, 'wb'))

    #proj.cam.set_K(fx_opt/scale[0], fy_opt/scale[0], cu_opt/scale[0], cv_opt/scale[0], optimized=True)
    #proj.save()

    # temp write out just the points so we can plot them with gnuplot
    f = open(os.path.join(proj.analysis_dir, 'opt-plot.txt'), 'w')
    for m in matches_opt:
        try:
            f.write('%.2f %.2f %.2f\n' % (m[0][0], m[0][1], m[0][2]))
        except:
            pass
    f.close()

    # temp write out direct and optimized camera positions
    f1 = open(os.path.join(proj.analysis_dir, 'cams-direct.txt'), 'w')
    f2 = open(os.path.join(proj.analysis_dir, 'cams-opt.txt'), 'w')
    for name in groups[group_id]:
        image = proj.findImageByName(name)
        ned1, ypr1, quat1 = image.get_camera_pose()
        ned2, ypr2, quat2 = image.get_camera_pose(opt=True)
        f1.write('%.2f %.2f %.2f\n' % (ned1[1], ned1[0], -ned1[2]))
        f2.write('%.2f %.2f %.2f\n' % (ned2[1], ned2[0], -ned2[2]))
    f1.close()
    f2.close()
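
# A hypothetical invocation, matching the positional optimize_options list
# (group id, refine flag, camera-calibration flag):
#   optimizer('/path/to/project', [0, False, False])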
Example #9
def delaunay(project_dir, group_id):

    def gen_ac3d_surface(name, points_group, values_group, tris_group):
        kids = len(tris_group)
        # write out the ac3d file
        f = open(name, "w")
        f.write("AC3Db\n")
        trans = 0.0
        f.write("MATERIAL \"\" rgb 1 1 1  amb 0.6 0.6 0.6  emis 0 0 0  spec 0.5 0.5 0.5  shi 10  trans %.2f\n" % (trans))
        f.write("OBJECT world\n")
        f.write("kids " + str(kids) + "\n")

        for i in range(kids):
            points = points_group[i]
            values = values_group[i]
            tris = tris_group[i]
            f.write("OBJECT poly\n")
            f.write("loc 0 0 0\n")
            f.write("numvert %d\n" % len(points))
            for j in range(len(points)):
                f.write("%.3f %.3f %.3f\n" % (points[j][0], points[j][1],
                                            values[j]))
            f.write("numsurf %d\n" % len(tris.simplices))
            for tri in tris.simplices:
                f.write("SURF 0x30\n")
                f.write("mat 0\n")
                f.write("refs 3\n")
                for t in tri:
                    f.write("%d 0 0\n" % (t))
            f.write("kids 0\n")
                    
    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    print("Loading optimized points ...")
    matches = pickle.load(
        open(os.path.join(proj.analysis_dir, "matches_grouped"), "rb"))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)

    points_group = []
    values_group = []
    tris_group = []

    # initialize temporary structures for vanity stats
    for image in proj.image_list:
        image.raw_points = []
        image.raw_values = []
        image.sum_values = 0.0
        image.sum_count = 0.0
        image.max_z = -9999.0
        image.min_z = 9999.0

    # elevation stats
    print("Computing stats...")
    ned_list = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned_list.append(match[0])
    avg = -np.mean(np.array(ned_list)[:, 2])
    std = np.std(np.array(ned_list)[:, 2])
    print("Average elevation: %.2f" % avg)
    print("Standard deviation: %.2f" % std)

    # sort through points
    print('Reading feature locations from optimized match points ...')
    global_raw_points = []
    global_raw_values = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned = match[0]
            diff = abs(-ned[2] - avg)
            if diff < 5 * std:
                global_raw_points.append([ned[1], ned[0]])
                global_raw_values.append(-ned[2])
            else:
                print("Discarding match with excessive altitude:", match)

    print('Generating Delaunay meshes ...')
    global_tri_list = scipy.spatial.Delaunay(np.array(global_raw_points))

    print('Generating ac3d surface model ...')
    name = os.path.join(proj.analysis_dir, "surface-global.ac")
    gen_ac3d_surface(name, [global_raw_points], [global_raw_values], [global_tri_list])
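
# A hypothetical invocation: build the global surface model for group 0.
#   delaunay('/path/to/project', 0)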
Example #10
parser = argparse.ArgumentParser(
    description='Remove all matches referencing a specified image.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--group', type=int, default=0, help='group number')
parser.add_argument('--indices', nargs='+', type=int, help='image index')
parser.add_argument('--images', nargs='+', help='image names')
args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_images_info()

print("Loading matches_grouped...")
matches = pickle.load(
    open(os.path.join(proj.analysis_dir, "matches_grouped"), "rb"))
print("  features:", len(matches))

# load the group connections within the image set
groups = Groups.load(proj.analysis_dir)

# a value of 2 lets pairs exist, which can be trouble ...
matcher_node = getNode('/config/matcher', True)
min_chain_len = matcher_node.getInt("min_chain_len")
if min_chain_len == 0:
    min_chain_len = 3
print("Notice: min_chain_len is:", min_chain_len)

def mark_image_features(index, matches):
    # iterate through the match dictionary and mark any matches for
    # the specified image for deletion
    print("Marking feature matches for image:", index)
    count = 0
    new_matches = []
    for i, match in enumerate(matches):
Example #11
parser.add_argument('--area', default='area-00', help='sub area directory')
args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_area_info(args.area)

area_dir = os.path.join(args.project, args.area)
source = 'matches_grouped'
print("Loading source matches:", source)
matches = pickle.load(open(os.path.join(area_dir, source), 'rb'))

print("features:", len(matches))

# compute the group connections within the image set.

groups = Groups.groupByFeatureConnections(proj.image_list, matches)
# groups = Groups.groupByConnectedArea(proj.image_list, matches)
# groups = Groups.groupByImageConnections(proj)

groups.sort(key=len, reverse=True)
Groups.save(area_dir, groups)

print('Total images:', len(proj.image_list))
print('Group sizes:', end=" ")
for g in groups:
    print(len(g), end=" ")
print()

# debug
print("Counting allocated features...")
count = 0