Example #1
def detect(project_dir, detection_options):
    detector = detection_options[0]
    scale = detection_options[1]
    sift_max_features = detection_options[2]
    surf_hessian_threshold = detection_options[3]
    surf_noctaves = detection_options[4]
    grid_detect = detection_options[5]
    orb_max_features = detection_options[6]
    star_max_size = detection_options[7]
    star_response_threshold = detection_options[8]
    star_line_threshold_binarized = detection_options[9]
    star_suppress_nonmax_size = detection_options[10]
    show = detection_options[11]

    proj = ProjectMgr.ProjectMgr(project_dir)

    # load existing images info which could include things like camera pose
    proj.load_images_info()

    # setup project detector params
    detector_node = getNode('/config/detector', True)
    detector_node.setString('detector', detector)
    detector_node.setString('scale', scale)
    # TODO: use concurrency (e.g. a work queue) to decrease the processing time
    if detector == 'SIFT':
        detector_node.setInt('sift_max_features', sift_max_features)
    elif detector == 'SURF':
        detector_node.setInt('surf_hessian_threshold', surf_hessian_threshold)
        detector_node.setInt('surf_noctaves', surf_noctaves)
    elif detector == 'ORB':
        detector_node.setInt('grid_detect', grid_detect)
        detector_node.setInt('orb_max_features', orb_max_features)
    elif detector == 'Star':
        detector_node.setInt('star_max_size', star_max_size)
        detector_node.setInt('star_response_threshold',
                             star_response_threshold)
        # note: the projected line threshold reuses star_response_threshold,
        # since the options list does not carry a separate value for it
        detector_node.setInt('star_line_threshold_projected',
                             star_response_threshold)
        detector_node.setInt('star_line_threshold_binarized',
                             star_line_threshold_binarized)
        detector_node.setInt('star_suppress_nonmax_size',
                             star_suppress_nonmax_size)

    # find features in the full image set
    proj.detect_features(scale=scale, show=show)

    feature_count = 0
    image_count = 0
    for image in proj.image_list:
        feature_count += len(image.kp_list)
        image_count += 1

    print("Average # of features per image found = %.0f" %
          (feature_count / image_count))

    print("Saving project configuration")
    proj.save()
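
# A minimal usage sketch for detect(); the project path and all option values
# below are hypothetical placeholders, but the list ordering must match the
# positional unpacking at the top of detect().
example_detection_options = [
    'SIFT',   # detector
    0.4,      # scale
    30000,    # sift_max_features
    600,      # surf_hessian_threshold
    4,        # surf_noctaves
    1,        # grid_detect
    20000,    # orb_max_features
    16,       # star_max_size
    30,       # star_response_threshold
    10,       # star_line_threshold_binarized
    5,        # star_suppress_nonmax_size
    False,    # show
]
# detect('/path/to/project', example_detection_options)
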
def set_camera(project_dir,
               camera,
               yaw_deg=0.0,
               pitch_deg=-90.0,
               roll_deg=0.0):
    proj = ProjectMgr.ProjectMgr(project_dir)

    if camera:
        # specified on command line
        camera_file = camera
    else:
        # auto detect camera from image meta data
        camera, make, model, lens_model = proj.detect_camera()
        camera_file = os.path.join("..", "cameras", camera + ".json")
    print("Camera:", camera_file)

    # copy/overlay/update the specified camera config into the existing
    # project configuration
    cam_node = getNode('/config/camera', True)
    tmp_node = PropertyNode()
    if props_json.load(camera_file, tmp_node):
        for child in tmp_node.getChildren(expand=False):
            if tmp_node.isEnum(child):
                # print(child, tmp_node.getLen(child))
                for i in range(tmp_node.getLen(child)):
                    cam_node.setFloatEnum(child, i,
                                          tmp_node.getFloatEnum(child, i))
            else:
                # print(child, type(tmp_node.__dict__[child]))
                child_type = type(tmp_node.__dict__[child])
                if child_type is float:
                    cam_node.setFloat(child, tmp_node.getFloat(child))
                elif child_type is int:
                    cam_node.setInt(child, tmp_node.getInt(child))
                elif child_type is str:
                    cam_node.setString(child, tmp_node.getString(child))
                else:
                    print('Unknown child type:', child, child_type)

        proj.cam.set_mount_params(yaw_deg, pitch_deg, roll_deg)

        # note: dist_coeffs = array[5] = k1, k2, p1, p2, k3

        # ... and save
        proj.save()
    else:
        # failed to load camera config file
        if not camera:
            print("Camera autodetection failed.")
            print(
                "Consider running the new camera script to create a camera config"
            )
            print("and then try running this script again.")
        else:
            print("Provided camera config not found:", camera)
def new_project(project_dir):
    # test if images directory exists
    if not os.path.isdir(project_dir):
        print("Images directory doesn't exist:", args.project)
        quit()

    # create an empty project
    proj = ProjectMgr.ProjectMgr(project_dir, create=True)

    # and save what we have so far ...
    proj.save()
Example #4
def set_pose(project_dir, max_angle=25.0):
    proj = ProjectMgr.ProjectMgr(project_dir)
    print("Loading image info...")
    proj.load_images_info()

    # simplifying assumption
    image_dir = project_dir

    pix4d_file = os.path.join(image_dir, 'pix4d.csv')
    meta_file = os.path.join(image_dir, 'image-metadata.txt')
    if os.path.exists(pix4d_file):
        Pose.setAircraftPoses(proj,
                              pix4d_file,
                              order='rpy',
                              max_angle=max_angle)
    elif os.path.exists(meta_file):
        Pose.setAircraftPoses(proj,
                              meta_file,
                              order='ypr',
                              max_angle=max_angle)
    else:
        print("Error: no pose file found in image directory:", image_dir)
        quit()

    # compute the project's NED reference location (based on average of
    # aircraft poses)
    proj.compute_ned_reference_lla()
    ned_node = getNode('/config/ned_reference', True)
    print("NED reference location:")
    ned_node.pretty_print("  ")

    # set the camera poses (fixed offset from aircraft pose).  Camera pose
    # location is specified in NED, so do this after computing the NED
    # reference point for this project.
    Pose.compute_camera_poses(proj)

    # save the poses
    proj.save_images_info()

    # save change to ned reference
    proj.save()
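
# End-to-end setup sketch using the helpers above; the project path and the
# camera mount angles are placeholders, not values from a real dataset.
# new_project('/path/to/project')
# set_camera('/path/to/project', camera='', pitch_deg=-90.0)
# set_pose('/path/to/project', max_angle=25.0)
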
# set all the various camera configuration parameters

parser = argparse.ArgumentParser(description='Set camera configuration.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--camera', required=True, help='camera config file')

parser.add_argument('--yaw-deg', required=True, type=float,
                    help='camera yaw mounting offset from aircraft')
parser.add_argument('--pitch-deg', required=True, type=float,
                    help='camera pitch mounting offset from aircraft')
parser.add_argument('--roll-deg', required=True, type=float,
                    help='camera roll mounting offset from aircraft')

args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)

# copy/overlay/update the specified camera config into the existing
# project configuration
cam_node = getNode('/config/camera', True)
tmp_node = PropertyNode()
props_json.load(args.camera, tmp_node)
for child in tmp_node.getChildren(expand=False):
    if tmp_node.isEnum(child):
        # print(child, tmp_node.getLen(child))
        for i in range(tmp_node.getLen(child)):
            cam_node.setFloatEnum(child, i, tmp_node.getFloatEnum(child, i))
    else:
        # print(child, type(tmp_node.__dict__[child]))
        child_type = type(tmp_node.__dict__[child])
        if child_type is float:
            cam_node.setFloat(child, tmp_node.getFloat(child))
        elif child_type is int:
            cam_node.setInt(child, tmp_node.getInt(child))
        elif child_type is str:
            cam_node.setString(child, tmp_node.getString(child))
        else:
            print('Unknown child type:', child, child_type)
Example #6
def match_trig(project_dir, match_trig_options):

    group = match_trig_options[0]
    method = match_trig_options[1]
    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    source = 'matches_grouped'
    print("Loading source matches:", source)
    matches = pickle.load(open(os.path.join(proj.analysis_dir, source), 'rb'))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    print('Group sizes:', end=" ")
    for g in groups:
        print(len(g), end=" ")
    print()

    if method == 'triangulate':
        K = proj.cam.get_K(optimized=True)
        dist_coeffs = np.array(proj.cam.get_dist_coeffs(optimized=True))
    else:
        K = proj.cam.get_K(optimized=False)
    IK = np.linalg.inv(K)

    do_sanity_check = False

    # assume global K and distcoeff set earlier
    def undistort(uv_orig):
        # convert the point into the proper format for opencv
        uv_raw = np.zeros((1, 1, 2), dtype=np.float32)
        uv_raw[0][0] = (uv_orig[0], uv_orig[1])
        # do the actual undistort
        uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
        # print(uv_orig, type(uv_new), uv_new)
        return uv_new[0][0]

    if method == 'srtm':
        # lookup ned reference
        ref_node = getNode("/config/ned_reference", True)
        ref = [
            ref_node.getFloat('lat_deg'),
            ref_node.getFloat('lon_deg'),
            ref_node.getFloat('alt_m')
        ]

        # setup SRTM ground interpolator
        sss = SRTM.NEDGround(ref, 3000, 3000, 30)

        # for each image lookup the SRTM elevation under the camera
        print("Looking up SRTM base elevation for each image location...")
        for image in proj.image_list:
            ned, ypr, quat = image.get_camera_pose()
            image.base_elev = sss.interp([ned[0], ned[1]])[0]
            # print(image.name, image.base_elev)

        print("Estimating initial projection for each feature...")
        bad_count = 0
        bad_indices = []
        for i, match in enumerate(tqdm(matches)):
            sum = np.zeros(3)
            array = []  # fixme: temp/debug
            for m in match[2:]:
                image = proj.image_list[m[0]]
                cam2body = image.get_cam2body()
                body2ned = image.get_body2ned()
                ned, ypr, quat = image.get_camera_pose()
                uv_list = [m[1]]  # just one uv element
                vec_list = proj.projectVectors(IK, body2ned, cam2body, uv_list)
                v = vec_list[0]
                if v[2] > 0.0:
                    d_proj = -(ned[2] + image.base_elev)
                    factor = d_proj / v[2]
                    n_proj = v[0] * factor
                    e_proj = v[1] * factor
                    p = [ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj]
                    # print('  ', p)
                    sum += np.array(p)
                    array.append(p)
                else:
                    print('vector projected above horizon.')
            match[0] = (sum / len(match[2:])).tolist()
            # print(match[0])
            if do_sanity_check:
                # crude sanity check
                ok = True
                for p in array:
                    dist = np.linalg.norm(np.array(match[0]) - np.array(p))
                    if dist > 100:
                        ok = False
                if not ok:
                    bad_count += 1
                    bad_indices.append(i)
                    print('match:', i, match[0])
                    for p in array:
                        dist = np.linalg.norm(np.array(match[0]) - np.array(p))
                        print(' ', dist, p)
        if do_sanity_check:
            print('bad count:', bad_count)
            print('deleting bad matches...')
            bad_indices.reverse()
            for i in bad_indices:
                del matches[i]
    elif method == 'triangulate':
        for i, match in enumerate(matches):
            if match[1] == group:  # used in current group
                # print(match)
                points = []
                vectors = []
                for m in match[2:]:
                    if proj.image_list[m[0]].name in groups[group]:
                        # print(m)
                        image = proj.image_list[m[0]]
                        cam2body = image.get_cam2body()
                        body2ned = image.get_body2ned()
                        ned, ypr, quat = image.get_camera_pose(opt=True)
                        uv_list = [undistort(m[1])]  # just one uv element
                        vec_list = proj.projectVectors(IK, body2ned, cam2body,
                                                       uv_list)
                        points.append(ned)
                        vectors.append(vec_list[0])
                        # print(' ', image.name)
                        # print(' ', uv_list)
                        # print('  ', vec_list)
                if len(points) >= 2:
                    # print('points:', points)
                    # print('vectors:', vectors)
                    p = LineSolver.ls_lines_intersection(
                        points, vectors, transpose=True).tolist()
                    # print('result:',  p, p[0])
                    print(i, match[0], '>>>', end=" ")
                    match[0] = [p[0][0], p[1][0], p[2][0]]
                    if p[2][0] > 0:
                        print("WHOA!")
                    print(match[0])

    print("Writing:", source)
    pickle.dump(matches, open(os.path.join(proj.analysis_dir, source), "wb"))
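
# Usage sketch: the two positional options are the group index and the method
# string ('srtm' or 'triangulate'), as handled above.  The path and group
# index are placeholders.
example_match_trig_options = [0, 'srtm']
# match_trig('/path/to/project', example_match_trig_options)
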
def render(project_dir, render_options):

    group_id = render_options[0]
    texture_resolution = render_options[1]
    srtm = render_options[2]
    ground = render_options[3]
    direct = render_options[4]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # lookup ned reference
    ref_node = getNode("/config/ned_reference", True)
    ref = [
        ref_node.getFloat('lat_deg'),
        ref_node.getFloat('lon_deg'),
        ref_node.getFloat('alt_m')
    ]

    # setup SRTM ground interpolator
    sss = SRTM.NEDGround(ref, 6000, 6000, 30)

    print("Loading optimized match points ...")
    matches = pickle.load(
        open(os.path.join(proj.analysis_dir, "matches_grouped"), "rb"))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)

    # initialize temporary structures for vanity stats
    for image in proj.image_list:
        image.sum_values = 0.0
        image.sum_count = 0.0
        image.max_z = -9999.0
        image.min_z = 9999.0

    # elevation stats
    print("Computing stats...")
    ned_list = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned_list.append(match[0])
    avg = -np.mean(np.array(ned_list)[:, 2])
    std = np.std(np.array(ned_list)[:, 2])
    print("Average elevation: %.2f" % avg)
    print("Standard deviation: %.2f" % std)

    # sort through points
    print('Reading feature locations from optimized match points ...')
    raw_points = []
    raw_values = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned = match[0]
            diff = abs(-ned[2] - avg)
            if diff < 10 * std:
                raw_points.append([ned[1], ned[0]])
                raw_values.append(ned[2])
                for m in match[2:]:
                    if proj.image_list[m[0]].name in groups[group_id]:
                        image = proj.image_list[m[0]]
                        z = -ned[2]
                        image.sum_values += z
                        image.sum_count += 1
                        if z < image.min_z:
                            image.min_z = z
                            #print(min_z, match)
                        if z > image.max_z:
                            image.max_z = z
                            #print(max_z, match)
            else:
                print("Discarding match with excessive altitude:", match)

    # save the surface definition as a separate file
    models_dir = os.path.join(proj.analysis_dir, 'models')
    if not os.path.exists(models_dir):
        print("Notice: creating models directory =", models_dir)
        os.makedirs(models_dir)
    surface = {'points': raw_points, 'values': raw_values}
    pickle.dump(
        surface,
        open(os.path.join(proj.analysis_dir, 'models', 'surface.bin'), "wb"))

    print('Generating Delaunay mesh and interpolator ...')
    global_tri_list = scipy.spatial.Delaunay(np.array(raw_points))
    interp = scipy.interpolate.LinearNDInterpolator(global_tri_list,
                                                    raw_values)

    no_extrapolate = True

    def intersect2d(ned, v, avg_ground):
        p = ned[:]  # copy

        # sanity check (always assume camera pose is above ground!)
        if v[2] <= 0.0:
            return p

        eps = 0.01
        count = 0
        #print("start:", p)
        #print("vec:", v)
        #print("ned:", ned)
        tmp = interp([p[1], p[0]])[0]
        if no_extrapolate or not np.isnan(tmp):
            surface = tmp
        else:
            surface = avg_ground
        error = abs(p[2] - surface)
        #print("p=%s surface=%s error=%s" % (p, surface, error))
        while error > eps and count < 25:
            d_proj = -(ned[2] - surface)
            factor = d_proj / v[2]
            n_proj = v[0] * factor
            e_proj = v[1] * factor
            #print(" proj = %s %s" % (n_proj, e_proj))
            p = [ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj]
            #print(" new p:", p)
            tmp = interp([p[1], p[0]])[0]
            if no_extrapolate or not np.isnan(tmp):
                surface = tmp
            error = abs(p[2] - surface)
            #print("  p=%s surface=%.2f error = %.3f" % (p, surface, error))
            count += 1
        #print("surface:", surface)
        #if np.isnan(surface):
        #    #print(" returning nans")
        #    return [np.nan, np.nan, np.nan]
        dy = ned[0] - p[0]
        dx = ned[1] - p[1]
        dz = ned[2] - p[2]
        dist = math.sqrt(dx * dx + dy * dy)
        angle = math.atan2(-dz, dist) * r2d  # relative to horizon
        if angle < 30:
            print(" returning high angle nans:", angle)
            return [np.nan, np.nan, np.nan]
        else:
            return p

    def intersect_vectors(ned, v_list, avg_ground):
        pt_list = []
        for v in v_list:
            p = intersect2d(ned, v.flatten(), avg_ground)
            pt_list.append(p)
        return pt_list

    for image in proj.image_list:
        if image.sum_count > 0:
            image.z_avg = image.sum_values / float(image.sum_count)
            print(image.name, 'avg elev:', image.z_avg)
        else:
            image.z_avg = 0

    # compute the uv grid for each image and project each point out into
    # ned space, then intersect each vector with the srtm / ground /
    # Delaunay surface.

    #for group in groups:
    if True:
        group = groups[group_id]
        #if len(group) < 3:
        #    continue
        for name in group:
            image = proj.findImageByName(name)
            print(image.name, image.z_avg)
            width, height = proj.cam.get_image_params()
            # scale the K matrix if we have scaled the images
            K = proj.cam.get_K(optimized=True)
            IK = np.linalg.inv(K)

            grid_list = []
            u_list = np.linspace(0, width, ac3d_steps + 1)
            v_list = np.linspace(0, height, ac3d_steps + 1)
            #print "u_list:", u_list
            #print "v_list:", v_list
            for v in v_list:
                for u in u_list:
                    grid_list.append([u, v])
            #print 'grid_list:', grid_list
            image.distorted_uv = proj.redistort(grid_list, optimized=True)

            if direct:
                proj_list = proj.projectVectors(IK, image.get_body2ned(),
                                                image.get_cam2body(),
                                                grid_list)
            else:
                #print(image.get_body2ned(opt=True))
                proj_list = proj.projectVectors(IK,
                                                image.get_body2ned(opt=True),
                                                image.get_cam2body(),
                                                grid_list)
            #print 'proj_list:', proj_list

            if direct:
                ned, ypr, quat = image.get_camera_pose()
            else:
                ned, ypr, quat = image.get_camera_pose(opt=True)
            #print('cam orig:', image.camera_pose['ned'], 'optimized:', ned)
            if ground:
                pts_ned = proj.intersectVectorsWithGroundPlane(
                    ned, ground, proj_list)
            elif srtm:
                pts_ned = sss.interpolate_vectors(ned, proj_list)
            elif False:
                # this never seemed that productive
                print(image.name, image.z_avg)
                pts_ned = proj.intersectVectorsWithGroundPlane(
                    ned, image.z_avg, proj_list)
            elif True:
                # intersect with our polygon surface approximation
                pts_ned = intersect_vectors(ned, proj_list, -image.z_avg)
            elif False:
                # (moving away from the binned surface approach in this
                # script towards the above Delaunay interpolation
                # approach)
                # intersect with 2d binned surface approximation
                pts_ned = bin2d.intersect_vectors(ned, proj_list, -image.z_avg)

            #print(image.name, "pts_3d (ned):\n", pts_ned)

            # convert ned to xyz and stash the result for each image
            image.grid_list = []
            for p in pts_ned:
                image.grid_list.append([p[1], p[0], -p[2]])

    # generate the panda3d egg models
    dir_node = getNode('/config/directories', True)
    img_src_dir = dir_node.getString('images_source')
    Panda3d.generate_from_grid(proj,
                               groups[group_id],
                               src_dir=img_src_dir,
                               analysis_dir=proj.analysis_dir,
                               resolution=texture_resolution)
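
# Usage sketch for render(); the positional options are group id, texture
# resolution, the srtm flag, an optional fixed ground elevation, and the
# direct-pose flag.  The values are illustrative placeholders.
example_render_options = [0, 512, False, None, False]
# render('/path/to/project', example_render_options)
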
#!/usr/bin/python3

import argparse
import os

from lib import ProjectMgr

# initialize a new project workspace

parser = argparse.ArgumentParser(description='Create an empty project.')
parser.add_argument('--project',
                    required=True,
                    help='Directory with a set of aerial images.')

args = parser.parse_args()

# test if images directory exists
if not os.path.isdir(args.project):
    print("Images directory doesn't exist:", args.project)
    quit()

# create an empty project
proj = ProjectMgr.ProjectMgr(args.project, create=True)

# and save what we have so far ...
proj.save()
Example #9
def clean(project_dir):
    m = Matcher.Matcher()

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    proj.load_features(descriptors=False)
    #proj.undistort_keypoints()
    proj.load_match_pairs()

    # compute keypoint usage map
    proj.compute_kp_usage()

    # For some feature detection algorithms we expect duplicated feature
    # uv coordinates.  These duplicates may have different scaling or
    # other attributes important during feature matching, yet ultimately
    # resolve to the same uv coordinate in an image.
    print("Indexing features by unique uv coordinates:")
    for image in tqdm(proj.image_list):
        # pass one, build a tmp structure of unique keypoints (by uv) and
        # the index of the first instance.
        image.kp_remap = {}
        used = 0
        for i, kp in enumerate(image.kp_list):
            if image.kp_used[i]:
                used += 1
                key = "%.2f-%.2f" % (kp.pt[0], kp.pt[1])
                if not key in image.kp_remap:
                    image.kp_remap[key] = i
                else:
                    #print("%d -> %d" % (i, image.kp_remap[key]))
                    #print(" ", image.coord_list[i], image.coord_list[image.kp_remap[key]])
                    pass

        #print(" features used:", used)
        #print(" unique by uv and used:", len(image.kp_remap))

    # after feature matching we don't care about other attributes, just
    # the uv coordinate.
    #
    # notes: we do a first pass duplicate removal during the original
    # matching process.  This removes 1->many relationships, or duplicate
    # matches at different scales within a match pair.  However, different
    # pairs could reference the same keypoint at different scales, so
    # duplicates could still exist.  This finds all the duplicates within
    # the entire match set and collapses them down to eliminate any
    # redundancy.
    print("Merging keypoints with duplicate uv coordinates:")
    for i, i1 in enumerate(tqdm(proj.image_list)):
        for key in i1.match_list:
            matches = i1.match_list[key]
            count = 0
            i2 = proj.findImageByName(key)
            if i2 is None:
                # ignore pairs outside our area set
                continue
            for k, pair in enumerate(matches):
                # print pair
                idx1 = pair[0]
                idx2 = pair[1]
                kp1 = i1.kp_list[idx1]
                kp2 = i2.kp_list[idx2]
                key1 = "%.2f-%.2f" % (kp1.pt[0], kp1.pt[1])
                key2 = "%.2f-%.2f" % (kp2.pt[0], kp2.pt[1])
                # print key1, key2
                new_idx1 = i1.kp_remap[key1]
                new_idx2 = i2.kp_remap[key2]
                # count the number of match rewrites
                if idx1 != new_idx1 or idx2 != new_idx2:
                    count += 1
                if idx1 != new_idx1:
                    # sanity check
                    uv1 = list(i1.kp_list[idx1].pt)
                    new_uv1 = list(i1.kp_list[new_idx1].pt)
                    if not np.allclose(uv1, new_uv1):
                        print("OOPS!!!")
                        print("  index 1: %d -> %d" % (idx1, new_idx1))
                        print("  [%.2f, %.2f] -> [%.2f, %.2f]" %
                              (uv1[0], uv1[1], new_uv1[0], new_uv1[1]))
                if idx2 != new_idx2:
                    # sanity check
                    uv2 = list(i2.kp_list[idx2].pt)
                    new_uv2 = list(i2.kp_list[new_idx2].pt)
                    if not np.allclose(uv2, new_uv2):
                        print("OOPS!")
                        print("  index 2: %d -> %d" % (idx2, new_idx2))
                        print("  [%.2f, %.2f] -> [%.2f, %.2f]" %
                              (uv2[0], uv2[1], new_uv2[0], new_uv2[1]))
                # rewrite matches
                matches[k] = [new_idx1, new_idx2]
            #if count > 0:
            #    print('Match:', i1.name, 'vs', i2.name, '%d/%d' % ( count, len(matches) ), 'rewrites')

    # enable the following code to visualize the matches after collapsing
    # identical uv coordinates
    if False:
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if len(i1.match_list[j]):
                    print("Showing %s vs %s" % (i1.name, i2.name))
                    status = m.showMatchOrient(i1, i2, i1.match_list[j])

    # after collapsing by uv coordinate, we could be left with duplicate
    # matches (matched at different scales or other attributes, but same
    # exact point.)
    #
    # notes: this really shouldn't (!) (by my best current understanding)
    # be able to find any dups.  These should all get caught in the
    # original pair matching step.
    print("Checking for pair duplicates (there never should be any):")
    for i, i1 in enumerate(tqdm(proj.image_list)):
        for key in i1.match_list:
            matches = i1.match_list[key]
            i2 = proj.findImageByName(key)
            if i2 is None:
                # ignore pairs not in our area set
                continue
            count = 0
            pair_dict = {}
            new_matches = []
            for k, pair in enumerate(matches):
                pair_key = "%d-%d" % (pair[0], pair[1])
                if not pair_key in pair_dict:
                    pair_dict[pair_key] = True
                    new_matches.append(pair)
                else:
                    count += 1
            if count > 0:
                print('Match:', i1.name, 'vs', i2.name, 'matches:',
                      len(matches), 'dups:', count)
            i1.match_list[key] = new_matches

    # enable the following code to visualize the matches after eliminating
    # duplicates (duplicates can happen after collapsing uv coordinates.)
    if False:
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if len(i1.match_list[j]):
                    print("Showing %s vs %s" % (i1.name, i2.name))
                    status = m.showMatchOrient(i1, i2, i1.match_list[j])

    # Do we have a keypoint in i1 matching multiple keypoints in i2?
    #
    # Notes: again these shouldn't exist here, but let's check anyway.  If
    # we start finding these here, I should hunt for the reason earlier in
    # the code that lets some through, or try to understand what larger
    # logic principle allows some of these to still exist here.
    print(
        "Testing for 1 vs. n keypoint duplicates (there never should be any):")
    for i, i1 in enumerate(tqdm(proj.image_list)):
        for key in i1.match_list:
            matches = i1.match_list[key]
            i2 = proj.findImageByName(key)
            if i2 is None:
                # skip pairs outside our area set
                continue
            count = 0
            kp_dict = {}
            for k, pair in enumerate(matches):
                if not pair[0] in kp_dict:
                    kp_dict[pair[0]] = pair[1]
                else:
                    print("Warning keypoint idx", pair[0],
                          "already used in another match.")
                    uv2a = list(i2.kp_list[kp_dict[pair[0]]].pt)
                    uv2b = list(i2.kp_list[pair[1]].pt)
                    if not np.allclose(uv2a, uv2b):
                        print("  [%.2f, %.2f] -> [%.2f, %.2f]" %
                              (uv2a[0], uv2a[1], uv2b[0], uv2b[1]))
                    count += 1
            if count > 0:
                print('Match:', i1.name, 'vs', i2.name, 'matches:',
                      len(matches), 'dups:', count)

    print("Constructing unified match structure:")
    # create an initial pair-wise match list
    matches_direct = []
    for i, img in enumerate(tqdm(proj.image_list)):
        # print img.name
        for key in img.match_list:
            j = proj.findIndexByName(key)
            if j is None:
                continue
            matches = img.match_list[key]
            # print proj.image_list[j].name
            if j > i:
                for pair in matches:
                    # ned place holder, in use flag
                    match = [None, -1]
                    # camera/feature references
                    match.append([i, pair[0]])
                    match.append([j, pair[1]])
                    matches_direct.append(match)
                    # print pair, match

    sum = 0.0
    for match in matches_direct:
        sum += len(match[2:])

    if len(matches_direct):
        print("Total image pairs in image set:", len(matches_direct))
        print("Keypoint average instances = %.1f (should be 2.0 here)" %
              (sum / len(matches_direct)))

    # Note to self: I don't think we need the matches_direct file any more
    # (except for debugging possibly in the future.)
    #
    #print("Writing matches_direct file ...")
    #direct_file = os.path.join(proj.analysis_dir, "matches_direct")
    #pickle.dump(matches_direct, open(direct_file, "wb"))

    # collect/group match chains that refer to the same keypoint

    print("Linking common matches together into chains:")
    count = 0
    done = False
    while not done:
        print("Iteration %d:" % count)
        count += 1
        matches_new = []
        matches_lookup = {}
        for i, match in enumerate(tqdm(matches_direct)):
            # scan if any of these match points have been previously seen
            # and record the match index
            index = -1
            for p in match[2:]:
                key = "%d-%d" % (p[0], p[1])
                if key in matches_lookup:
                    index = matches_lookup[key]
                    break
            if index < 0:
                # not found, append to the new list
                for p in match[2:]:
                    key = "%d-%d" % (p[0], p[1])
                    matches_lookup[key] = len(matches_new)
                matches_new.append(list(match))  # shallow copy
            else:
                # found a previous reference, append these match items
                existing = matches_new[index]
                for p in match[2:]:
                    key = "%d-%d" % (p[0], p[1])
                    found = False
                    for e in existing[2:]:
                        if p[0] == e[0]:
                            found = True
                            break
                    if not found:
                        # add
                        existing.append(list(p))  # shallow copy
                        matches_lookup[key] = index
                # no 3d location estimation yet
                # # attempt to combine location equitably
                # size1 = len(match[2:])
                # size2 = len(existing[2:])
                # ned1 = np.array(match[0])
                # ned2 = np.array(existing[0])
                # avg = (ned1 * size1 + ned2 * size2) / (size1 + size2)
                # existing[0] = avg.tolist()
                # # print(ned1, ned2, existing[0])
                # # print "new:", existing
                # # print
        if len(matches_new) == len(matches_direct):
            done = True
        else:
            matches_direct = list(matches_new)  # shallow copy

    # replace the keypoint index in the matches file with the actual kp
    # values.  This will save time later and avoid needing to load the
    # full original feature files which are quite large.  This also will
    # reduce the in-memory footprint for many steps.
    print('Replacing keypoint indices with uv coordinates:')
    for match in tqdm(matches_direct):
        for m in match[2:]:
            kp = proj.image_list[m[0]].kp_list[m[1]].pt
            m[1] = list(kp)
        # print(match)

    # sort by longest match chains first
    print("Sorting matches by longest chain first.")
    matches_direct.sort(key=len, reverse=True)

    sum = 0.0
    for i, match in enumerate(matches_direct):
        refs = len(match[2:])
        sum += refs

    if count >= 1:
        print("Total unique features in image set:", len(matches_direct))
        print("Keypoint average instances:",
              "%.2f" % (sum / len(matches_direct)))

    print("Writing full group chain matches_grouped file ...")
    pickle.dump(matches_direct,
                open(os.path.join(proj.analysis_dir, "matches_grouped"), "wb"))
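
# For reference, each record in the matches_grouped file written above has the
# layout built by this function: entry 0 is the (not yet estimated) 3d
# location, entry 1 is the group/"in use" flag (initialized to -1), and the
# remaining entries are per-image observations whose keypoint indices have
# been replaced with uv coordinates, i.e. roughly:
#   [None, -1, [image_index, [u, v]], [image_index, [u, v]], ...]
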
def mre(project_dir, mre_options):

    group_id = mre_options[0]
    stddev = mre_options[1]
    initial_pose = mre_options[2]
    strong = mre_options[3]
    interactive = mre_options[4]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # a value of 2 lets pairs exist, which can be trouble ...
    matcher_node = getNode('/config/matcher', True)
    min_chain_len = matcher_node.getInt("min_chain_len")
    if min_chain_len == 0:
        min_chain_len = 3
    print("Notice: min_chain_len is:", min_chain_len)

    source = 'matches_grouped'
    print("Loading matches:", source)
    matches = pickle.load(open(os.path.join(proj.analysis_dir, source), "rb"))
    print('Number of original features:', len(matches))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    print('Group sizes:', end=" ")
    for group in groups:
        print(len(group), end=" ")
    print()

    opt = Optimizer.Optimizer(project_dir)
    if initial_pose:
        opt.setup(proj, groups, group_id, matches, optimized=False)
    else:
        opt.setup(proj, groups, group_id, matches, optimized=True)
    x0 = np.hstack((opt.camera_params.ravel(), opt.points_3d.ravel(),
                    opt.K[0, 0], opt.K[0, 2], opt.K[1, 2], opt.distCoeffs))
    error = opt.fun(x0, opt.n_cameras, opt.n_points,
                    opt.by_camera_point_indices, opt.by_camera_points_2d)

    print('cameras:', opt.n_cameras)

    print(len(error))
    mre = np.mean(np.abs(error))
    std = np.std(error)
    max = np.amax(np.abs(error))
    print('mre: %.3f std: %.3f max: %.2f' % (mre, std, max))

    print('Tabulating results...')
    results = []
    results_by_cam = []
    count = 0
    for i, cam in enumerate(opt.camera_params.reshape(
        (opt.n_cameras, opt.ncp))):
        # print(i, opt.camera_map_fwd[i])
        orig_cam_index = opt.camera_map_fwd[i]
        cam_errors = []
        # print(count, opt.by_camera_point_indices[i])
        for j in opt.by_camera_point_indices[i]:
            match = matches[opt.feat_map_rev[j]]
            match_index = 0
            #print(orig_cam_index, match)
            for k, p in enumerate(match[2:]):
                if p[0] == orig_cam_index:
                    match_index = k
            # print(match[0], opt.points_3d[j*3:j*3+3])
            e = error[count * 2:count * 2 + 2]
            #print(count, e, np.linalg.norm(e))
            #if abs(e[0]) > 5*std or abs(e[1]) > 5*std:
            #    print("big")
            cam_errors.append(np.linalg.norm(e))
            results.append(
                [np.linalg.norm(e), opt.feat_map_rev[j], match_index])
            count += 1
        if len(cam_errors):
            results_by_cam.append([
                np.mean(np.abs(np.array(cam_errors))),
                np.amax(np.abs(np.array(cam_errors))),
                proj.image_list[orig_cam_index].name
            ])
        else:
            results_by_cam.append(
                [9999.0, 9999.0, proj.image_list[orig_cam_index].name])

        #print(proj.image_list[orig_cam_index].name, ':',
        #      np.mean(np.abs(np.array(cam_errors))))

    print("Report of images that aren't fitting well:")
    results_by_cam = sorted(results_by_cam,
                            key=lambda fields: fields[0],
                            reverse=True)
    for line in results_by_cam:
        if line[0] > mre + 3 * std:
            print("%s - mean: %.3f max: %.3f" % (line[2], line[0], line[1]))
    for line in results_by_cam:
        if line[0] > mre + 3 * std:
            print(line[2], end=" ")
    print()

    error_list = sorted(results, key=lambda fields: fields[0], reverse=True)

    def mark_outliers(error_list, trim_stddev):
        print("Marking outliers...")
        sum = 0.0
        count = len(error_list)

        # numerically it is better to sum up a list of floating point
        # numbers from smallest to biggest (error_list is sorted from
        # biggest to smallest)
        for line in reversed(error_list):
            sum += line[0]

        # stats on error values
        print(" computing stats...")
        mre = sum / count
        stddev_sum = 0.0
        for line in error_list:
            error = line[0]
            stddev_sum += (mre - error) * (mre - error)
        stddev = math.sqrt(stddev_sum / count)
        print("mre = %.4f stddev = %.4f" % (mre, stddev))

        # mark match items to delete
        print(" marking outliers...")
        mark_count = 0
        for line in error_list:
            # print "line:", line
            if line[0] > mre + stddev * trim_stddev:
                cull.mark_feature(matches, line[1], line[2], line[0])
                mark_count += 1

        return mark_count

    if interactive:
        # interactively pick outliers
        mark_list = cull.show_outliers(error_list, matches, proj.image_list)

        # mark selection
        cull.mark_using_list(mark_list, matches)
        mark_sum = len(mark_list)
    else:
        # trim outliers by some # of standard deviations high
        mark_sum = mark_outliers(error_list, stddev)

    # after marking the bad matches, now count how many remaining features
    # show up in each image
    for i in proj.image_list:
        i.feature_count = 0
    for i, match in enumerate(matches):
        for j, p in enumerate(match[2:]):
            if p[1] != [-1, -1]:
                image = proj.image_list[p[0]]
                image.feature_count += 1

    purge_weak_images = False
    if purge_weak_images:
        # make a dict of all images with less than 25 feature matches
        weak_dict = {}
        for i, img in enumerate(proj.image_list):
            # print img.name, img.feature_count
            if img.feature_count > 0 and img.feature_count < 25:
                weak_dict[i] = True
        print('weak images:', weak_dict)

        # mark any features in the weak images list
        for i, match in enumerate(matches):
            #print 'before:', match
            for j, p in enumerate(match[2:]):
                if p[0] in weak_dict:
                    match[j + 2] = [-1, -1]
                    mark_sum += 1

    if mark_sum > 0:
        print('Outliers removed from match lists:', mark_sum)
        result = input('Save these changes? (y/n):')
        if result == 'y' or result == 'Y':
            cull.delete_marked_features(matches, min_chain_len, strong=strong)
            # write out the updated match dictionaries
            print("Writing:", source)
            pickle.dump(matches,
                        open(os.path.join(proj.analysis_dir, source), "wb"))
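
# Usage sketch for mre(); positional options are group id, the outlier trim
# threshold in standard deviations, and the initial_pose, strong, and
# interactive flags.  The values here are placeholders.
example_mre_options = [0, 5, False, False, False]
# mre('/path/to/project', example_mre_options)
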
Example #11
def match(project_dir, matching_options):

    matcher = matching_options[0]
    match_ratio = matching_options[1]
    min_pairs = matching_options[2]
    min_dist = matching_options[3]
    max_dist = matching_options[4]
    filters = matching_options[5]
    min_chain_length = matching_options[6]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    proj.load_features(
        descriptors=False)  # descriptors cached on the fly later
    proj.undistort_keypoints()
    proj.load_match_pairs()

    matcher_node = getNode('/config/matcher', True)
    matcher_node.setString('matcher', matcher)
    matcher_node.setFloat('match_ratio', match_ratio)
    matcher_node.setString('filter', filters)
    matcher_node.setInt('min_pairs', min_pairs)
    matcher_node.setFloat('min_dist', min_dist)
    matcher_node.setFloat('max_dist', max_dist)
    matcher_node.setInt('min_chain_len', min_chain_length)

    # save any config changes
    proj.save()

    # camera calibration
    K = proj.cam.get_K()
    print("K:", K)

    # fire up the matcher
    m = Matcher.Matcher()
    m.configure()
    m.robustGroupMatches(proj.image_list, K, filter=filters, review=False)

    # The following code is deprecated ...
    do_old_match_consolidation = False
    if do_old_match_consolidation:
        # build a list of all 'unique' keypoints.  Include an index to each
        # containing image and feature.
        matches_dict = {}
        for i, i1 in enumerate(proj.image_list):
            for j, matches in enumerate(i1.match_list):
                if j > i:
                    for pair in matches:
                        key = "%d-%d" % (i, pair[0])
                        m1 = [i, pair[0]]
                        m2 = [j, pair[1]]
                        if key in matches_dict:
                            feature_dict = matches_dict[key]
                            feature_dict['pts'].append(m2)
                        else:
                            feature_dict = {}
                            feature_dict['pts'] = [m1, m2]
                            matches_dict[key] = feature_dict
        #print match_dict
        count = 0.0
        sum = 0.0
        for key in matches_dict:
            sum += len(matches_dict[key]['pts'])
            count += 1
        if count > 0.1:
            print("total unique features in image set = %d" % count)
            print("kp average instances = %.4f" % (sum / count))

        # compute an initial guess at the 3d location of each unique feature
        # by averaging the locations of each projection
        for key in matches_dict:
            feature_dict = matches_dict[key]
            sum = np.array([0.0, 0.0, 0.0])
            for p in feature_dict['pts']:
                sum += proj.image_list[p[0]].coord_list[p[1]]
            ned = sum / len(feature_dict['pts'])
            feature_dict['ned'] = ned.tolist()

    def update_match_location(match):
        sum = np.array([0.0, 0.0, 0.0])
        for p in match[1:]:
            # print proj.image_list[ p[0] ].coord_list[ p[1] ]
            sum += proj.image_list[p[0]].coord_list[p[1]]
        ned = sum / len(match[1:])
        # print "avg =", ned
        match[0] = ned.tolist()
        return match

    if False:
        print("Constructing unified match structure...")
        print(
            "This probably will fail because we didn't do the ground intersection at the start..."
        )
        matches_direct = []
        for i, image in enumerate(proj.image_list):
            # print image.name
            for j, matches in enumerate(image.match_list):
                # print proj.image_list[j].name
                if j > i:
                    for pair in matches:
                        match = []
                        # ned place holder
                        match.append([0.0, 0.0, 0.0])
                        match.append([i, pair[0]])
                        match.append([j, pair[1]])
                        update_match_location(match)
                        matches_direct.append(match)
                        # print pair, match

        print("Writing match file ...")
        pickle.dump(matches_direct, open(project_dir + "/matches_direct",
                                         "wb"))
Example #12
def show_matches(project_dir, show_matches_option):

    orders = show_matches_option[0]
    orient = show_matches_option[1]
    image = show_matches_option[2]
    index = show_matches_option[3]
    direct = show_matches_option[4]
    sba = show_matches_option[5]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    proj.load_features()
    if direct:
        # recreate the pair-wise match structure
        matches_list = pickle.load( open( os.path.join(project_dir, "matches_direct"), "rb" ) )
        for i1 in proj.image_list:
            i1.match_list = []
            for i2 in proj.image_list:
                i1.match_list.append([])
        for match in matches_list:
            for p1 in match[1:]:
                for p2 in match[1:]:
                    if p1 == p2:
                        pass
                    else:
                        i = p1[0]
                        j = p2[0]
                        image = proj.image_list[i]
                        image.match_list[j].append( [p1[1], p2[1]] )
        # for i in range(len(proj.image_list)):
        #     print(len(proj.image_list[i].match_list))
        #     print(proj.image_list[i].match_list)
        #     for j in range(len(proj.image_list)):
        #         print(i, j, len(proj.image_list[i].match_list[j]),
        #               proj.image_list[i].match_list[j])
    else:
        proj.load_match_pairs()

    # lookup ned reference
    ref_node = getNode("/config/ned_reference", True)
    ref = [ ref_node.getFloat('lat_deg'),
            ref_node.getFloat('lon_deg'),
            ref_node.getFloat('alt_m') ]

    m = Matcher.Matcher()

    # honor the requested ordering, falling back to the weakest pairs first
    order = orders if orders else 'fewest-matches'

    if image:
        i1 = proj.findImageByName(image)
        if i1 != None:
            for key in i1.match_list:
                print(key, len(i1.match_list[key]))
                if len(i1.match_list[key]):
                    i2 = proj.findImageByName(key)
                    print("Showing %s vs %s (%d matches)" % (i1.name, i2.name, len(i1.match_list[key])))
                    status = m.showMatchOrient(i1, i2, i1.match_list[key],
                                            orient=orient)
        else:
            print("Cannot locate:", image)
    elif index is not None:
        i1 = proj.image_list[index]
        if i1 != None:
            for j, i2 in enumerate(proj.image_list):
                if len(i1.match_list[j]):
                    print("Showing %s vs %s" % (i1.name, i2.name))
                    status = m.showMatchOrient(i1, i2, i1.match_list[j],
                                            orient=orient)
        else:
            print("Cannot locate:", index)
    elif order == 'sequential':
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if i2.name in i1.match_list:
                    if len(i1.match_list[i2.name]):
                        print("Showing %s vs %s" % (i1.name, i2.name))
                        status = m.showMatchOrient(i1, i2, i1.match_list[i2.name],
                                                orient=orient)
    elif order == 'fewest-matches':
        match_list = []
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if len(i1.match_list[j]):
                    match_list.append( ( len(i1.match_list[j]), i, j ) )
        match_list = sorted(match_list,
                            key=lambda fields: fields[0],
                            reverse=False)
        for match in match_list:
            count = match[0]
            i = match[1]
            j = match[2]
            i1 = proj.image_list[i]
            i2 = proj.image_list[j]
            print("Showing %s vs %s (matches=%d)" % (i1.name, i2.name, count))
            status = m.showMatchOrient(i1, i2, i1.match_list[j],
                                    orient=orient)
def colocated(project_dir, colocated_options):

    group_id = colocated_options[0]
    min_angle = colocated_options[1]

    r2d = 180.0 / math.pi

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # a value of 2 lets pairs exist, which can be trouble ...
    matcher_node = getNode('/config/matcher', True)
    min_chain_len = matcher_node.getInt("min_chain_len")
    if min_chain_len == 0:
        min_chain_len = 3
    print("Notice: min_chain_len is:", min_chain_len)

    #source = 'matches_direct'
    source = 'matches_grouped'
    print("Loading matches:", source)
    matches = pickle.load(open(os.path.join(proj.analysis_dir, source), "rb"))
    print('Number of original features:', len(matches))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    print('Group sizes:', end=" ")
    for group in groups:
        print(len(group), end=" ")
    print()

    def compute_angle(ned1, ned2, ned3):
        vec1 = np.array(ned3) - np.array(ned1)
        vec2 = np.array(ned3) - np.array(ned2)
        n1 = np.linalg.norm(vec1)
        n2 = np.linalg.norm(vec2)
        denom = n1 * n2
        if denom > 0.000001:
            try:
                tmp = np.dot(vec1, vec2) / denom
                if tmp > 1.0: tmp = 1.0
                return math.acos(tmp)
            except:
                print('vec1:', vec1, 'vec2', vec2, 'dot:', np.dot(vec1, vec2))
                print('denom:', denom)
                return 0
        else:
            return 0

    print("Scanning match pair angles:")
    mark_list = []
    for k, match in enumerate(tqdm(matches)):
        if match[1] == group_id:  # used by current group
            for i, m1 in enumerate(match[2:]):
                for j, m2 in enumerate(match[2:]):
                    if i < j:
                        i1 = proj.image_list[m1[0]]
                        i2 = proj.image_list[m2[0]]
                        if i1.name in groups[group_id] and i2.name in groups[
                                group_id]:
                            ned1, ypr1, q1 = i1.get_camera_pose(opt=True)
                            ned2, ypr2, q2 = i2.get_camera_pose(opt=True)
                            quick_approx = False
                            if quick_approx:
                                # quick hack angle approximation
                                avg = (np.array(ned1) + np.array(ned2)) * 0.5
                                y = np.linalg.norm(
                                    np.array(ned2) - np.array(ned1))
                                x = np.linalg.norm(avg - np.array(match[0]))
                                angle_deg = math.atan2(y, x) * r2d
                            else:
                                angle_deg = compute_angle(
                                    ned1, ned2, match[0]) * r2d
                            if angle_deg < min_angle:
                                mark_list.append([k, i])

    # Pairs with very small average angles between each feature and camera
    # location indicate closely located camera poses and these cause
    # problems because very small changes in camera pose lead to very
    # large changes in feature location.

    # mark selection
    cull.mark_using_list(mark_list, matches)
    mark_sum = len(mark_list)

    if mark_sum > 0:
        print('Outliers to remove from match lists:', mark_sum)
        result = input('Save these changes? (y/n):')
        if result == 'y' or result == 'Y':
            cull.delete_marked_features(matches, min_chain_len)
            # write out the updated match dictionaries
            print("Writing original matches:", source)
            pickle.dump(matches,
                        open(os.path.join(proj.analysis_dir, source), "wb"))
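
# Usage sketch for colocated(); the options are the group id and the minimum
# allowed feature/camera angle in degrees (placeholder values).
example_colocated_options = [0, 1.0]
# colocated('/path/to/project', example_colocated_options)
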
Example #14
def optimizer(project_dir, optimize_options):

    group_id = optimize_options[0]
    refine = optimize_options[1]
    cam_calibration = optimize_options[2]

    d2r = math.pi / 180.0
    r2d = 180.0 / math.pi

    # return a 3d affine transformation between current camera locations
    # and original camera locations.
    def get_recenter_affine(src_list, dst_list):
        print('get_recenter_affine():')
        src = [[], [], [], []]  # current camera locations
        dst = [[], [], [], []]  # original camera locations
        for i in range(len(src_list)):
            src_ned = src_list[i]
            src[0].append(src_ned[0])
            src[1].append(src_ned[1])
            src[2].append(src_ned[2])
            src[3].append(1.0)
            dst_ned = dst_list[i]
            dst[0].append(dst_ned[0])
            dst[1].append(dst_ned[1])
            dst[2].append(dst_ned[2])
            dst[3].append(1.0)
            # print("{} <-- {}".format(dst_ned, src_ned))
        A = transformations.superimposition_matrix(src, dst, scale=True)
        print("A:\n", A)
        return A

    # transform a point list given an affine transform matrix
    def transform_points(A, pts_list):
        src = [[], [], [], []]
        for p in pts_list:
            src[0].append(p[0])
            src[1].append(p[1])
            src[2].append(p[2])
            src[3].append(1.0)
        dst = A.dot(np.array(src))
        result = []
        for i in range(len(pts_list)):
            result.append(
                [float(dst[0][i]),
                 float(dst[1][i]),
                 float(dst[2][i])])
        return result

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    source_file = os.path.join(proj.analysis_dir, 'matches_grouped')
    print('Match file:', source_file)
    matches = pickle.load(open(source_file, "rb"))
    print('Match features:', len(matches))

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    # sort from smallest to largest: groups.sort(key=len)

    opt = Optimizer.Optimizer(project_dir)
    opt.setup(proj,
              groups,
              group_id,
              matches,
              optimized=refine,
              cam_calib=cam_calibration)
    cameras, features, cam_index_map, feat_index_map, fx_opt, fy_opt, cu_opt, cv_opt, distCoeffs_opt = opt.run(
    )

    # mark all the optimized poses as invalid
    for image in proj.image_list:
        opt_cam_node = image.node.getChild('camera_pose_opt', True)
        opt_cam_node.setBool('valid', False)

    for i, cam in enumerate(cameras):
        image_index = cam_index_map[i]
        image = proj.image_list[image_index]
        ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
        print('optimized cam:', cam)
        rvec = cam[0:3]
        tvec = cam[3:6]
        Rned2cam, jac = cv2.Rodrigues(rvec)
        cam2body = image.get_cam2body()
        Rned2body = cam2body.dot(Rned2cam)
        Rbody2ned = np.matrix(Rned2body).T
        (yaw, pitch,
         roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
        #print "orig ypr =", image.camera_pose['ypr']
        #print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
        pos = -np.matrix(Rned2cam).T * np.matrix(tvec).T
        newned = pos.T[0].tolist()[0]
        print(image.name, ned_orig, '->', newned, 'dist:',
              np.linalg.norm(np.array(ned_orig) - np.array(newned)))
        image.set_camera_pose(newned,
                              yaw * r2d,
                              pitch * r2d,
                              roll * r2d,
                              opt=True)
        image.placed = True
    proj.save_images_info()
    print('Updated the optimized camera poses.')

    # update and save the optimized camera calibration
    proj.cam.set_K(fx_opt, fy_opt, cu_opt, cv_opt, optimized=True)
    proj.cam.set_dist_coeffs(distCoeffs_opt.tolist(), optimized=True)
    proj.save()

    # compare original camera locations with optimized camera locations and
    # derive a transform matrix to 'best fit' the new camera locations
    # over the original ... trusting the original group gps solution as
    # our best absolute truth for positioning the system in world
    # coordinates.
    #
    # each optimized group needs a separate/unique fit
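    # Conceptually the fit below finds the similarity transform A (scale,
    # rotation, translation) that minimizes sum_i ||dst_i - A * src_i||^2
    # over the cameras in the group.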

    matches_opt = list(matches)  # shallow copy
    refit_group_orientations = True
    if refit_group_orientations:
        group = groups[group_id]
        print('refitting group size:', len(group))
        src_list = []
        dst_list = []
        # only consider images that are in the current group
        for name in group:
            image = proj.findImageByName(name)
            ned, ypr, quat = image.get_camera_pose(opt=True)
            src_list.append(ned)
            ned, ypr, quat = image.get_camera_pose()
            dst_list.append(ned)
        A = get_recenter_affine(src_list, dst_list)

        # extract the rotation matrix (R) from the affine transform
        scale, shear, angles, trans, persp = transformations.decompose_matrix(
            A)
        print('  scale:', scale)
        print('  shear:', shear)
        print('  angles:', angles)
        print('  translate:', trans)
        print('  perspective:', persp)
        R = transformations.euler_matrix(*angles)
        print("R:\n{}".format(R))

        # fixme (just group):

        # update the optimized camera locations based on best fit
        camera_list = []
        # load optimized poses
        for image in proj.image_list:
            if image.name in group:
                ned, ypr, quat = image.get_camera_pose(opt=True)
            else:
                # this is just fodder to match size/index of the lists
                ned, ypr, quat = image.get_camera_pose()
            camera_list.append(ned)

        # refit
        new_cams = transform_points(A, camera_list)

        # update position
        for i, image in enumerate(proj.image_list):
            if image.name not in group:
                continue
            ned, [y, p, r], quat = image.get_camera_pose(opt=True)
            image.set_camera_pose(new_cams[i], y, p, r, opt=True)
        proj.save_images_info()

        if True:
            # update optimized pose orientation.
            dist_report = []
            for i, image in enumerate(proj.image_list):
                if image.name not in group:
                    continue
                ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
                ned, ypr, quat = image.get_camera_pose(opt=True)
                Rbody2ned = image.get_body2ned(opt=True)
                # update the orientation with the same transform to keep
                # everything in proper consistent alignment

                newRbody2ned = R[:3, :3].dot(Rbody2ned)
                (yaw, pitch, roll) = transformations.euler_from_matrix(
                    newRbody2ned, 'rzyx')
                image.set_camera_pose(new_cams[i],
                                      yaw * r2d,
                                      pitch * r2d,
                                      roll * r2d,
                                      opt=True)
                dist = np.linalg.norm(
                    np.array(ned_orig) - np.array(new_cams[i]))
                print('image: {}'.format(image.name))
                print('  orig pos: {}'.format(ned_orig))
                print('  fit pos: {}'.format(new_cams[i]))
                print('  dist moved: {}'.format(dist))
                dist_report.append((dist, image.name))
            proj.save_images_info()

            dist_report = sorted(dist_report,
                                 key=lambda fields: fields[0],
                                 reverse=False)
            print('Image movement sorted lowest to highest:')
            for report in dist_report:
                print('{} dist: {}'.format(report[1], report[0]))

        # transform the optimized point locations using the same best-fit
        # transform used for the camera locations.
        new_feats = transform_points(A, features)

        # update any of the transformed feature locations that have
        # membership in the current group back into the master match
        # structure.  Note: groups are processed from smallest to largest,
        # so a match that appears in more than one group ends up following
        # the largest group.
        for i, feat in enumerate(new_feats):
            match_index = feat_index_map[i]
            match = matches_opt[match_index]
            in_group = False
            for m in match[2:]:
                if proj.image_list[m[0]].name in group:
                    in_group = True
                    break
            if in_group:
                #print(' before:', match)
                match[0] = feat
                #print(' after:', match)
    else:
        # not refitting group orientations, just copy over optimized
        # coordinates
        for i, feat in enumerate(features):
            match_index = feat_index_map[i]
            match = matches_opt[match_index]
            match[0] = feat

    # write out the updated match_dict
    print('Updating matches file:', len(matches_opt), 'features')
    pickle.dump(matches_opt, open(source_file, 'wb'))

    #proj.cam.set_K(fx_opt/scale[0], fy_opt/scale[0], cu_opt/scale[0], cv_opt/scale[0], optimized=True)
    #proj.save()

    # temp write out just the points so we can plot them with gnuplot
    f = open(os.path.join(proj.analysis_dir, 'opt-plot.txt'), 'w')
    for m in matches_opt:
        try:
            f.write('%.2f %.2f %.2f\n' % (m[0][0], m[0][1], m[0][2]))
        except Exception:
            # skip matches without a valid 3d location
            pass
    f.close()

    # temp write out direct and optimized camera positions
    f1 = open(os.path.join(proj.analysis_dir, 'cams-direct.txt'), 'w')
    f2 = open(os.path.join(proj.analysis_dir, 'cams-opt.txt'), 'w')
    for name in groups[group_id]:
        image = proj.findImageByName(name)
        ned1, ypr1, quat1 = image.get_camera_pose()
        ned2, ypr2, quat2 = image.get_camera_pose(opt=True)
        f1.write('%.2f %.2f %.2f\n' % (ned1[1], ned1[0], -ned1[2]))
        f2.write('%.2f %.2f %.2f\n' % (ned2[1], ned2[0], -ned2[2]))
    f1.close()
    f2.close()
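
# A minimal sketch (not part of the original example) of how the camera
# position dumps written above could be visualized.  It assumes matplotlib
# is available; the file names match those written by the function above,
# and plot_camera_tracks() is a hypothetical helper name.
def plot_camera_tracks(analysis_dir):
    import matplotlib.pyplot as plt

    # each row is "east north up", as written above
    direct = np.loadtxt(os.path.join(analysis_dir, 'cams-direct.txt'))
    opt = np.loadtxt(os.path.join(analysis_dir, 'cams-opt.txt'))
    plt.plot(direct[:, 0], direct[:, 1], 'b.', label='direct')
    plt.plot(opt[:, 0], opt[:, 1], 'r.', label='optimized')
    plt.axis('equal')
    plt.xlabel('east (m)')
    plt.ylabel('north (m)')
    plt.legend()
    plt.show()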
Example #15
0
def delaunay(project_dir, group_id):

    def gen_ac3d_surface(name, points_group, values_group, tris_group):
        kids = len(tris_group)
        # write out the ac3d file
        f = open( name, "w" )
        f.write("AC3Db\n")
        trans = 0.0
        f.write("MATERIAL \"\" rgb 1 1 1  amb 0.6 0.6 0.6  emis 0 0 0  spec 0.5 0.5 0.5  shi 10  trans %.2f\n" % (trans))
        f.write("OBJECT world\n")
        f.write("kids " + str(kids) + "\n")

        for i in range(kids):
            points = points_group[i]
            values = values_group[i]
            tris = tris_group[i]
            f.write("OBJECT poly\n")
            f.write("loc 0 0 0\n")
            f.write("numvert %d\n" % len(points))
            for j in range(len(points)):
                f.write("%.3f %.3f %.3f\n" % (points[j][0], points[j][1],
                                            values[j]))
            f.write("numsurf %d\n" % len(tris.simplices))
            for tri in tris.simplices:
                f.write("SURF 0x30\n")
                f.write("mat 0\n")
                f.write("refs 3\n")
                for t in tri:
                    f.write("%d 0 0\n" % (t))
            f.write("kids 0\n")
                    
    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    print("Loading optimized points ...")
    matches = pickle.load( open( os.path.join(proj.analysis_dir, "matches_grouped"), "rb" ) )

    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)

    points_group = []
    values_group = []
    tris_group = []

    # initialize temporary structures for vanity stats
    for image in proj.image_list:
        image.raw_points = []
        image.raw_values = []
        image.sum_values = 0.0
        image.sum_count = 0.0
        image.max_z = -9999.0
        image.min_z = 9999.0

    # elevation stats
    print("Computing stats...")
    ned_list = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned_list.append(match[0])
    avg = -np.mean(np.array(ned_list)[:,2])
    std = np.std(np.array(ned_list)[:,2])
    print("Average elevation: %.2f" % avg)
    print("Standard deviation: %.2f" % std)

    # sort through points
    print('Reading feature locations from optimized match points ...')
    global_raw_points = []
    global_raw_values = []
    for match in matches:
        if match[1] == group_id:  # used by current group
            ned = match[0]
            diff = abs(-ned[2] - avg)
            if diff < 5*std:
                global_raw_points.append( [ned[1], ned[0]] )
                global_raw_values.append( -ned[2] )
            else:
                print("Discarding match with excessive altitude:", match)

    print('Generating Delaunay meshes ...')
    global_tri_list = scipy.spatial.Delaunay(np.array(global_raw_points))

    print('Generating ac3d surface model ...')
    name = os.path.join(proj.analysis_dir, "surface-global.ac")
    gen_ac3d_surface(name, [global_raw_points], [global_raw_values], [global_tri_list])
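
# A minimal sketch (not part of the original example) for eyeballing the
# Delaunay mesh computed above.  Assumes matplotlib is available; the
# points/values/tris arguments are the same structures passed to
# gen_ac3d_surface(), and plot_surface_mesh() is a hypothetical helper name.
def plot_surface_mesh(points, values, tris):
    import matplotlib.pyplot as plt

    pts = np.array(points)   # rows of [east, north]
    plt.triplot(pts[:, 0], pts[:, 1], tris.simplices, linewidth=0.3)
    plt.scatter(pts[:, 0], pts[:, 1], c=values, s=2)
    plt.colorbar(label='elevation (m)')
    plt.axis('equal')
    plt.show()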
Example #16
0
def run(project_dir):
    if False:
        import wx
        def get_path(wildcard):
            app = wx.App(None)
            style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
            dialog = wx.FileDialog(None, 'Open', wildcard=wildcard, style=style)
            if dialog.ShowModal() == wx.ID_OK:
                path = dialog.GetPath()
            else:
                path = None
            dialog.Destroy()
            return path
        get_path("*")
        print(get_path('*.txt'))
        quit()

    tk_root = tk.Tk()
    tk_root.withdraw()

    if not project_dir:
        # file_path = filedialog.askopenfilename()
        file_path = filedialog.askdirectory(title="Please open the project directory", mustexist=True)
        # print('selected:', type(file_path), len(file_path), file_path)
        if file_path:
            project_dir = file_path
        else:
            print("no project selected, exiting.")
            quit()

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()

    # lookup ned reference
    ref_node = getNode("/config/ned_reference", True)
    ned_ref = [ ref_node.getFloat('lat_deg'),
                ref_node.getFloat('lon_deg'),
                ref_node.getFloat('alt_m') ]

    tcache = {}

    # adaptive equalizer
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))

    class MyApp(ShowBase):
    
        def __init__(self):
            ShowBase.__init__(self)
    
            self.models = []
            self.base_textures = []

            # window title
            props = WindowProperties( )
            props.setTitle( pathlib.Path(project_dir).name )
            base.win.requestProperties( props )

            # we would like an orthographic lens
            self.lens = OrthographicLens()
            self.lens.setFilmSize(20, 15)
            base.camNode.setLens(self.lens)

            self.cam_pos = [ 0.0, 0.0, 1000.0 ]
            self.camera.setPos(self.cam_pos[0], self.cam_pos[1], self.cam_pos[2])
            self.camera.setHpr(0, -90.0, 0)
            self.view_size = 100.0
            self.last_ysize = 0
            
            self.top_image = 0

            # modules
            self.surface = surface.Surface(proj.analysis_dir)
            self.annotations = annotations.Annotations(self.render, self.surface,
                                                    project_dir,
                                                    ned_ref, tk_root)
            self.reticle = reticle.Reticle(self.render, self.surface, ned_ref)

            #self.messenger.toggleVerbose()

            # event handlers
            self.accept('arrow_left', self.cam_move, [-0.1, 0, 0])
            self.accept('arrow_right', self.cam_move, [0.1, 0, 0])
            self.accept('arrow_down', self.cam_move, [0, -0.1, 0])
            self.accept('arrow_up', self.cam_move, [0, 0.1, 0])
            self.accept('=', self.cam_zoom, [1.1])
            self.accept('shift-=', self.cam_zoom, [1.1])
            self.accept('-', self.cam_zoom, [1.0/1.1])
            self.accept('wheel_up', self.cam_zoom, [1.1])
            self.accept('wheel_down', self.cam_zoom, [1.0/1.1])
            self.accept('mouse1', self.mouse_state, [0, 1])
            self.accept('mouse1-up', self.mouse_state, [0, 0])
            self.accept('0', self.image_select, [0])
            self.accept('1', self.image_select, [1])
            self.accept('2', self.image_select, [2])
            self.accept('3', self.image_select, [3])
            self.accept('4', self.image_select, [4])
            self.accept('5', self.image_select, [5])
            self.accept('6', self.image_select, [6])
            self.accept('7', self.image_select, [7])
            self.accept('8', self.image_select, [8])
            self.accept('9', self.image_select, [9])
            self.accept('f', self.toggle_filter)
            self.accept('m', self.toggle_view_mode)
            self.accept(',', self.update_sequential_num, [-1])
            self.accept('.', self.update_sequential_num, [1])
            self.accept('escape', self.quit)
            self.accept('mouse3', self.annotations.toggle, [self.cam_pos])

            # mouse state
            self.last_mpos = [0, 0]
            self.mouse = [0, 0, 0]
            self.last_mouse = [0, 0, 0] 
        
            # Add the tasks to the task manager.
            self.taskMgr.add(self.updateCameraTask, "updateCameraTask")

            # Shader (aka filter?)
            self.filter = 'none'
            
            # Set default view mode
            self.view_mode = 'best'
            # self.view_mode = 'sequential'
            self.sequential_num = 0
            
            # dump a summary of the reported graphics card capabilities
            self.query_capabilities(display=True)

            # test shader
            # self.shader = Shader.load(Shader.SL_GLSL, vertex="explore/myshader.vert", fragment="explore/myshader.frag", geometry="explore/myshader.geom")
            self.shader = Shader.load(Shader.SL_GLSL, vertex="explore/myshader.vert", fragment="explore/myshader.frag")
            
        def query_capabilities(self, display=True):
            gsg=base.win.getGsg()
            print("driver vendor", gsg.getDriverVendor())
            self.driver_vendor = gsg.getDriverVendor()
            print("alpha_scale_via_texture", bool(gsg.getAlphaScaleViaTexture()))
            print("color_scale_via_lighting", bool(gsg.getColorScaleViaLighting()))
            print("copy_texture_inverted", bool(gsg.getCopyTextureInverted()))
            print("max_3d_texture_dimension", gsg.getMax3dTextureDimension())
            print("max_clip_planes", gsg.getMaxClipPlanes())
            print("max_cube_map_dimension", gsg.getMaxCubeMapDimension())
            print("max_lights", gsg.getMaxLights())
            print("max_texture_dimension", gsg.getMaxTextureDimension())
            self.max_texture_dimension = gsg.getMaxTextureDimension()
            print("max_texture_stages", gsg.getMaxTextureStages())
            print("max_vertex_transform_indices",  gsg.getMaxVertexTransformIndices())
            print("max_vertex_transforms", gsg.getMaxVertexTransforms())
            print("shader_model", gsg.getShaderModel())
            print("supports_3d_texture", bool(gsg.getSupports3dTexture()))
            print("supports_basic_shaders", bool(gsg.getSupportsBasicShaders()))
            print("supports_compressed_texture",  bool(gsg.getSupportsCompressedTexture()))
            print("supports_cube_map", bool(gsg.getSupportsCubeMap()))
            print("supports_depth_stencil", bool(gsg.getSupportsDepthStencil()))
            print("supports_depth_texture",  bool(gsg.getSupportsDepthTexture()))
            print("supports_generate_mipmap",  bool(gsg.getSupportsGenerateMipmap()))
            #print("supports_render_texture", bool(gsg.getSupportsRenderTexture()))
            print("supports_shadow_filter", bool(gsg.getSupportsShadowFilter()))
            print("supports_tex_non_pow2", bool(gsg.getSupportsTexNonPow2()))
            self.needs_pow2 = not bool(gsg.getSupportsTexNonPow2())
            if self.driver_vendor == 'Intel' and os.name == 'nt':
                # the Windows Intel driver misreports non-power-of-two support
                self.needs_pow2 = True
            print("supports_texture_combine", bool(gsg.getSupportsTextureCombine()))
            print("supports_texture_dot3", bool(gsg.getSupportsTextureDot3()))
            print("supports_texture_saved_result",  bool(gsg.getSupportsTextureSavedResult()))
            print("supports_two_sided_stencil",  bool(gsg.getSupportsTwoSidedStencil()))
            print("max_vertices_per_array", gsg.getMaxVerticesPerArray())
            print("max_vertices_per_primitive", gsg.getMaxVerticesPerPrimitive())
            print("supported_geom_rendering", gsg.getSupportedGeomRendering())
            print("supports_multisample", bool(gsg.getSupportsMultisample()))
            print("supports_occlusion_query", bool(gsg.getSupportsOcclusionQuery()))
            print("prefers_triangle_strips", bool(gsg.prefersTriangleStrips()))

        def tmpItemSel(self, arg):
            self.dialog.cleanup()
            print('result:', arg)
            
        def dialog_test(self, tmp):
            self.dialog = YesNoDialog(dialogName="YesNoCancelDialog", text="Please choose:", command=self.tmpItemSel)
            
        def mouse_state(self, index, state):
            self.mouse[index] = state
        
        def pretty_print(self, node, indent=''):
            for child in node.getChildren():
                print(indent, child)
                    
        def load(self, path):
            # vignette mask
            self.vignette_mask = None
            self.vignette_mask_small = None
            vfile = os.path.join(path, "vignette-mask.jpg")
            if os.path.exists(vfile):
                print("loading vignette correction mask:", vfile)
                self.vignette_mask = cv2.imread(vfile, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
                self.vignette_mask_small = cv2.resize(self.vignette_mask, (512, 512))
            files = []
            for file in sorted(os.listdir(path)):
                if fnmatch.fnmatch(file, '*.egg'):
                    # print('load:', file)
                    files.append(file)
            print('Loading models:')
            for file in tqdm(files):
                # load and reparent each egg file
                pandafile = Filename.fromOsSpecific(os.path.join(path, file))
                model = self.loader.loadModel(pandafile)
                # model.set_shader(self.shader)

                # print(file)
                # self.pretty_print(model, '  ')
                
                model.reparentTo(self.render)
                self.models.append(model)
                tex = model.findTexture('*')
                if tex is not None:
                    tex.setWrapU(Texture.WM_clamp)
                    tex.setWrapV(Texture.WM_clamp)
                self.base_textures.append(tex)

            # The egg models list "dummy.jpg" as their texture, which
            # doesn't exist.  Here we load the actual textures and
            # optionally apply vignette correction and adaptive histogram
            # equalization.
            print('Loading base textures:')
            for i, model in enumerate(tqdm(self.models)):
                base, ext = os.path.splitext(model.getName())
                image_file = None
                dir = os.path.join(proj.analysis_dir, 'models')
                tmp1 = os.path.join(dir, base + '.JPG')
                tmp2 = os.path.join(dir, base + '.jpg')
                if os.path.isfile(tmp1):
                    image_file = tmp1
                elif os.path.isfile(tmp2):
                    image_file = tmp2
                #print("texture file:", image_file)
                if False:
                    tex = self.loader.loadTexture(image_file)
                else:
                    rgb = cv2.imread(image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
                    rgb = np.flipud(rgb)
                    # vignette correction
                    if self.vignette_mask_small is not None:
                        rgb = rgb.astype('uint16') + self.vignette_mask_small
                        rgb = np.clip(rgb, 0, 255).astype('uint8')
                    # adaptive equalization
                    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                    hue, sat, val = cv2.split(hsv)
                    aeq = clahe.apply(val)
                    # recombine
                    hsv = cv2.merge((hue,sat,aeq))
                    # convert back to rgb
                    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
                    tex = Texture(base)
                    tex.setCompression(Texture.CMOff)
                    tex.setup2dTexture(512, 512, Texture.TUnsignedByte,
                                    Texture.FRgb)
                    tex.setRamImage(rgb)
                tex.setWrapU(Texture.WM_clamp)
                tex.setWrapV(Texture.WM_clamp)
                model.setTexture(tex, 1)
                self.base_textures[i] = tex
            self.sortImages()
            self.annotations.rebuild(self.view_size)

        def cam_move(self, x, y, z, sort=True):
            #print('move:', x, y)
            self.cam_pos[0] += x * self.view_size * base.getAspectRatio()
            self.cam_pos[1] += y * self.view_size
            if self.view_mode == 'best' and sort:
                self.image_select(0)
                self.sortImages()
            
        def cam_zoom(self, f):
            self.view_size /= f
            self.annotations.rebuild(self.view_size)

        def cam_fit(self, model):
            b = model.getTightBounds()
            #print('tight', b)
            if b:
                center = [ (b[0][0] + b[1][0]) * 0.5,
                        (b[0][1] + b[1][1]) * 0.5,
                        (b[0][2] + b[1][2]) * 0.5 ]
                self.cam_pos[0] = center[0]
                self.cam_pos[1] = center[1]
                vol = [ b[1][0] - b[0][0],
                        b[1][1] - b[0][1],
                        b[1][2] - b[0][2] ]
                if vol[1] * base.getAspectRatio() > vol[0]:
                    # set by y axis
                    self.view_size = vol[1] * 1.05
                else:
                    # set by x axis size
                    self.view_size = vol[0] * 1.05 / base.getAspectRatio()
                print("view_size:", self.view_size)
                self.annotations.rebuild(self.view_size)

        def toggle_filter(self):
            if self.filter == 'shader':
                self.filter = 'none'
                for m in self.models:
                    m.clear_shader()
            else:
                self.filter = 'shader'
                for m in self.models:
                    m.set_shader(self.shader)
            # print("Setting filter:", self.filter)

        def toggle_view_mode(self):
            if self.view_mode == 'best':
                self.view_mode = 'sequential'
            else:
                self.view_mode = 'best'
            print("Setting view mode:", self.view_mode)
            self.sortImages()

        def update_sequential_num(self, inc):
            if self.view_mode == 'sequential':
                self.sequential_num += inc
                if self.sequential_num < 0:
                    self.sequential_num = len(self.models) - 1
                elif self.sequential_num >= len(self.models):
                    self.sequential_num = 0
                print("Sequential image number:", self.sequential_num)
                self.sortImages()
    
        def quit(self):
            raise SystemExit

        def image_select(self, level):
            if self.view_mode == 'best':
                self.top_image = level
            elif self.view_mode == 'sequential':
                max_index = len(self.models) - 1
                self.sequential_num = int(round(float(level) * float(max_index) / 9.0))
            self.sortImages()
                
        # Define a procedure to move the camera.
        def updateCameraTask(self, task):
            self.camera.setPos(self.cam_pos[0], self.cam_pos[1], self.cam_pos[2])
            self.camera.setHpr(0, -90, 0)
            self.lens.setFilmSize(self.view_size*base.getAspectRatio(),
                                self.view_size)
            # reticle
            self.reticle.update(self.cam_pos, self.view_size)
            
            # annotations
            props = base.win.getProperties()
            y = props.getYSize()
            if y != self.last_ysize:
                self.annotations.rebuild(self.view_size)
                self.last_ysize = y
            mw = base.mouseWatcherNode
            if mw.hasMouse():
                mpos = mw.getMouse()
                if self.mouse[0]:
                    dx = self.last_mpos[0] - mpos[0]
                    dy = self.last_mpos[1] - mpos[1]
                    self.cam_move( dx * 0.5, dy * 0.5, 0, sort=False)
                elif not self.mouse[0] and self.last_mouse[0]:
                    # button up
                    self.cam_move( 0, 0, 0, sort=True)
                self.last_mpos = list(mpos)
                self.last_mouse[0] = self.mouse[0]
            return Task.cont

        # return true if cam_pos inside bounding corners
        def inbounds(self, b):
            if self.cam_pos[0] < b[0][0] or self.cam_pos[0] > b[1][0]:
                return False
            elif self.cam_pos[1] < b[0][1] or self.cam_pos[1] > b[1][1]:
                return False
            else:
                return True
            
        def sortImages(self):
            # sort images by (hopefully) best covering view center
            result_list = []
            for i, m in enumerate(self.models):
                b = m.getTightBounds()
                #print('tight', b)
                if b:
                    center = [ (b[0][0] + b[1][0]) * 0.5,
                            (b[0][1] + b[1][1]) * 0.5,
                            (b[0][2] + b[1][2]) * 0.5 ]
                    vol = [ b[1][0] - b[0][0],
                            b[1][1] - b[0][1],
                            b[1][2] - b[0][2] ]
                    span = math.sqrt(vol[0]*vol[0] + vol[1]*vol[1] + vol[2]*vol[2])
                    dx = center[0] - self.cam_pos[0]
                    dy = center[1] - self.cam_pos[1]
                    dist = math.sqrt(dx*dx + dy*dy)
                    #print('center:', center, 'span:', span, 'dist:', dist)
                    if self.view_mode == 'best':
                        metric = dist + (span * 0.1)
                        if not self.inbounds(b):
                            metric += 1000
                    elif self.view_mode == 'sequential':
                        metric = abs(i - self.sequential_num)
                    result_list.append( [metric, m, i] )
            result_list = sorted(result_list, key=lambda fields: fields[0],
                                reverse=True)
            if self.view_mode == 'best':
                top_entry = result_list[-1-self.top_image]
            else:
                top_entry = result_list[-1]
            top = top_entry[1]
            top.setColor(1.0, 1.0, 1.0, 1.0)
            self.updateTexture(top)
            if self.view_mode == 'sequential':
                self.cam_fit(top)
            
            for i, line in enumerate(result_list):
                m = line[1]
                if m == top:
                    m.setBin("fixed", 2*len(self.models))
                elif m.getName() in tcache:
                    # reward draw order for models with high res texture loaded
                    m.setBin("fixed", i + len(self.models))
                else:
                    m.setBin("fixed", i)
                m.setDepthTest(False)
                m.setDepthWrite(False)
                if m != top:
                    gray = 1.0
                    m.setColor(gray, gray, gray, 1.0)

        def updateTexture(self, main):
            dir_node = getNode('/config/directories', True)
            
            # reset base textures
            for i, m in enumerate(self.models):
                if m != main:
                    if m.getName() in tcache:
                        fulltex = tcache[m.getName()][1]
                        self.models[i].setTexture(fulltex, 1)
                    else:
                        if self.base_textures[i] is not None:
                            self.models[i].setTexture(self.base_textures[i], 1)
                else:
                    print(m.getName())
                    if m.getName() in tcache:
                        fulltex = tcache[m.getName()][1]
                        self.models[i].setTexture(fulltex, 1)
                        continue
                    base, ext = os.path.splitext(m.getName())
                    image_file = None
                    search = [ project_dir, os.path.join(project_dir, 'images') ]
                    for dir in search:
                        tmp1 = os.path.join(dir, base + '.JPG')
                        tmp2 = os.path.join(dir, base + '.jpg')
                        if os.path.isfile(tmp1):
                            image_file = tmp1
                        elif os.path.isfile(tmp2):
                            image_file = tmp2
                    if not image_file:
                        print('Warning: no full resolution image source file found:', base)
                    else:
                        if True:
                            # example of passing an opencv image as a
                            # panda texture
                            print(base, image_file)
                            #image = proj.findImageByName(base)
                            #print(image)
                            rgb = cv2.imread(image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
                            rgb = np.flipud(rgb)
                            # vignette correction
                            if self.vignette_mask is not None:
                                rgb = rgb.astype('uint16') + self.vignette_mask
                                rgb = np.clip(rgb, 0, 255).astype('uint8')

                            h, w = rgb.shape[:2]
                            print('shape: (%d,%d)' % (w, h))
                            rescale = False
                            if h > self.max_texture_dimension:
                                h = self.max_texture_dimension
                                rescale = True
                            if w > self.max_texture_dimension:
                                w = self.max_texture_dimension
                                rescale = True
                            if self.needs_pow2:
                                h2 = 2**math.floor(math.log(h,2))
                                w2 = 2**math.floor(math.log(w,2))
                                if h2 != h:
                                    h = h2
                                    rescale = True
                                if w2 != w:
                                    w = w2
                                    rescale = True
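                            # e.g. a 4000x3000 source image on a card that
                            # requires power-of-two textures would be
                            # reduced to 2048x2048 here (rough illustrative
                            # numbers, not from the original)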
                            if rescale:
                                print("Notice: rescaling texture to (%d,%d) to honor video card capability." % (w, h))
                                rgb = cv2.resize(rgb, (w,h))

                            # filter_by = 'none'
                            filter_by = 'equalize_value'
                            # filter_by = 'equalize_rgb'
                            # filter_by = 'equalize_blue'
                            # filter_by = 'equalize_green'
                            # filter_by = 'equalize_red'
                            # filter_by = 'red/green'
                            if filter_by == 'none':
                                b, g, r = cv2.split(rgb)
                                result = cv2.merge((b, g, r))
                            elif filter_by == 'equalize_value':
                                # equalize val (essentially gray scale level)
                                hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                                hue, sat, val = cv2.split(hsv)
                                aeq = clahe.apply(val)
                                # recombine
                                hsv = cv2.merge((hue,sat,aeq))
                                # convert back to rgb
                                result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
                            elif filter_by == 'equalize_rgb':
                                # equalize individual b, g, r channels
                                b, g, r = cv2.split(rgb)
                                b = clahe.apply(b)
                                g = clahe.apply(g)
                                r = clahe.apply(r)
                                result = cv2.merge((b,g,r))
                            elif filter_by == 'equalize_blue':
                                # highlight blue content: weight the blue channel by hue distance from blue
                                hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                                hue, sat, val = cv2.split(hsv)
                                # blue hue = 120
                                
                                # slide 120 -> 90 (center of 0-180 range
                                # with mod() roll over)
                                diff = np.mod(hue.astype('float64') - 30, 180)
                                # move this center point to 0 (-90 to +90
                                # range) and take absolute value
                                # (distance)
                                diff = np.abs(diff - 90)
                                # scale to 0 to 1 (1 being the closest to
                                # our target hue)
                                diff = 1.0 - diff / 90
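                                # e.g. a pure blue pixel (hue 120) yields
                                # diff = 1.0 here, while red or green hues
                                # fall off toward roughly 0.33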
                                print('hue:', np.amin(hue), np.amax(hue))
                                print('sat:', np.amin(sat), np.amax(sat))
                                print('diff:', np.amin(diff), np.amax(diff))
                                #print(diff)
                                #g = (256 - (256.0/90.0)*diff).astype('uint8')
                                b = (diff * sat).astype('uint8')
                                g = np.zeros(hue.shape, dtype='uint8')
                                r = np.zeros(hue.shape, dtype='uint8')
                                #g = clahe.apply(g)
                                result = cv2.merge((b,g,r))
                                print(result.shape, result.dtype)
                            elif filter_by == 'equalize_green':
                                # highlight green content: weight the green channel by hue distance from green
                                hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                                hue, sat, val = cv2.split(hsv)
                                # green hue = 60
                                
                                # slide 60 -> 90 (center of 0-180 range
                                # with mod() roll over)
                                diff = np.mod(hue.astype('float64') + 30, 180)
                                # move this center point to 0 (-90 to +90
                                # range) and take absolute value
                                # (distance)
                                diff = np.abs(diff - 90)
                                # scale to 0 to 1 (1 being the closest to
                                # our target hue)
                                diff = 1.0 - diff / 90
                                print('hue:', np.amin(hue), np.amax(hue))
                                print('sat:', np.amin(sat), np.amax(sat))
                                print('diff:', np.amin(diff), np.amax(diff))
                                #print(diff)
                                b = np.zeros(hue.shape, dtype='uint8')
                                g = (diff * sat).astype('uint8')
                                r = np.zeros(hue.shape, dtype='uint8')
                                #g = clahe.apply(g)
                                result = cv2.merge((b,g,r))
                                print(result.shape, result.dtype)
                            elif filter_by == 'equalize_red':
                                # highlight red content: weight the red channel by hue distance from red
                                hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                                hue, sat, val = cv2.split(hsv)
                                # red hue = 0
                                
                                # slide 0 -> 90 (center of 0-180 range
                                # with mod() roll over)
                                diff = np.mod(hue.astype('float64') + 90, 180)
                                # move this center point to 0 (-90 to +90
                                # range) and take absolute value
                                # (distance)
                                diff = np.abs(diff - 90)
                                # scale to 0 to 1 (1 being the closest to
                                # our target hue)
                                diff = 1.0 - diff / 90
                                print('hue:', np.amin(hue), np.amax(hue))
                                print('sat:', np.amin(sat), np.amax(sat))
                                print('diff:', np.amin(diff), np.amax(diff))
                                b = np.zeros(hue.shape, dtype='uint8')
                                g = np.zeros(hue.shape, dtype='uint8')
                                r = (diff * sat).astype('uint8')
                                result = cv2.merge((b,g,r))
                                print(result.shape, result.dtype)
                            elif filter_by == 'red/green':
                                # visualize the red/green ratio (and its inverse) per pixel
                                max = 4.0
                                b, g, r = cv2.split(rgb)
                                ratio = r / (g.astype('float64')+1.0)
                                ratio = np.clip(ratio, 0, max)
                                inv = g / (r.astype('float64')+1.0)
                                inv = np.clip(inv, 0, max)
                                max_ratio = np.amax(ratio)
                                max_inv = np.amax(inv)
                                print(max_ratio, max_inv)
                                b[:] = 0
                                g = (inv * (255/max)).astype('uint8')
                                r = (ratio * (255/max)).astype('uint8')
                                result = cv2.merge((b,g,r))
                                print(result.shape, result.dtype)
                                
                            fulltex = Texture(base)
                            fulltex.setCompression(Texture.CMOff)
                            fulltex.setup2dTexture(w, h, Texture.TUnsignedByte, Texture.FRgb)
                            fulltex.setRamImage(result)
                            # fulltex.load(rgb) # for loading a pnm image
                            fulltex.setWrapU(Texture.WM_clamp)
                            fulltex.setWrapV(Texture.WM_clamp)
                            m.setTexture(fulltex, 1)
                            tcache[m.getName()] = [m, fulltex, time.time()]
                        else:
                            print(image_file)
                            fulltex = self.loader.loadTexture(image_file)
                            fulltex.setWrapU(Texture.WM_clamp)
                            fulltex.setWrapV(Texture.WM_clamp)
                            #print('fulltex:', fulltex)
                            m.setTexture(fulltex, 1)
                            tcache[m.getName()] = [m, fulltex, time.time()]
            cachesize = 10
            while len(tcache) > cachesize:
                oldest_time = time.time()
                oldest_name = ""
                for name in tcache:
                    if tcache[name][2] < oldest_time:
                        oldest_time = tcache[name][2]
                        oldest_name = name
                del tcache[oldest_name]
        
    app = MyApp()
    app.load( os.path.join(proj.analysis_dir, "models") )
    app.run()