Example #1
matcher_node.setFloat('match_ratio', args.match_ratio)
matcher_node.setString('filter', args.filter)
matcher_node.setInt('min_pairs', args.min_pairs)
matcher_node.setFloat('min_dist', args.min_dist)
matcher_node.setFloat('max_dist', args.max_dist)
matcher_node.setInt('min_chain_len', args.min_chain_length)

# save any config changes
proj.save()

# camera calibration
K = proj.cam.get_K()
print("K:", K)

# fire up the matcher
m = Matcher.Matcher()
m.configure()
m.robustGroupMatches(proj.image_list, K,
                     filter=args.filter, review=False)

# The following code is deprecated ...
do_old_match_consolidation = False
if do_old_match_consolidation:
    # build a list of all 'unique' keypoints.  Include an index to each
    # containing image and feature.
    matches_dict = {}
    for i, i1 in enumerate(proj.image_list):
        for j, matches in enumerate(i1.match_list):
            if j > i:
                for pair in matches:
                    key = "%d-%d" % (i, pair[0])
Example #2
def clean(project_dir):
    m = Matcher.Matcher()

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    proj.load_features(descriptors=False)
    #proj.undistort_keypoints()
    proj.load_match_pairs()

    # compute keypoint usage map
    proj.compute_kp_usage()

    # For some feature detection algorithms we expect duplicated feature
    # uv coordinates.  These duplicates may have different scaling or
    # other attributes important during feature matching, yet ultimately
    # resolve to the same uv coordinate in an image.
    print("Indexing features by unique uv coordinates:")
    for image in tqdm(proj.image_list):
        # pass one, build a tmp structure of unique keypoints (by uv) and
        # the index of the first instance.
        image.kp_remap = {}
        used = 0
        for i, kp in enumerate(image.kp_list):
            if image.kp_used[i]:
                used += 1
                key = "%.2f-%.2f" % (kp.pt[0], kp.pt[1])
                if not key in image.kp_remap:
                    image.kp_remap[key] = i
                else:
                    #print("%d -> %d" % (i, image.kp_remap[key]))
                    #print(" ", image.coord_list[i], image.coord_list[image.kp_remap[key]])
                    pass

        #print(" features used:", used)
        #print(" unique by uv and used:", len(image.kp_remap))

    # after feature matching we don't care about other attributes, just
    # the uv coordinate.
    #
    # notes: we do a first pass duplicate removal during the original
    # matching process.  This removes 1->many relationships, or duplicate
    # matches at different scales within a match pair.  However, different
    # pairs could reference the same keypoint at different scales, so
    # duplicates could still exist.  This finds all the duplicates within
    # the entire match set and collapses them down to eliminate any
    # redundancy.
    print("Merging keypoints with duplicate uv coordinates:")
    for i, i1 in enumerate(tqdm(proj.image_list)):
        for key in i1.match_list:
            matches = i1.match_list[key]
            count = 0
            i2 = proj.findImageByName(key)
            if i2 is None:
                # ignore pairs outside our area set
                continue
            for k, pair in enumerate(matches):
                # print pair
                idx1 = pair[0]
                idx2 = pair[1]
                kp1 = i1.kp_list[idx1]
                kp2 = i2.kp_list[idx2]
                key1 = "%.2f-%.2f" % (kp1.pt[0], kp1.pt[1])
                key2 = "%.2f-%.2f" % (kp2.pt[0], kp2.pt[1])
                # print key1, key2
                new_idx1 = i1.kp_remap[key1]
                new_idx2 = i2.kp_remap[key2]
                # count the number of match rewrites
                if idx1 != new_idx1 or idx2 != new_idx2:
                    count += 1
                if idx1 != new_idx1:
                    # sanity check
                    uv1 = list(i1.kp_list[idx1].pt)
                    new_uv1 = list(i1.kp_list[new_idx1].pt)
                    if not np.allclose(uv1, new_uv1):
                        print("OOPS!!!")
                        print("  index 1: %d -> %d" % (idx1, new_idx1))
                        print("  [%.2f, %.2f] -> [%.2f, %.2f]" %
                              (uv1[0], uv1[1], new_uv1[0], new_uv1[1]))
                if idx2 != new_idx2:
                    # sanity check
                    uv2 = list(i2.kp_list[idx2].pt)
                    new_uv2 = list(i2.kp_list[new_idx2].pt)
                    if not np.allclose(uv2, new_uv2):
                        print("OOPS!")
                        print("  index 2: %d -> %d" % (idx2, new_idx2))
                        print("  [%.2f, %.2f] -> [%.2f, %.2f]" %
                              (uv2[0], uv2[1], new_uv2[0], new_uv2[1]))
                # rewrite matches
                matches[k] = [new_idx1, new_idx2]
            #if count > 0:
            #    print('Match:', i1.name, 'vs', i2.name, '%d/%d' % ( count, len(matches) ), 'rewrites')

    # enable the following code to visualize the matches after collapsing
    # identical uv coordinates
    if False:
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if i2.name in i1.match_list and len(i1.match_list[i2.name]):
                    print("Showing %s vs %s" % (i1.name, i2.name))
                    status = m.showMatchOrient(i1, i2, i1.match_list[i2.name])

    # after collapsing by uv coordinate, we could be left with duplicate
    # matches (matched at different scales or other attributes, but same
    # exact point.)
    #
    # notes: this really shouldn't (!) (by my best current understanding)
    # be able to find any dups.  These should all get caught in the
    # original pair matching step.
    print("Checking for pair duplicates (there never should be any):")
    for i, i1 in enumerate(tqdm(proj.image_list)):
        for key in i1.match_list:
            matches = i1.match_list[key]
            i2 = proj.findImageByName(key)
            if i2 is None:
                # ignore pairs not in our area set
                continue
            count = 0
            pair_dict = {}
            new_matches = []
            for k, pair in enumerate(matches):
                pair_key = "%d-%d" % (pair[0], pair[1])
                if not pair_key in pair_dict:
                    pair_dict[pair_key] = True
                    new_matches.append(pair)
                else:
                    count += 1
            if count > 0:
                print('Match:', i1.name, 'vs', i2.name, 'matches:',
                      len(matches), 'dups:', count)
            i1.match_list[key] = new_matches

    # enable the following code to visualize the matches after eliminating
    # duplicates (duplicates can happen after collapsing uv coordinates.)
    if False:
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if i2.name in i1.match_list and len(i1.match_list[i2.name]):
                    print("Showing %s vs %s" % (i1.name, i2.name))
                    status = m.showMatchOrient(i1, i2, i1.match_list[i2.name])

    # Do we have a keypoint in i1 matching multiple keypoints in i2?
    #
    # Notes: again these shouldn't exist here, but let's check anyway.  If
    # we start finding these here, I should hunt for the reason earlier in
    # the code that lets some through, or try to understand what larger
    # logic principle allows some of these to still exist here.
    print(
        "Testing for 1 vs. n keypoint duplicates (there never should be any):")
    for i, i1 in enumerate(tqdm(proj.image_list)):
        for key in i1.match_list:
            matches = i1.match_list[key]
            i2 = proj.findImageByName(key)
            if i2 is None:
                # skip pairs outside our area set
                continue
            count = 0
            kp_dict = {}
            for k, pair in enumerate(matches):
                if not pair[0] in kp_dict:
                    kp_dict[pair[0]] = pair[1]
                else:
                    print("Warning keypoint idx", pair[0],
                          "already used in another match.")
                    uv2a = list(i2.kp_list[kp_dict[pair[0]]].pt)
                    uv2b = list(i2.kp_list[pair[1]].pt)
                    if not np.allclose(uv2a, uv2b):
                        print("  [%.2f, %.2f] -> [%.2f, %.2f]" %
                              (uv2a[0], uv2a[1], uv2b[0], uv2b[1]))
                    count += 1
            if count > 0:
                print('Match:', i1.name, 'vs', i2.name, 'matches:',
                      len(matches), 'dups:', count)

    print("Constructing unified match structure:")
    # create an initial pair-wise match list
    matches_direct = []
    for i, img in enumerate(tqdm(proj.image_list)):
        # print img.name
        for key in img.match_list:
            j = proj.findIndexByName(key)
            if j is None:
                continue
            matches = img.match_list[key]
            # print proj.image_list[j].name
            if j > i:
                for pair in matches:
                    # ned place holder, in use flag
                    match = [None, -1]
                    # camera/feature references
                    match.append([i, pair[0]])
                    match.append([j, pair[1]])
                    matches_direct.append(match)
                    # print pair, match

    sum = 0.0
    for match in matches_direct:
        sum += len(match[2:])

    if len(matches_direct):
        print("Total image pairs in image set:", len(matches_direct))
        print("Keypoint average instances = %.1f (should be 2.0 here)" %
              (sum / len(matches_direct)))

    # Note to self: I don't think we need the matches_direct file any more
    # (except for debugging possibly in the future.)
    #
    #print("Writing matches_direct file ...")
    #direct_file = os.path.join(proj.analysis_dir, "matches_direct")
    #pickle.dump(matches_direct, open(direct_file, "wb"))

    # collect/group match chains that refer to the same keypoint

    print("Linking common matches together into chains:")
    count = 0
    done = False
    while not done:
        print("Iteration %d:" % count)
        count += 1
        matches_new = []
        matches_lookup = {}
        for i, match in enumerate(tqdm(matches_direct)):
            # scan if any of these match points have been previously seen
            # and record the match index
            index = -1
            for p in match[2:]:
                key = "%d-%d" % (p[0], p[1])
                if key in matches_lookup:
                    index = matches_lookup[key]
                    break
            if index < 0:
                # not found, append to the new list
                for p in match[2:]:
                    key = "%d-%d" % (p[0], p[1])
                    matches_lookup[key] = len(matches_new)
                matches_new.append(list(match))  # shallow copy
            else:
                # found a previous reference, append these match items
                existing = matches_new[index]
                for p in match[2:]:
                    key = "%d-%d" % (p[0], p[1])
                    found = False
                    for e in existing[2:]:
                        if p[0] == e[0]:
                            found = True
                            break
                    if not found:
                        # add
                        existing.append(list(p))  # shallow copy
                        matches_lookup[key] = index
                # no 3d location estimation yet
                # # attempt to combine location equitably
                # size1 = len(match[2:])
                # size2 = len(existing[2:])
                # ned1 = np.array(match[0])
                # ned2 = np.array(existing[0])
                # avg = (ned1 * size1 + ned2 * size2) / (size1 + size2)
                # existing[0] = avg.tolist()
                # # print(ned1, ned2, existing[0])
                # # print "new:", existing
                # # print
        if len(matches_new) == len(matches_direct):
            done = True
        else:
            matches_direct = list(matches_new)  # shallow copy

    # replace the keypoint index in the matches file with the actual kp
    # values.  This will save time later and avoid needing to load the
    # full original feature files which are quite large.  This also will
    # reduce the in-memory footprint for many steps.
    print('Replacing keypoint indices with uv coordinates:')
    for match in tqdm(matches_direct):
        for m in match[2:]:
            kp = proj.image_list[m[0]].kp_list[m[1]].pt
            m[1] = list(kp)
        # print(match)

    # sort by longest match chains first
    print("Sorting matches by longest chain first.")
    matches_direct.sort(key=len, reverse=True)

    sum = 0.0
    for i, match in enumerate(matches_direct):
        refs = len(match[2:])
        sum += refs

    if count >= 1:
        print("Total unique features in image set:", len(matches_direct))
        print("Keypoint average instances:",
              "%.2f" % (sum / len(matches_direct)))

    print("Writing full group chain matches_grouped file ...")
    pickle.dump(matches_direct,
                open(os.path.join(proj.analysis_dir, "matches_grouped"), "wb"))
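
A minimal usage sketch for clean() and for reading back the matches_grouped file it writes. The record layout follows the code above (ned placeholder, use flag, then one [image index, [u, v]] entry per view); the project path and analysis directory location are assumptions:

import os
import pickle

project_dir = '/path/to/project'   # placeholder
clean(project_dir)

# matches_grouped is written to proj.analysis_dir above; adjust this path
# to your project layout (the subdirectory name here is an assumption).
grouped_file = os.path.join(project_dir, 'ImageAnalysis', 'matches_grouped')
with open(grouped_file, 'rb') as f:
    matches_grouped = pickle.load(f)

for match in matches_grouped[:5]:
    # match = [ned placeholder, use flag, [image idx, [u, v]], ...]
    print('chain length:', len(match[2:]), 'first view:', match[2])
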
Example #3
def match(project_dir, matching_options):

    matcher = matching_options[0]
    match_ratio = matching_options[1]
    min_pairs = matching_options[2]
    min_dist = matching_options[3]
    max_dist = matching_options[4]
    filters = matching_options[5]
    min_chain_length = matching_options[6]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    proj.load_features(
        descriptors=False)  # descriptors cached on the fly later
    proj.undistort_keypoints()
    proj.load_match_pairs()

    matcher_node = getNode('/config/matcher', True)
    matcher_node.setString('matcher', matcher)
    matcher_node.setFloat('match_ratio', match_ratio)
    matcher_node.setString('filter', filters)
    matcher_node.setInt('min_pairs', min_pairs)
    matcher_node.setFloat('min_dist', min_dist)
    matcher_node.setFloat('max_dist', max_dist)
    matcher_node.setInt('min_chain_len', min_chain_length)

    # save any config changes
    proj.save()

    # camera calibration
    K = proj.cam.get_K()
    print("K:", K)

    # fire up the matcher
    m = Matcher.Matcher()
    m.configure()
    m.robustGroupMatches(proj.image_list, K, filter=filters, review=False)

    # The following code is deprecated ...
    do_old_match_consolidation = False
    if do_old_match_consolidation:
        # build a list of all 'unique' keypoints.  Include an index to each
        # containing image and feature.
        matches_dict = {}
        for i, i1 in enumerate(proj.image_list):
            for j, matches in enumerate(i1.match_list):
                if j > i:
                    for pair in matches:
                        key = "%d-%d" % (i, pair[0])
                        m1 = [i, pair[0]]
                        m2 = [j, pair[1]]
                        if key in matches_dict:
                            feature_dict = matches_dict[key]
                            feature_dict['pts'].append(m2)
                        else:
                            feature_dict = {}
                            feature_dict['pts'] = [m1, m2]
                            matches_dict[key] = feature_dict
        #print match_dict
        count = 0.0
        sum = 0.0
        for key in matches_dict:
            sum += len(matches_dict[key]['pts'])
            count += 1
        if count > 0.1:
            print("total unique features in image set = %d" % count)
            print("kp average instances = %.4f" % (sum / count))

        # compute an initial guess at the 3d location of each unique feature
        # by averaging the locations of each projection
        for key in matches_dict:
            feature_dict = matches_dict[key]
            sum = np.array([0.0, 0.0, 0.0])
            for p in feature_dict['pts']:
                sum += proj.image_list[p[0]].coord_list[p[1]]
            ned = sum / len(feature_dict['pts'])
            feature_dict['ned'] = ned.tolist()

    def update_match_location(match):
        sum = np.array([0.0, 0.0, 0.0])
        for p in match[1:]:
            # print proj.image_list[ p[0] ].coord_list[ p[1] ]
            sum += proj.image_list[p[0]].coord_list[p[1]]
        ned = sum / len(match[1:])
        # print "avg =", ned
        match[0] = ned.tolist()
        return match

    if False:
        print("Constructing unified match structure...")
        print(
            "This probably will fail because we didn't do the ground intersection at the start..."
        )
        matches_direct = []
        for i, image in enumerate(proj.image_list):
            # print image.name
            for j, matches in enumerate(image.match_list):
                # print proj.image_list[j].name
                if j > i:
                    for pair in matches:
                        match = []
                        # ned place holder
                        match.append([0.0, 0.0, 0.0])
                        match.append([i, pair[0]])
                        match.append([j, pair[1]])
                        update_match_location(match)
                        matches_direct.append(match)
                        # print pair, match

        print("Writing match file ...")
        pickle.dump(matches_direct, open(project_dir + "/matches_direct",
                                         "wb"))
Example #4
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument(
    '--stddev',
    type=float,
    default=5,
    help='how many stddevs above the mean for auto discarding features')

args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_images_info()
proj.load_features()
proj.undistort_keypoints()

matcher = Matcher.Matcher()

print("Loading match points (direct)...")
matches = pickle.load(open(os.path.join(args.project, "matches_direct"), "rb"))

print('num images:', len(proj.image_list))

# traverse the matches structure and create a pair-wise match
# structure.  (Start with an empty n x n list of empty pair lists,
# then fill in the structures.)
pairs = []
homography = []
averages = []
stddevs = []
status_flags = []
dsts = []
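
The comment above describes an n x n pair-wise structure, but the snippet ends right after declaring the empty containers. A minimal sketch of the initialization and fill step, assuming each matches_direct record keeps its [image index, feature index] references in match[1:] (the older layout used elsewhere in these examples):

# Rebuild pairs as an n x n grid of (initially empty) pair lists, then fill
# it from the loaded matches; this mirrors the reconstruction loop in Example #5.
n = len(proj.image_list)
pairs = [[[] for _ in range(n)] for _ in range(n)]
for match in matches:
    for p1 in match[1:]:
        for p2 in match[1:]:
            if p1 != p2:
                pairs[p1[0]][p2[0]].append([p1[1], p2[1]])
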
Example #5
def show_matches(project_dir, show_matches_option):

    orders = show_matches_option[0]
    orient = show_matches_option[1]
    image = show_matches_option[2]
    index = show_matches_option[3]
    direct = show_matches_option[4]
    sba = show_matches_option[5]

    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    proj.load_features()
    if direct:
        # recreate the pair-wise match structure
        matches_list = pickle.load( open( os.path.join(project_dir, "matches_direct"), "rb" ) )
        for i1 in proj.image_list:
            i1.match_list = []
            for i2 in proj.image_list:
                i1.match_list.append([])
        for match in matches_list:
            for p1 in match[1:]:
                for p2 in match[1:]:
                    if p1 == p2:
                        pass
                    else:
                        i = p1[0]
                        j = p2[0]
                        img = proj.image_list[i]
                        img.match_list[j].append( [p1[1], p2[1]] )
        # for i in range(len(proj.image_list)):
        #     print(len(proj.image_list[i].match_list))
        #     print(proj.image_list[i].match_list)
        #     for j in range(len(proj.image_list)):
        #         print(i, j, len(proj.image_list[i].match_list[j]),
        #               proj.image_list[i].match_list[j])
    else:
        proj.load_match_pairs()

    # lookup ned reference
    ref_node = getNode("/config/ned_reference", True)
    ref = [ ref_node.getFloat('lat_deg'),
            ref_node.getFloat('lon_deg'),
            ref_node.getFloat('alt_m') ]

    m = Matcher.Matcher()

    order = 'fewest-matches'

    if image:
        i1 = proj.findImageByName(image)
        if i1 is not None:
            for key in i1.match_list:
                print(key, len(i1.match_list[key]))
                if len(i1.match_list[key]):
                    i2 = proj.findImageByName(key)
                    print("Showing %s vs %s (%d matches)" % (i1.name, i2.name, len(i1.match_list[key])))
                    status = m.showMatchOrient(i1, i2, i1.match_list[key],
                                            orient=orient)
        else:
            print("Cannot locate:", image)
    elif index:
        i1 = proj.image_list[index]
        if i1 is not None:
            for j, i2 in enumerate(proj.image_list):
                if len(i1.match_list[j]):
                    print("Showing %s vs %s" % (i1.name, i2.name))
                    status = m.showMatchOrient(i1, i2, i1.match_list[j],
                                            orient=orient)
        else:
            print("Cannot locate:", index)
    elif order == 'sequential':
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if i2.name in i1.match_list:
                    if len(i1.match_list[i2.name]):
                        print("Showing %s vs %s" % (i1.name, i2.name))
                        status = m.showMatchOrient(i1, i2, i1.match_list[i2.name],
                                                orient=orient)
    elif order == 'fewest-matches':
        match_list = []
        for i, i1 in enumerate(proj.image_list):
            for j, i2 in enumerate(proj.image_list):
                if i >= j:
                    # don't repeat reciprocal matches
                    continue
                if len(i1.match_list[j]):
                    match_list.append( ( len(i1.match_list[j]), i, j ) )
        match_list = sorted(match_list,
                            key=lambda fields: fields[0],
                            reverse=False)
        for match in match_list:
            count = match[0]
            i = match[1]
            j = match[2]
            i1 = proj.image_list[i]
            i2 = proj.image_list[j]
            print("Showing %s vs %s (matches=%d)" % (i1.name, i2.name, count))
            status = m.showMatchOrient(i1, i2, i1.match_list[j],
                                    orient=orient)
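
A usage sketch for show_matches(). The list order matches how the function unpacks show_matches_option; note that the snippet above hard-codes order = 'fewest-matches', so the first entry is not used as written, and the orient value is a placeholder:

show_matches_option = [
    None,         # [0] orders (ignored; the snippet hard-codes its order)
    'relative',   # [1] orient, forwarded to showMatchOrient() (placeholder)
    None,         # [2] image name to focus on (or None)
    None,         # [3] image index to focus on (or None)
    False,        # [4] direct: rebuild pair matches from matches_direct
    False,        # [5] sba (unpacked but unused in this snippet)
]
show_matches('/path/to/project', show_matches_option)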