# for image in proj.image_list: # if image.sum_count > 0: # image.z_avg = image.sum_values / float(image.sum_count) # print(image.name, 'avg elev:', image.z_avg) # else: # image.z_avg = 0 # compute the uv grid for each image and project each point out into # ned space, then intersect each vector with the srtm / ground / # delauney surface. name_list = [] for image in proj.image_list: name_list.append(image.name) #print(image.name, image.z_avg) width, height = camera.get_image_params() # scale the K matrix if we have scaled the images K = camera.get_K(optimized=True) IK = np.linalg.inv(K) grid_list = [] u_list = np.linspace(0, width, ac3d_steps + 1) v_list = np.linspace(0, height, ac3d_steps + 1) #print "u_list:", u_list #print "v_list:", v_list for v in v_list: for u in u_list: grid_list.append([u, v]) #print 'grid_list:', grid_list image.distorted_uv = proj.redistort(grid_list, optimized=True)
# Body of a pairwise-matching loop: `i`, `j`, `dist`, and `yaw_diff` are
# defined by the enclosing loop, which is outside this chunk.  Compares
# image pair (i1, i2): skips pairs that already have enough matches,
# otherwise detects features as needed and runs a FLANN knn match.
i1 = proj.image_list[i]
i2 = proj.image_list[j]
# print(i1.match_list)
num_matches = len(i1.match_list[i2.name])
print("dist: %.1f" % dist, "yaw: %.1f" % yaw_diff, i1.name, i2.name, num_matches)
print("rev matches:", len(i2.match_list[i1.name]))
# Pair already has a healthy match count; nothing more to do for it.
if num_matches >= 25:
    continue
# Lazily detect features (keypoints + descriptors) for either image that
# hasn't been processed yet, at the scale given on the command line.
if not len(i1.kp_list) or not len(i1.des_list):
    i1.detect_features(args.scale)
if not len(i2.kp_list) or not len(i2.des_list):
    i2.detect_features(args.scale)
w, h = camera.get_image_params()
# Image diagonal in (scaled) pixels — printed for diagnostics.
diag = int(math.sqrt(h * h + w * w))
print("h:", h, "w:", w)
print("scaled diag:", diag)
rgb1 = i1.load_rgb()
rgb2 = i2.load_rgb()
# FLANN kd-tree matcher over the two descriptor sets; k=2 returns the two
# nearest neighbors per descriptor (for a later ratio test, presumably —
# the filtering step is outside this chunk).
FLANN_INDEX_KDTREE = 1
flann_params = {'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}
search_params = dict(checks=100)
matcher = cv2.FlannBasedMatcher(flann_params, search_params)
matches = matcher.knnMatch(i1.des_list, i2.des_list, k=2)
print("Raw matches:", len(matches))
# numpy rotation matrix