Example #1
        # horizontal edges
        for u in u_list:
            grid_list.append( [u, 0] )
            grid_list.append( [u, height] )
        # vertical edges (minus corners)
        for v in v_list[1:-1]:
            grid_list.append( [0, v] )
            grid_list.append( [width, v] )
        #print('grid_list:', grid_list)
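        # (assumption: u_list and v_list are evenly spaced pixel coordinate
        # lists spanning the image width and height, e.g. built earlier with
        # something like np.linspace(0, width, steps))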
        
        distorted_uv = proj.redistort(grid_list, optimized=True)
        # note: the next line immediately overrides the redistorted
        # coordinates with the raw grid coordinates, so the redistort()
        # result above is effectively unused
        distorted_uv = grid_list

        if args.direct:
            proj_list = project.projectVectors( IK, image.get_body2ned(),
                                                image.get_cam2body(),
                                                grid_list )
        else:
            #print(image.get_body2ned(opt=True))
            proj_list = project.projectVectors( IK,
                                                image.get_body2ned(opt=True),
                                                image.get_cam2body(),
                                                grid_list )
        #print('proj_list:', proj_list)

        if args.direct:
            ned, ypr, quat = image.get_camera_pose()
        else:
            ned, ypr, quat = image.get_camera_pose(opt=True)
        #print('cam orig:', image.camera_pose['ned'], 'optimized:', ned)
        if args.ground:
Example #2
    dist = line[0]   # assumption: first element is the distance printed below
    yaw_diff = line[1]
    i1 = proj.image_list[ line[2] ]
    i2 = proj.image_list[ line[3] ]
    # print(i1.match_list)
    num_matches = len(i1.match_list[i2.name])
    print("dist: %.1f" % dist, "yaw: %.1f" % yaw_diff, i1.name, i2.name, num_matches)
    if num_matches > 0:
        continue

    # project a grid of uv coordinates from image 2 out onto the
    # supposed ground plane.  Then back project these 3d world points
    # into image 1 uv coordinates.  Compute an estimated 'ideal'
    # homography relationship between the two images as a starting
    # search point for feature matches.
    
    proj_list = project.projectVectors( IK, i2.get_body2ned(),
                                        i2.get_cam2body(), grid_list )
    ned2, ypr2, quat2 = i2.get_camera_pose()
    pts_ned = project.intersectVectorsWithGroundPlane(ned2, args.ground,
                                                      proj_list)
    rvec1, tvec1 = i1.get_proj()
    reproj_points, jac = cv2.projectPoints(np.array(pts_ned), rvec1, tvec1,
                                           K, dist_coeffs)
    reproj_list = reproj_points.reshape(-1,2).tolist()
    # print("reprojected points:", reproj_list)

    print("Should filter points outside of 2nd image space here and now!")

    # affine, status = \
    #     cv2.estimateAffinePartial2D(np.array([reproj_list]).astype(np.float32),
    #                                 np.array([grid_list]).astype(np.float32))
    # (rot, tx, ty, sx, sy) = decomposeAffine(affine)
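    # a hedged sketch of the 'ideal' homography estimate described in the
    # comment block above, built from the grid <-> reprojection
    # correspondences; cv2.findHomography with RANSAC is one reasonable
    # choice here, swapped in for the commented affine estimate as an
    # assumption rather than the author's confirmed method:
    # H, status = cv2.findHomography(np.array(reproj_list).astype(np.float32),
    #                                np.array(grid_list).astype(np.float32),
    #                                cv2.RANSAC, 5.0)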
Example #3
        scale = 0.4
        print(srcname)
        pt_list = by_image[srcname]
        print(srcname, pt_list)

        # project marked points back to ned space
        base, ext = os.path.splitext(srcname)
        image = proj.findImageByName(base)
        if not image:
            continue
        print(srcname, image)

        distorted_uv = proj.redistort(pt_list, optimized=True)
        print("distorted:", distorted_uv)

        proj_list = project.projectVectors(IK, image.get_body2ned(opt=True),
                                           image.get_cam2body(), distorted_uv)
        print("proj_list:", proj_list)

        ned, ypr, quat = image.get_camera_pose(opt=True)

        # intersect with our polygon surface approximation
        pts_ned = intersect_vectors(ned, proj_list, -image.z_avg)
        print("pts_ned:", pts_ned)
        ned_list += pts_ned

        if True:
            fullpath = os.path.join(project_path, srcname)
            rgb = cv2.imread(fullpath,
                             flags=cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH
                             | cv2.IMREAD_IGNORE_ORIENTATION)
            for pt in pt_list:
srtm.initialize(ref, 2000, 2000, 30)

camw, camh = proj.cam.get_image_params()
dist_coeffs = proj.cam.get_dist_coeffs()
for image in proj.image_list:
    print(image.name)
    scale = float(image.width) / float(camw)
    K = proj.cam.get_K(scale)
    IK = np.linalg.inv(K)
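    # IK (the inverse camera matrix) maps homogeneous pixel coordinates
    # [u, v, 1] to camera-frame ray directions, which projectVectors()
    # then rotates out into ned space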
    corner_list = []
    corner_list.append([0, 0])
    corner_list.append([image.width, 0])
    corner_list.append([0, image.height])
    corner_list.append([image.width, image.height])

    proj_list = project.projectVectors(IK, image, corner_list, pose=args.pose)
    #print "proj_list:\n", proj_list
    if args.pose == 'direct':
        pts_ned = srtm.interpolate_vectors(image.camera_pose, proj_list)
    elif args.pose == 'sba':
        pts_ned = srtm.interpolate_vectors(image.camera_pose_sba, proj_list)
    # print "pts (ned):\n", pts_ned

    image.corner_list_ned = []
    image.corner_list_lla = []
    image.corner_list_xy = []
    for ned in pts_ned:
        #print(ned)
        image.corner_list_ned.append([ned[0], ned[1]])
        image.corner_list_lla.append(
            navpy.ned2lla([ned], ref[0], ref[1], ref[2]))
    #         del matches[i]
elif args.method == 'triangulate':
    for i, match in enumerate(matches):
        if match[1] == args.group:  # used in current group
            # print(match)
            points = []
            vectors = []
            for m in match[2:]:
                if proj.image_list[m[0]].name in group_list[args.group]:
                    # print(m)
                    image = proj.image_list[m[0]]
                    cam2body = image.get_cam2body()
                    body2ned = image.get_body2ned()
                    ned, ypr, quat = image.get_camera_pose(opt=True)
                    uv_list = [undistort(m[1])]  # just one uv element
                    vec_list = project.projectVectors(IK, body2ned, cam2body,
                                                      uv_list)
                    points.append(ned)
                    vectors.append(vec_list[0])
                    # print(' ', image.name)
                    # print(' ', uv_list)
                    # print('  ', vec_list)
            if len(points) >= 2:
                # print('points:', points)
                # print('vectors:', vectors)
                p = line_solver.ls_lines_intersection(points,
                                                      vectors,
                                                      transpose=True).tolist()
                # print('result:',  p, p[0])
                print(i, match[0], '>>>', end=" ")
                match[0] = [p[0][0], p[1][0], p[2][0]]
                if p[2][0] > 0:
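
# For reference, a standalone sketch of least-squares intersection of 3d
# lines (an assumption about what line_solver.ls_lines_intersection
# computes; the project's implementation may differ): line i passes through
# point p_i with unit direction d_i, and the best-fit point x solves the
# normal equations  [ sum_i (I - d_i d_i^T) ] x = sum_i (I - d_i d_i^T) p_i
import numpy as np

def ls_intersect_sketch(points, vectors):
    A = np.zeros((3, 3))
    b = np.zeros(3)
    for p, d in zip(points, vectors):
        d = np.asarray(d, dtype=float)
        d = d / np.linalg.norm(d)        # normalize the ray direction
        M = np.eye(3) - np.outer(d, d)   # projector orthogonal to d
        A += M
        b += M @ np.asarray(p, dtype=float)
    return np.linalg.solve(A, b)         # best-fit 3d intersection point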