Example #1
def SfM_gcp2xyz(GPS, RTK):
    from opensfm.dataset import DataSet
    RRF = DataSet(RTK).load_reference()
    a = 3 if os.path.isdir(GPS + '/matches') else 2
    if not os.path.isfile(GPS + '/reconstruction.json'):
        SfM_cmd(GPS, range(a, 5))
    GTM = DataSet(GPS).load_tracks_manager()
    res = []
    GRC = GPS + '/reconstruction.topocentric.json'
    if not os.path.isfile(GRC): GRC = GRC[:-16] + 'json'
    with open(GRC) as f:
        GRC = json.load(f)[0]['points']
    GIM = GPS if os.path.isdir(GPS + '/images') else GPS + '/..'
    with open(GPS + '/gcp_list.txt') as f:
        gcp = f.readlines()
    #dtp = [(k,float) for k in ('lon','lat','alt','x','y')]
    for v in gcp[1:]:  # skip 1st-row
        *v, im = v.split()
        v = np.float64(v)
        x = []
        h, w, c = cv2.imread(f'{GIM}/images/{im}').shape
        denm = lambda x: (x * max(w, h) * 2 + (w, h) - 1) / 2
        for tid, ob in GTM.get_shot_observations(im).items():
            d = np.linalg.norm(denm(ob.point) - v[3:5])
            if d < 1: x.append([d, tid])  # pixel
        d, tid = min(x) if len(x) else [np.inf, '']
        if tid not in GRC: INFO(f'skip {tid}: {[*v,im]}')
        else: res += [(*GRC[tid]['coordinates'], *v[[1, 0, 2]])]
    v = np.array(res).T
    v[3:] = RRF.to_topocentric(*v[3:])
    return v.T
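A minimal usage sketch (paths are hypothetical): the function returns an (N, 6) array pairing each GCP's coordinates in the GPS reconstruction with its RTK-derived topocentric coordinates; assuming both projects share the same reference_lla, the row-wise difference approximates the per-GCP error.

import numpy as np

pts = SfM_gcp2xyz('data/gps_project', 'data/rtk_project')   # (N, 6)
residual = np.linalg.norm(pts[:, :3] - pts[:, 3:], axis=1)  # per GCP, meters
print('mean GCP residual: %.3f m' % residual.mean())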
Example #2
def SfM_reconstruct(src): # incremental_reconstruction
    from opensfm.dataset import DataSet
    from opensfm.reconstruction import (tracking, compute_image_pairs,
        bootstrap_reconstruction, grow_reconstruction)

    data = DataSet(src); result = []
    gcp = data.load_ground_control_points()
    tracks = data.load_tracks_manager()
    imgs = tracks.get_shot_ids()
    if not data.reference_lla_exists():
        data.invent_reference_lla(imgs)

    camera_priors = data.load_camera_models()
    common_tracks = tracking.all_common_tracks(tracks)
    pairs = compute_image_pairs(common_tracks, camera_priors, data)
    imgs = set(imgs); report = {'candidate_image_pairs': len(pairs)}
    for im1, im2 in pairs:
        if im1 in imgs and im2 in imgs:
            report[im1+' & '+im2] = log = {}
            v, p1, p2 = common_tracks[im1, im2]
            rec, log['bootstrap'] = bootstrap_reconstruction(
                data, tracks, camera_priors, im1, im2, p1, p2)
            if rec:
                imgs.remove(im1); imgs.remove(im2)
                rec, log['grow'] = grow_reconstruction(
                    data, tracks, rec, imgs, camera_priors, gcp)
                result.append(rec)
    result = sorted(result, key=lambda x: -len(x.shots))
    data.save_reconstruction(result)
    report['not_reconstructed_images'] = list(imgs)
    with open(f'{src}/reports/reconstruction.json','w') as f:
        json.dump(report, f, indent=4)
Example #3
    def run(self, args):
        start = timer()
        data = DataSet(args.dataset)
        self.run_impl(data, args)
        end = timer()
        with open(data.profile_log(), 'a') as fout:
            fout.write(type(self).name + ': {0}\n'.format(end - start))
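A sketch of how this run()/run_impl() pattern is consumed; the base class and subclass below are hypothetical, only the name attribute, the run_impl() hook, and the timing log come from the snippet above.

from timeit import default_timer as timer
from opensfm.dataset import DataSet

class Command:  # hypothetical base carrying the run() wrapper above
    def run(self, args):
        start = timer()
        data = DataSet(args.dataset)
        self.run_impl(data, args)
        end = timer()
        with open(data.profile_log(), 'a') as fout:
            fout.write(type(self).name + ': {0}\n'.format(end - start))

class TrackStats(Command):
    name = 'track_stats'  # label written into the profile log

    def run_impl(self, data, args):
        print(len(data.images()), 'images in', args.dataset)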
Example #4
def SfM_export_pos(src, dop=0.1, tsv='image_geocoords.tsv'):
    rename_rec(src)
    dif = []
    gjs = {}  # for sfm
    SfM_cmd(src, f'export_geocoords --image-positions --proj="{LLA}"')
    rename_rec(src)
    geo = load_sort_save(f'{src}/{tsv}', a=1)  # sort
    with open(src + '/geo.txt', 'w') as f:
        f.writelines([LLA + '\n'] + geo[1:])

    from opensfm.dataset import DataSet
    data = DataSet(src)
    ref = data.load_reference()
    for v in geo[1:]:  # skip 1st-row
        im, *v = v.split()
        v = np.float64(v)[[1, 0, 2]]
        o = [*data.load_exif(im)['gps'].values()][:3]  # lat,lon,alt
        ov = ref.to_topocentric(*v) - np.array(ref.to_topocentric(*o))
        gjs[im] = {
            'gps': dict(latitude=v[0], longitude=v[1], altitude=v[2], dop=dop)
        }
        dif += [f'{im} lla={v} exif={o}\tdif={ov.tolist()}\n']
    with open(f'{src}/{tsv[:-4]}.dif.txt', 'w') as f:
        f.writelines(dif)
    with open(src + '/geo.json', 'w') as f:
        json.dump(gjs, f, indent=4)
Example #5
    def ground_control_points(self, proj4):
        """
        Load ground control point information.
        """
        gcp_stats_file = self.path("stats", "ground_control_points.json")

        if not io.file_exists(gcp_stats_file):
            return []

        gcps_stats = {}
        try:
            with open(gcp_stats_file) as f:
                gcps_stats = json.load(f)
        except (OSError, ValueError):
            log.ODM_INFO("Cannot parse %s" % gcp_stats_file)

        if not gcps_stats:
            return []

        ds = DataSet(self.opensfm_project_path)
        reference = ds.load_reference()
        projection = pyproj.Proj(proj4)

        result = []
        for gcp in gcps_stats:
            geocoords = _transform(gcp['coordinates'], reference, projection)
            result.append({
                'id': gcp['id'],
                'observations': gcp['observations'],
                'coordinates': geocoords,
                'error': gcp['error']
            })

        return result
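The _transform helper is not shown on this page; a plausible sketch consistent with its call site, converting a topocentric point to lat/lon/alt via the SfM reference and then into the requested projection (a pyproj.Proj instance maps lon/lat to x/y):

def _transform(point, reference, projection):
    lat, lon, alt = reference.to_lla(*point)
    x, y = projection(lon, lat)
    return [x, y, alt]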
Example #6
    def create_submodels(self, clusters):
        data = DataSet(self.data_path)
        for i, cluster in enumerate(clusters):
            # create sub model dirs
            submodel_path = self._submodel_path(i)
            submodel_images_path = self._submodel_images_path(i)
            io.mkdir_p(submodel_path)
            io.mkdir_p(submodel_images_path)

            # create image list file
            image_list_path = os.path.join(submodel_path, 'image_list.txt')
            with io.open_wt(image_list_path) as txtfile:
                for image in cluster:
                    src = data.image_files[image]
                    dst = os.path.join(submodel_images_path, image)
                    src_relpath = os.path.relpath(src, submodel_images_path)
                    if not os.path.isfile(dst):
                        os.symlink(src_relpath, dst)
                    dst_relpath = os.path.relpath(dst, submodel_path)
                    txtfile.write(dst_relpath + "\n")

            # copy config.yaml if exists
            config_file_path = os.path.join(self.data_path, 'config.yaml')
            if os.path.exists(config_file_path):
                shutil.copyfile(config_file_path,
                                os.path.join(submodel_path, 'config.yaml'))

            # create symlinks to additional files
            filenames = [
                'camera_models.json', 'reference_lla.json', 'exif', 'features',
                'matches', 'masks', 'mask_list.txt', 'segmentations'
            ]
            for filename in filenames:
                self._create_symlink(submodel_path, filename)
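_create_symlink is defined elsewhere; a minimal sketch of what the call sites above require, linking <data_path>/<filename> (a file or a directory such as 'exif') into the submodel:

import os

def _create_symlink(self, submodel_path, filename):
    src = os.path.join(self.data_path, filename)
    dst = os.path.join(submodel_path, filename)
    if os.path.exists(src) and not os.path.lexists(dst):
        os.symlink(os.path.relpath(src, submodel_path), dst)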
Example #7
def create_default_dataset_context(
    dataset_path: str, dataset_type: str = ""
) -> Generator[DataSet, None, None]:
    dataset = DataSet(dataset_path)
    try:
        yield dataset
    finally:
        dataset.clean_up()
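Usage sketch: the generator is designed to be wrapped as a context manager so that clean_up() always runs; whether the decorator is applied at the definition site is not shown here, so it is applied explicitly below (the path is hypothetical).

from contextlib import contextmanager

ctx = contextmanager(create_default_dataset_context)
with ctx('data/project') as data:
    print(len(data.images()), 'images')  # clean_up() runs on exit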
Example #8
    def convert_and_undistort(self, rerun=False, imageFilter=None):
        log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
        undistorted_images_path = self.path("undistorted", "images")

        if not io.dir_exists(undistorted_images_path) or rerun:
            undistort.run_dataset(DataSet(self.opensfm_project_path), "reconstruction.json", 
                                  0, None, "undistorted", imageFilter)
        else:
            log.ODM_WARNING("Found an undistorted directory in %s" % undistorted_images_path)
Example #9
def filter_reconstruct(src, thd=0.3):
    src = os.path.abspath(src)
    res = {}
    if os.path.isdir(src + '/opensfm'):  # for odm
        cam = src + '/cameras.json'
        src += '/opensfm'
        rec = src + '/reconstruction.topocentric.json'
    elif os.path.isfile(src + '/../cameras.json'):
        cam = src + '/../cameras.json'  # for odm
        rec = src + '/reconstruction.topocentric.json'
    elif os.path.isfile(src + '/camera_models.json'):
        cam = src + '/camera_models.json'  # for sfm
        rec = src + '/reconstruction.json'
    cam = Camera(cam)
    bak = rec[:-4] + 'bak'
    if os.path.isfile(bak):
        if os.path.isfile(rec): os.remove(rec)
        os.rename(bak, rec)  # for win
    with open(rec) as f:
        data = json.load(f)[0]
    os.rename(rec, bak)
    INFO(f'Filter: {rec}')

    from opensfm.dataset import DataSet
    T = DataSet(src).load_tracks_manager()
    for im in T.get_shot_ids():
        v = data['shots'][im]
        rotation = np.array(v['rotation'])
        translation = np.array(v['translation'])
        O, X, Y, Z = calc_axis_cam(translation, rotation)
        feat = load_feature(f'{src}/features/{im}', cam, 1)
        for tid, x in T.get_shot_observations(im).items():
            if tid not in data['points']: continue
            dp = data['points'][tid]['coordinates'] - O
            ddp = np.linalg.norm(dp)
            u, v = feat[x.id][:2]  # fid
            qt = u * X + v * Y + Z
            qt /= np.linalg.norm(qt)
            delta = np.dot(dp, qt)
            dis = np.sqrt(ddp**2 - delta**2)
            if tid not in res: res[tid] = dis
            elif dis > res[tid]: res[tid] = dis  # meters
            #print(f'{im} %6s %6s %.3f'%(tid,x.id,dis))
    dis = [*res.values()]
    md = np.mean(dis)
    thd = min(thd, md)
    out = {k: v for k, v in res.items() if v > thd}
    #print(out)
    #plt.hist(dis, [0.01,0.05,0.1,0.5,1,2]); plt.show()
    for tid in out:
        data['points'].pop(tid)
    with open(rec, 'w') as f:
        json.dump([data], f, indent=4)
    INFO('Out=%d/%d, Thd=%.3f, Max=%.3f' % (len(out), len(res), thd, max(dis)))
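The per-observation metric above is the perpendicular distance from the triangulated point to the viewing ray: with dp the camera-to-point vector and qt the unit ray direction, sqrt(|dp|^2 - (dp.qt)^2) equals |dp - (dp.qt)qt|. A tiny self-check with made-up numbers:

import numpy as np

dp = np.array([3.0, 4.0, 12.0])            # camera -> point
qt = np.array([0.0, 0.0, 1.0])             # unit ray direction along +Z
delta = np.dot(dp, qt)                     # along-ray component: 12.0
dis = np.sqrt(np.dot(dp, dp) - delta**2)   # perpendicular distance: 5.0
assert np.isclose(dis, np.linalg.norm(dp - delta * qt))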
Example #10
def SfM_parse_track(src):
    from opensfm.dataset import DataSet
    #from opensfm.pymap import Observation
    TM = DataSet(src).load_tracks_manager()
    T = {}
    for im in TM.get_shot_ids():
        T.setdefault(im, [[], [], [], []])  # tid=str
        for tid, v in TM.get_shot_observations(im).items():
            T[im][0] += [tid]
            T[im][1] += [v.id]  # xys
            T[im][2] += [np.hstack([v.point, v.scale])]
            T[im][3] += [v.color]  # RGB
    return T  # {im: [tid,fid,xys,RGB]}
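Usage sketch (hypothetical path), unpacking the {im: [tid, fid, xys, RGB]} layout returned above:

import numpy as np

T = SfM_parse_track('data/project')
for im, (tid, fid, xys, rgb) in T.items():
    xys = np.asarray(xys)  # (N, 3): normalized x, y plus feature scale
    print(im, len(tid), 'observations')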
Example #11
def SfM_match(src, pre, mix=0):  # match_features
    from opensfm.actions.match_features import timer, matching, write_report
    from opensfm.dataset import DataSet
    data = DataSet(src)
    t = timer()
    INFO(f'{SfM_DIR}/bin/opensfm match_features: {src}')
    GPS, RTK = [], []
    if os.path.isdir(pre):
        merge_dir(pre + '/exif', src + '/exif')
        merge_dir(pre + '/features', src + '/features')
        merge_json(pre, src, 'camera_models.json')
        #merge_json(pre, src, 'reports/features.json')
        #merge_dir(pre+'/reports/features', src+'/reports/features')
        GPS, RTK = data.images(), DataSet(pre).images()
    else:  # split data->(GPS,RTK)
        for i in data.images():
            (RTK if i.startswith(pre) else GPS).append(i)
    if mix in (1, 3): GPS += RTK  # 1: match (GPS+RTK, RTK)
    if mix in (2, 3): RTK += GPS  # 2: match (GPS, RTK+GPS)
    pairs, preport = matching.match_images(data, {}, GPS, RTK)
    matching.save_matches(data, GPS, pairs)
    write_report(data, preport, list(pairs.keys()), timer() - t)
Example #12
    def check_merge_partial_reconstructions(self):
        if self.reconstructed():
            data = DataSet(self.opensfm_project_path)
            reconstructions = data.load_reconstruction()
            tracks_manager = data.load_tracks_manager()

            if len(reconstructions) > 1:
                log.ODM_WARNING(
                    "Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap"
                    % len(reconstructions))
                log.ODM_INFO("Attempting merge")

                merged = Reconstruction()
                merged.set_reference(reconstructions[0].reference)

                for ix_r, rec in enumerate(reconstructions):
                    if merged.reference != rec.reference:
                        # Should never happen
                        continue

                    log.ODM_INFO("Merging reconstruction %s" % ix_r)

                    for camera in rec.cameras.values():
                        merged.add_camera(camera)

                    for point in rec.points.values():
                        try:
                            new_point = merged.create_point(
                                point.id, point.coordinates)
                            new_point.color = point.color
                        except RuntimeError as e:
                            log.ODM_WARNING("Cannot merge shot id %s (%s)" %
                                            (shot.id, str(e)))
                            continue

                    for shot in rec.shots.values():
                        merged.add_shot(shot)
                        try:
                            obsdict = tracks_manager.get_shot_observations(
                                shot.id)
                        except RuntimeError:
                            log.ODM_WARNING(
                                "Shot id %s missing from tracks_manager!" %
                                shot.id)
                            continue
                        for track_id, obs in obsdict.items():
                            if track_id in merged.points:
                                merged.add_observation(shot.id, track_id, obs)

                data.save_reconstruction([merged])
Example #13
    def create_submodels(self, clusters):
        data = DataSet(self.data_path)
        for i, cluster in enumerate(clusters):
            # create sub model dirs
            submodel_path = self._submodel_path(i)
            submodel_images_path = self._submodel_images_path(i)
            io.mkdir_p(submodel_path)
            io.mkdir_p(submodel_images_path)

            # create image list file
            image_list_path = os.path.join(submodel_path, "image_list.txt")
            with io.open_wt(image_list_path) as txtfile:
                for image in cluster:
                    src = data.image_files[image]
                    dst = os.path.join(submodel_images_path, image)
                    if not os.path.isfile(dst):
                        if sys.platform == 'win32':
                            os.link(src, dst)
                        else:
                            os.symlink(os.path.relpath(src, submodel_images_path), dst)
                    dst_relpath = os.path.relpath(dst, submodel_path)
                    txtfile.write(dst_relpath + "\n")

            # copy config.yaml if exists
            config_file_path = os.path.join(self.data_path, "config.yaml")
            if os.path.exists(config_file_path):
                shutil.copyfile(
                    config_file_path, os.path.join(submodel_path, "config.yaml")
                )

            # Create reports folder
            io.mkdir_p(os.path.join(submodel_path, "reports"))

            # create symlinks to additional files
            filepaths = [
                "camera_models.json",
                "reference_lla.json",
                "exif",
                "features",
                "matches",
                "masks",
                "mask_list.txt",
                "segmentations",
                os.path.join("reports", "features"),
                os.path.join("reports", "features.json"),
                os.path.join("reports", "matches.json"),
            ]
            for filepath in filepaths:
                self._create_symlink(submodel_path, filepath)
Example #14
    def export_report(self, report_path, odm_stats, rerun=False):
        log.ODM_INFO("Exporting report to %s" % report_path)

        osfm_report_path = self.path("stats", "report.pdf")
        if not os.path.exists(report_path) or rerun:
            data = DataSet(self.opensfm_project_path)
            pdf_report = report.Report(data, odm_stats)
            pdf_report.generate_report()
            pdf_report.save_report("report.pdf")

            if os.path.exists(osfm_report_path):
                shutil.move(osfm_report_path, report_path)
            else:
                log.ODM_WARNING("Report could not be generated")
        else:
            log.ODM_WARNING("Report %s already exported" % report_path)
Example #15
    def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, runId="nominal"):
        log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
        done_flag_file = self.path("undistorted", "%s_done.txt" % runId)

        if not io.file_exists(done_flag_file) or rerun:
            ds = DataSet(self.opensfm_project_path)

            if image_list is not None:
                ds._set_image_list(image_list)

            undistort.run_dataset(ds, "reconstruction.json", 
                                  0, None, "undistorted", imageFilter)
            
            self.touch(done_flag_file)
        else:
            log.ODM_WARNING("Already undistorted (%s)" % runId)
Example #16
def SfM_gcp_gz(GPS, RTK='', thd=1, dst=0):
    from opensfm.dataset import DataSet
    if not os.path.isdir(RTK): RTK = GPS
    R = RTK + '/reconstruction.topocentric.json'
    if not os.path.isfile(R): R = R[:-16] + 'json'
    with open(R) as f:
        R = json.load(f)[0]['points']
    T = SfM_parse_track(RTK)  # {im2:[tid2,fid2,xys2,rgb2]}
    ref = DataSet(RTK).load_reference()
    gcp = [LLA + '\n']

    for gz in os.scandir(GPS + '/matches_gcp'):
        im1 = gz.name[:-15]  # parse *_matches.pkl.gz
        with gzip.open(gz.path, 'rb') as f:
            gz = pickle.load(f)
        for im2, fid in gz.items():  # (im1,im2):[fid1,fid2]
            if len(fid) < 7:
                INFO(f'skip: {im1} {im2}')
                continue  # filter
            _, uv1 = SfM_feat_uv(im1, src=GPS, idx=fid[:, 0])  # norm->pixel
            _, uv2 = SfM_feat_uv(im2, src=RTK, idx=fid[:, 1])  # norm->pixel
            _, idx = filter_match(uv1, uv2, thd=0.5)
            fid = fid[idx]

            idx = IDX(T[im2][1], fid[:, 1])  # filter: track+fid2->fid2
            tid2, fid2, xys2, rgb2 = [np.array(k)[idx] for k in T[im2]]
            idx = IDX(tid2, list(R))  # filter: reconstruct+tid2->tid2
            INFO(f'gz_gcp: {im1} {im2} {len(uv1)}->{len(fid)}->{len(idx)}')
            if len(idx) < 1: continue  # skip ref.to_lla() when idx=[]
            tid2, fid2, xys2, rgb2 = tid2[idx], fid2[idx], xys2[idx], rgb2[idx]
            xyz2 = np.array([R[k]['coordinates'] for k in tid2])  # local xyz
            lla2 = np.array(ref.to_lla(*xyz2.T)).T  # xyz->lat,lon,alt

            idx = IDX(fid[:, 1], fid2)
            fid = fid[idx]
            _, uv1 = SfM_feat_uv(im1, src=GPS, idx=fid[:, 0])  # fid1
            _, uv2 = SfM_feat_uv(im2, src=RTK, pt=xys2)  # norm->pixel
            for pt, uv in zip(lla2, uv1):
                gcp += [GCF([*pt[[1, 0, 2]], *uv, im1])]
    with open(GPS + '/gcp_list.txt', 'w') as f:
        f.writelines(gcp)
    gcp = dedup_gcp(GPS)
    gcp = filter_gcp(GPS, RTK, thd=thd)
    INFO(f'Created {len(gcp)-1} GCPs: {GPS}/gcp_list.txt\n')
    cv2.destroyAllWindows()
    return gcp  # list
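The IDX helper is not defined on this page; a sketch consistent with its three call sites above (for each element of the second argument found in the first, return its index in the first, preserving the second argument's order so the filtered arrays stay aligned); the real implementation may differ:

import numpy as np

def IDX(a, b):
    pos = {v: i for i, v in enumerate(a)}
    return np.array([pos[v] for v in b if v in pos], dtype=int)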
Example #17
    def photos_to_metadata(self,
                           photos,
                           rolling_shutter,
                           rolling_shutter_readout,
                           rerun=False):
        metadata_dir = self.path("exif")

        if io.dir_exists(metadata_dir) and not rerun:
            log.ODM_WARNING(
                "%s already exists, not rerunning photo to metadata" %
                metadata_dir)
            return

        if io.dir_exists(metadata_dir):
            shutil.rmtree(metadata_dir)

        os.makedirs(metadata_dir, exist_ok=True)

        camera_models = {}
        data = DataSet(self.opensfm_project_path)

        for p in photos:
            d = p.to_opensfm_exif(rolling_shutter, rolling_shutter_readout)
            with open(os.path.join(metadata_dir, "%s.exif" % p.filename),
                      'w') as f:
                f.write(json.dumps(d, indent=4))

            camera_id = p.camera_id()
            if camera_id not in camera_models:
                camera = exif.camera_from_exif_metadata(d, data)
                camera_models[camera_id] = camera

        # Override any camera specified in the camera models overrides file.
        if data.camera_models_overrides_exists():
            overrides = data.load_camera_models_overrides()
            if "all" in overrides:
                for key in camera_models:
                    camera_models[key] = copy.copy(overrides["all"])
                    camera_models[key].id = key
            else:
                for key, value in overrides.items():
                    camera_models[key] = value
        data.save_camera_models(camera_models)
Example #18
def check_gcp(gcp, cam, org=0, n=0):
    res = {}
    K = Camera(cam).K()
    if os.path.isdir(org):
        from opensfm.dataset import DataSet
        ref = DataSet(org).load_reference()
    with open(gcp) as f:
        data = f.readlines()[n:]
    for v in data:  # skip first n-rows
        v = v.split()
        im = v[-1]
        v = v[:5] + [np.inf] * 2
        if os.path.isdir(org):  # lon,lat,alt -> xyz
            lon, lat, alt = [float(i) for i in v[:3]]
            v[:3] = ref.to_topocentric(lat, lon, alt)
        if im not in res: res[im] = [v]
        else: res[im].append(v)

    for k, v in res.items():
        v = res[k] = np.float64(v)
        if len(v) < 5: continue  # skip
        pt, uv = v[:, :3].copy(), v[:, 3:5]  # copy()->new mem-block
        _, Rvec, Tvec, Ins = cv2.solvePnPRansac(pt, uv, K, None)
        xy, Jacob = cv2.projectPoints(pt, Rvec, Tvec, K, None)
        err = v[:, 5] = np.linalg.norm(xy.squeeze() - uv, axis=1)

        his = np.histogram(err, bins=[*range(11), np.inf])[0]
        for c in range(len(his) - 1, 0, -1):  # len(v)=sum(his)
            if sum(his[c:]) >= len(v) * 0.2: break
        idx = np.where(err <= c)[0]
        #print(c, his)
        if len(idx) < 7: continue  # skip
        _, Rvec, Tvec = cv2.solvePnP(pt[idx], uv[idx], K, None)
        xy, Jacob = cv2.projectPoints(pt, Rvec, Tvec, K, None)
        v[:, -1] = np.linalg.norm(xy.squeeze() - uv, axis=1)  # err2

    out = os.path.abspath(gcp + '.err')
    print(out)
    with open(out, 'w') as f:
        for k, v in zip(data, np.vstack([*res.values()])):
            f.write(k[:-1] + '%11.3f%11.3f\n' % (*v[-2:], ))
Example #19
    def create_submodels(self, clusters, no_symlinks=False):
        data = DataSet(self.data_path)
        for i, cluster in enumerate(clusters):
            # create sub model dirs
            submodel_path = self._submodel_path(i)
            submodel_images_path = self._submodel_images_path(i)
            io.mkdir_p(submodel_path)
            io.mkdir_p(submodel_images_path)

            # link images and create image list file
            image_list_path = os.path.join(submodel_path, 'image_list.txt')
            with open(image_list_path, 'w') as txtfile:
                for image in cluster:
                    src = data.image_files[image]
                    dst = os.path.join(submodel_images_path, image)
                    if not os.path.isfile(dst):
                        os.symlink(src, dst)
                    dst_relpath = os.path.relpath(dst, submodel_path)
                    txtfile.write(dst_relpath + "\n")

            # copy config.yaml if exists
            config_file_path = os.path.join(self.data_path, 'config.yaml')
            if os.path.exists(config_file_path):
                shutil.copyfile(config_file_path,
                                os.path.join(submodel_path, 'config.yaml'))

            if no_symlinks:
                reference_file_path = os.path.join(self.data_path,
                                                   'reference_lla.json')
                if os.path.exists(reference_file_path):
                    shutil.copyfile(
                        reference_file_path,
                        os.path.join(submodel_path, 'reference_lla.json'))
            else:
                # create symlinks to metadata files
                for symlink_path in [
                        'camera_models.json', 'reference_lla.json', 'exif',
                        'features', 'matches'
                ]:
                    self._create_symlink(submodel_path, symlink_path)
Example #20
def default_dataset_type(dataset_path):
    return DataSet(dataset_path)
Example #21
def create_default_dataset(dataset_path, dataset_type):
    return DataSet(dataset_path)
Example #22
def filter_gcp(GPS, RTK, thd=1):  # reproject
    from odm_filter import Camera
    from opensfm.dataset import DataSet
    K = Camera(GPS + '/camera_models.json').K()
    ref = DataSet(RTK).load_reference()
    res = {}
    PM = 8 if hasattr(cv2, 'SOLVEPNP_SQPNP') else 1
    # cv2.SOLVEPNP_ITERATIVE=0: need n>=6 non-planar
    # cv2.SOLVEPNP_EPNP=1, cv2.SOLVEPNP_SQPNP=8: n>=4
    out, err = GPS + '/gcp_list.txt', GPS + '/gcp_err.txt'
    if os.path.isfile(out):
        with open(out) as f:
            gcp = f.readlines()
    elif os.path.isfile(err):
        with open(err) as f:
            gcp = f.readlines()
        for i, v in enumerate(gcp):
            v = v.split()
            x = np.float64(v[1:4])
            gcp[i] = GCF([*x, *v[4:6], v[0]])
        gcp.insert(0, LLA + '\n')
    for v in gcp[1:]:  # skip 1st-row
        *v, im = v.split()
        v = np.float64(v + 2 * [np.inf])
        res.setdefault(im, [])
        res[im].append(v)

    for im, v in res.items():
        P = 0 if len(v) > 5 else PM
        if len(v) < (4 if P > 0 else 6): continue
        v = res[im] = np.float64(v)
        uv = v[:, 3:5]  # pixel u, v
        pt = np.array(ref.to_topocentric(*v[:, :3].T[[1, 0, 2]])).T  # lon,lat,alt -> xyz
        # for coplanar points; cv2.Rodrigues(Rv): RotVector->RotMatrix
        try:
            _, Rv, Tv, _ = cv2.solvePnPRansac(pt, uv, K, None, flags=P)
        except cv2.error:
            _, Rv, Tv, _ = cv2.solvePnPRansac(pt, uv, K, None, flags=PM)
        # cv2.projectPoints: np.array/np.ascontiguousarray->mem-block
        xy, Jacob = cv2.projectPoints(pt, Rv, Tv, K, None)
        dis = v[:, 5] = np.linalg.norm(xy.squeeze() - uv, axis=1)

        his = np.histogram(dis, bins=[*range(11), np.inf])[0]
        for c in range(len(his) - 1, -1, -1):  # len(v)=sum(his)
            if sum(his[c:]) >= len(v) * 0.2: break
        idx = np.where(dis <= c)[0]
        P = 0 if len(idx) > 5 else PM
        if len(idx) < (4 if P > 0 else 6): continue  # for cv2.solvePnP
        try:
            _, Rv, Tv = cv2.solvePnP(pt[idx], uv[idx], K, None, flags=P)
        except cv2.error:
            _, Rv, Tv = cv2.solvePnP(pt[idx], uv[idx], K, None, flags=PM)
        xy, Jacob = cv2.projectPoints(pt, Rv, Tv, K, None)
        v[:, 6] = np.linalg.norm(xy.squeeze() - uv, axis=1)  # dis2
    with open(GPS + '/gcp_err.txt', 'w') as f:  # save
        F = lambda x: ('%s' + 3 * ' %.15f' + 2 * ' %5d' + 2 * ' %9.3f' + '\n'
                       ) % x
        for k, v in res.items():
            f.writelines([F((k, *e)) for e in v])

    #F = lambda x: np.where(x.max(axis=1) < np.inf, x.mean(axis=1), x.min(axis=1))
    # np.mean(v) if max(v)<np.inf else min(v); superseded by the line below
    F = lambda x: np.where(x[:, 1] < np.inf, x[:, 1], x[:, 0])
    dis = F(np.vstack([*res.values()])[:, 5:]) < thd
    new = [gcp[0]] + [gcp[i] for i in np.where(dis)[0] + 1]
    with open(GPS + '/gcp_list.txt', 'w') as f:
        f.writelines(new)
    INFO(f'Filter_GCPs: {len(gcp)} -> {len(new)}')
    return new