Example #1
    def solve_pnp_ransac(sm,
                         sce_kp_2d,
                         ref_kp_3d,
                         ransac_err=DEF_RANSAC_ERROR,
                         n_iter=RANSAC_ITERATIONS,
                         kernel=RANSAC_KERNEL):

        # assuming no lens distortion
        dist_coeffs = None
        cam_mx = sm.cam.intrinsic_camera_mx()
        ref_kp_3d = np.reshape(ref_kp_3d, (len(ref_kp_3d), 1, 3))
        sce_kp_2d = np.reshape(sce_kp_2d, (len(sce_kp_2d), 1, 2))

        retval, rvec, tvec, inliers = cv2.solvePnPRansac(
            ref_kp_3d,
            sce_kp_2d,
            cam_mx,
            dist_coeffs,
            iterationsCount=n_iter,
            reprojectionError=ransac_err,
            flags=kernel)

        if not retval:
            raise PositioningException('RANSAC algorithm returned False')
        if len(inliers) >= KeypointAlgo.MIN_FEATURES and np.linalg.norm(
                tvec) > sm.max_distance * 1.1:
            # BUG in solvePnPRansac: sometimes the object is estimated to be very far away
            # even though there are enough good inliers; happens with all kernels,
            # reprojection errors and iteration counts
            raise PositioningException(
                'RANSAC estimated that the asteroid is at %s km' % (tvec.T,))

        return rvec, tvec, inliers
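
For reference, below is a minimal, self-contained round-trip sketch of the cv2.solvePnPRansac call used above. The camera matrix, the synthetic 3d points and the parameter values are made up for illustration and are not taken from the project:

    import cv2
    import numpy as np

    cam_mx = np.array([[1000., 0., 320.],
                       [0., 1000., 240.],
                       [0., 0., 1.]])
    ref_kp_3d = np.random.uniform(-1, 1, (50, 3)) + [0, 0, 10]  # points in front of the camera
    rvec0, tvec0 = np.array([[.1], [.2], [.05]]), np.array([[.5], [-.3], [2.]])
    sce_kp_2d, _ = cv2.projectPoints(ref_kp_3d, rvec0, tvec0, cam_mx, None)

    retval, rvec, tvec, inliers = cv2.solvePnPRansac(
        ref_kp_3d.reshape((-1, 1, 3)), sce_kp_2d.reshape((-1, 1, 2)),
        cam_mx, None, iterationsCount=1000, reprojectionError=8.0)
    assert retval, 'RANSAC failed'
    print(tvec.T, len(inliers))  # tvec should be close to tvec0, with ~50 inliers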
Example #2
    def match_features(desc1,
                       desc2,
                       norm,
                       mask=None,
                       method='brute',
                       symmetry_test=False,
                       ratio_test=True):

        if desc1 is None or desc2 is None or len(
                desc1) < KeypointAlgo.MIN_FEATURES or len(
                    desc2) < KeypointAlgo.MIN_FEATURES:
            raise PositioningException('Not enough features found')

        if method == 'flann':
            ss = norm != cv2.NORM_HAMMING

            FLANN_INDEX_LSH = 6  # for ORB, AKAZE
            FLANN_INDEX_KDTREE = 0  # for SIFT, SURF

            # ss: float descriptors (SIFT, SURF) use a KD-tree index, binary ones use LSH
            if ss:
                index_params = {
                    'algorithm': FLANN_INDEX_KDTREE,
                    'trees': 5,
                }
            else:
                index_params = {
                    'algorithm': FLANN_INDEX_LSH,
                    'table_number': 6,  # 12
                    'key_size': 12,  # 20
                    'multi_probe_level': 1,  # 2
                }

            search_params = {
                'checks': 100,
            }

            matcher = cv2.FlannBasedMatcher(index_params, search_params)
        elif method == 'brute':
            matcher = cv2.BFMatcher(norm, symmetry_test)
        else:
            raise ValueError('unknown method %s' % method)

        if ratio_test:
            matches = matcher.knnMatch(np.array(desc1),
                                       np.array(desc2),
                                       2,
                                       mask=mask)
        else:
            matches = matcher.match(np.array(desc1),
                                    np.array(desc2),
                                    mask=mask)

        if len(matches) < KeypointAlgo.MIN_FEATURES:
            raise PositioningException('Not enough features matched')

        if ratio_test:
            # ratio test as per Lowe's paper
            matches = list(
                m[0] for m in matches if len(m) > 1 and
                m[0].distance < KeypointAlgo.LOWE_METHOD_COEF * m[1].distance)
            if len(matches) < KeypointAlgo.MIN_FEATURES:
                raise PositioningException('Too many features discarded')

        return matches
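
The brute-force kNN matching and Lowe's ratio test above can be tried in isolation; here is a stand-alone sketch run on random ORB-style binary descriptors rather than real features, with 0.75 standing in for LOWE_METHOD_COEF:

    import cv2
    import numpy as np

    desc1 = np.random.randint(0, 256, (100, 32), dtype=np.uint8)  # 32-byte rows, like ORB
    desc2 = np.random.randint(0, 256, (100, 32), dtype=np.uint8)

    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, False)  # crossCheck must be off for k=2
    matches = matcher.knnMatch(desc1, desc2, 2)
    good = [m for m, n in matches if m.distance < 0.75 * n.distance]
    print('%d/%d matches pass the ratio test' % (len(good), len(matches)))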
Example #3
    def detect_asteroid(self, sce_img):
        ih, iw = sce_img.shape[0:2]
        
        # Threshold it so it becomes binary
        ret, bin_img = cv2.threshold(sce_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        
        # connect close pixels by dilating and eroding
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        bin_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
        
        # get connected regions and their stats
        n, regs, stats, c = cv2.connectedComponentsWithStats(bin_img, 8, cv2.CV_32S)
        
        # detect asteroid
        parts = list(s[cv2.CC_STAT_AREA] for i,s in enumerate(stats) if i>0)
        if not parts or stats[np.argmax(parts)+1][cv2.CC_STAT_AREA] < self.MIN_PIXELS_FOR_DETECTION:
            raise PositioningException('No asteroid found')
        
        # asteroid parts (label 0 is the background) and their relative margins
        # to the image borders: (label, area, left, right, top, bottom)
        ast_parts = [
            (
                i, s[cv2.CC_STAT_AREA],
                s[cv2.CC_STAT_LEFT]/iw, (iw - (s[cv2.CC_STAT_LEFT]+s[cv2.CC_STAT_WIDTH]))/iw,
                s[cv2.CC_STAT_TOP]/ih, (ih - (s[cv2.CC_STAT_TOP]+s[cv2.CC_STAT_HEIGHT]))/ih
            )
            for i, s in enumerate(stats)
            if i > 0 and s[cv2.CC_STAT_AREA] >= self.MIN_PIXELS_FOR_DETECTION
        ]

        tot_area = sum((p[1] for p in ast_parts))/iw/ih
        _, _, lm, rm, tm, bm = np.min(ast_parts, axis=0)  # tightest margins across all parts
        
        if DEBUG:
            print('Asteroid l,r,t,b margins: %.2f, %.2f, %.2f, %.2f'%(lm, rm, tm, bm), flush=True)
        
        # NOTE: an alternative border-margin check is disabled in favor of the max-span check:
        #   lim = self.ASTEROID_MIN_BORDER_MARGIN
        #   if (lm < lim and rm < lim) or (tm < lim and bm < lim):
        #       raise PositioningException('Asteroid too close: margins (l,r,t,b): '
        #                                  '%.2f, %.2f, %.2f, %.2f' % (lm, rm, tm, bm))
        lim = self.ASTEROID_MAX_SPAN
        if 1 - lm - rm > lim or 1 - tm - bm > lim:
            raise PositioningException('Asteroid too close: span (w,h): %.2f, %.2f' % (1-lm-rm, 1-tm-bm))
        
        # set too-small regions (e.g. stars) to zero in the scene image
        ast_labels = set(p[0] for p in ast_parts)
        for i in range(1, n):
            if i not in ast_labels:
                sce_img[regs == i] = 0
        
        return tot_area
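
The thresholding and connected-components steps above can be exercised on their own; below is a small sketch with a synthetic blob standing in for the asteroid (sizes and intensities are arbitrary):

    import cv2
    import numpy as np

    img = np.zeros((240, 320), dtype=np.uint8)
    cv2.circle(img, (160, 120), 30, 200, -1)  # a fake "asteroid"
    ret, bin_img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    n, regs, stats, cents = cv2.connectedComponentsWithStats(bin_img, 8, cv2.CV_32S)
    areas = stats[1:, cv2.CC_STAT_AREA]  # label 0 is the background
    print('components: %d, largest area: %d px' % (n - 1, areas.max()))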
Example #4
    def _update_matched_features_inner(self, fdb, idxs1, idxs2):
        # fdb layout (inferred from usage): [0]=descriptors, [1]=2d keypoints,
        # [2]=3d keypoints, [3]=matched flags, [4]=feature counts
        nf1 = fdb[4][idxs1[0], idxs1[1]]
        nf2 = fdb[4][idxs2[0], idxs2[1]]
        if idxs1 == idxs2 or nf1 == 0 or nf2 == 0:
            return

        sc1_desc = fdb[0][idxs1[0], idxs1[1], 0:nf1, :].reshape((nf1, fdb[0].shape[3]))
        sc1_kp_2d = fdb[1][idxs1[0], idxs1[1], 0:nf1, :].reshape((nf1, fdb[1].shape[3]))

        sc2_desc = fdb[0][idxs2[0], idxs2[1], 0:nf2, :].reshape((nf2, fdb[0].shape[3]))
        sc2_kp_3d = fdb[2][idxs2[0], idxs2[1], 0:nf2, :].reshape((nf2, fdb[2].shape[3]))

        try:
            matches = KeypointAlgo.match_features(sc1_desc, sc2_desc, self._latest_detector.defaultNorm(), method='brute')

            # solve pnp with ransac
            ref_kp_3d = sc2_kp_3d[[m.trainIdx for m in matches], :]
            sce_kp_2d = sc1_kp_2d[[m.queryIdx for m in matches], :]
            rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(self.system_model, sce_kp_2d, ref_kp_3d, self._ransac_err)

            # check if solution ok
            ok, err1, err2 = self.calc_err(rvec, tvec, idxs1[0], idxs2[0], warn=len(inliers) > 30)
            if not ok:
                raise PositioningException()

            fdb[3][idxs1[0], idxs1[1], [matches[i[0]].queryIdx for i in inliers]] = True
            fdb[3][idxs2[0], idxs2[1], [matches[i[0]].trainIdx for i in inliers]] = True
        except PositioningException:
            # matching failed or the pose was rejected; leave this index pair unmatched
            pass
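
The queryIdx/trainIdx bookkeeping above is easy to get backwards; this tiny sketch with hand-made DMatch objects shows which side each index selects:

    import cv2
    import numpy as np

    kp_2d = np.random.rand(10, 2)  # query-side 2d keypoints (scene)
    kp_3d = np.random.rand(12, 3)  # train-side 3d keypoints (reference)
    matches = [cv2.DMatch(3, 7, 0.5), cv2.DMatch(8, 1, 0.9)]  # (queryIdx, trainIdx, distance)
    sce_kp_2d = kp_2d[[m.queryIdx for m in matches], :]
    ref_kp_3d = kp_3d[[m.trainIdx for m in matches], :]
    print(sce_kp_2d.shape, ref_kp_3d.shape)  # (2, 2) (2, 3)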
Example #5
    def get_image_centroid(self, img, is_scene=False, binary=False):
        ih, iw, cs = img.shape
        m = cv2.moments(img[:, :, 0], binaryImage=binary)

        if np.isclose(m['m00'], 0):
            if is_scene:
                raise PositioningException('No asteroid found')
            else:
                raise PositioningException('Algorithm failure: model moved out of view')

        # image centroid
        icx = m['m10'] / m['m00'] / iw * self._cam.width
        icy = m['m01'] / m['m00'] / ih * self._cam.height
        brightness = m['m00'] / iw / ih * self._cam.width * self._cam.height

        # pixel spreads,
        # used to model dimensions of asteroid parts visible in image
        hr = math.sqrt(m['mu20'] / m['m00']) if m['mu20'] > 0 else 1
        vr = math.sqrt(m['mu02'] / m['m00']) if m['mu02'] > 0 else 1

        return icx, icy, brightness, hr, vr
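
A stand-alone illustration of the centroid math above, using cv2.moments on a toy image (the blob position and size are arbitrary):

    import cv2
    import numpy as np

    img = np.zeros((100, 100), dtype=np.uint8)
    cv2.rectangle(img, (30, 40), (50, 60), 255, -1)
    m = cv2.moments(img, binaryImage=True)
    cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
    print('centroid: (%.1f, %.1f)' % (cx, cy))  # ~ (40.0, 50.0)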
Example #6
    def adjust_iteratively(self, sce_img, outfile=None, **kwargs):
        self.debug_filebase = outfile
        self._bg_threshold = kwargs.get('bg_threshold', self._bg_threshold)
        sce_img = self.maybe_load_scene_image(sce_img)

        if DEBUG:
            cv2.imshow('target img', sce_img)

        self.system_model.spacecraft_pos = (0, 0, -self.system_model.min_med_distance)
        for i in range(self.MAX_ITERATIONS):
            ox, oy, oz = self.system_model.spacecraft_pos
            od = math.sqrt(ox**2 + oy**2 + oz**2)
            
            if not DEBUG:
                self.adjust(sce_img)
            else:
                try:
                    self.adjust(sce_img)
                except PositioningException as e:
                    print(str(e))
                    break
                finally:
                    cv2.imshow('rendered img', self._ref_img)
                    cv2.waitKey()
            
            nx, ny, nz = self.system_model.spacecraft_pos
            ch = math.sqrt((nx-ox)**2 + (ny-oy)**2 + (nz-oz)**2)
            if DEBUG:
                print('i%d: d0=%.2f, ch=%.2f, rel_ch=%.2f%%'%(i, od, ch, ch/od*100))
            if ch/od < self.ITERATION_TOL:
                break
        
        if self.CHECK_RESULT_VALIDITY:
            result_quality = ImageProc.norm_xcorr(sce_img, self._ref_img)
            if result_quality < self.MIN_RESULT_XCORR:
                raise PositioningException('Result failed quality test with score: %.3f'%(result_quality,))
        
        if BATCH_MODE and self.debug_filebase:
            img = self.render(shadows=self.RENDER_SHADOWS)
            cv2.imwrite(self.debug_filebase+'r.png', img)

        if DEBUG:
            cv2.waitKey()
            cv2.destroyAllWindows()
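
The stopping rule above is a plain relative-change test; here is a toy version, with 0.01 standing in for ITERATION_TOL and a fake position update in place of adjust():

    import math

    pos, tol = (0.0, 0.0, -10.0), 0.01
    for i in range(50):
        old = pos
        od = math.sqrt(sum(c**2 for c in old))
        pos = tuple(c + 0.5**(i + 1) for c in old)  # stand-in for one adjust() step
        ch = math.sqrt(sum((n - o)**2 for n, o in zip(pos, old)))
        if ch / od < tol:
            break
    print('converged after %d iterations' % (i + 1))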
Example #7
    def optfun(self, *args):
        if any(map(math.isnan, args)):
            raise PositioningException('Position resulted in a NaN: %s' % (args,))
        
        # disable update events fired when parameters are changed
        #self.system_model.param_change_events(False)
        
        for (n, p), v in zip(self.system_model.get_params(), args):
            p.nvalue = v
            
        self.iter_count += 1
        err = self.errfun()

        # log result
        self.extra_values.append(err)
        
        # enable events again
        #self.system_model.param_change_events(True)
        #QCoreApplication.processEvents()
        return err
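
optfun is shaped like a standard black-box objective; a hedged sketch of how such a function is typically consumed, here with scipy.optimize, which is assumed available and is not necessarily the optimizer this project uses:

    import math
    from scipy.optimize import minimize

    def optfun(*args):
        if any(map(math.isnan, args)):
            raise ValueError('NaN parameter: %s' % (args,))
        x, y = args
        return (x - 1)**2 + (y + 2)**2  # stand-in for errfun()

    res = minimize(lambda p: optfun(*p), x0=[0.0, 0.0], method='Nelder-Mead')
    print(res.x)  # ~ [1, -2]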
Example #8
    def adjust(self, sce_img, ref_img=None, simple=False):
        sce_img = self.maybe_load_scene_image(sce_img)

        if not simple:
            if ref_img is None:
                ref_img = self.render(shadows=self.RENDER_SHADOWS)
                th, ref_img = cv2.threshold(ref_img, self._bg_threshold, 255, cv2.THRESH_TOZERO)
                ref_img = np.atleast_3d(ref_img)
                self._ref_img = ref_img

            sc_v = self.match_brightness_centroids(sce_img, ref_img)
        else:
            sc_v = self.simple_adjusted_centroid(sce_img)

        if not BATCH_MODE and DEBUG:
            print('\nreal pos:\n%s\nest pos:\n%s\n' % (
                self.system_model.real_spacecraft_pos, sc_v))

        if any(map(math.isnan, sc_v)):
            raise PositioningException('Position resulted in a NaN: %s' % (sc_v,))

        self.system_model.spacecraft_pos = sc_v
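
The cv2.THRESH_TOZERO call above keeps bright pixels and zeroes everything at or below the background threshold; a tiny demonstration with made-up values:

    import cv2
    import numpy as np

    img = np.array([[10, 60, 200]], dtype=np.uint8)
    th, out = cv2.threshold(img, 50, 255, cv2.THRESH_TOZERO)
    print(out)  # [[  0  60 200]]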
Example #9
    def solve_pnp(self,
                  orig_sce_img,
                  outfile,
                  feat=ORB,
                  use_feature_db=False,
                  adjust_sc_rot=False,
                  add_noise=False,
                  scale_cam_img=False,
                  vary_scale=False,
                  match_mask_params=None,
                  processing=True,
                  win=35,
                  sigma_col=5,
                  sigma_space=10,
                  sigma_coeff=100,
                  **kwargs):

        # set max memory usable by reference features; scene features use the rest of MAX_WORK_MEM
        ref_max_mem = KeypointAlgo.FDB_MAX_MEM if use_feature_db else KeypointAlgo.MAX_WORK_MEM / 2
        sm = self.system_model
        self._ransac_err = KeypointAlgo.DEF_RANSAC_ERROR
        self._render_z = -sm.min_med_distance
        init_z = kwargs.get('init_z', self._render_z)
        ref_img_sc = min(
            1, self._render_z / init_z) * (sm.view_width if scale_cam_img else
                                           self._cam.width) / sm.view_width
        self.extra_values = None

        if outfile is not None:
            self.debug_filebase = outfile + (self.DEBUG_IMG_POSTFIX
                                             if isinstance(orig_sce_img, str)
                                             else '')

        if self.est_real_ast_orient:
            # so that the rotation of 67P can be tracked
            sm.reset_to_real_vals()

        if use_feature_db and self._fdb_helper is None:
            from algo.fdbgen import FeatureDatabaseGenerator
            self._fdb_helper = FeatureDatabaseGenerator(
                self.system_model, self.render_engine, self.obj_idx)

        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)

        if add_noise:
            self._shape_model_rng = np.max(
                np.ptp(sm.asteroid.real_shape_model.vertices, axis=0))

        self.timer = Stopwatch()
        self.timer.start()

        if use_feature_db:
            if KeypointAlgo.FDB_REAL:
                # find correct set of keypoints & descriptors from features db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._query_fdb(feat)
            else:
                # calculate on-the-fly exactly the same features that would be returned from a feature db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._fake_fdb(feat)
        else:
            # render model image
            ref_img, depth_result = self.render_ref_img(ref_img_sc)

            if False:
                # normalize ref_img to match sce_img
                ref_img = ImageProc.equalize_brightness(ref_img,
                                                        orig_sce_img,
                                                        percentile=99.999,
                                                        image_gamma=1.8)
            if processing:
                #print("\033[0;32mINFO\033[00m: Bilateral Filter Applied")
                ref_img = ImageProc.bilateral_filtering(
                    ref_img, win, sigma_col, sigma_space, sigma_coeff)
                orig_sce_img = ImageProc.bilateral_filtering(
                    orig_sce_img, win, sigma_col, sigma_space, sigma_coeff)
            if False:
                gamma = 1.0 / 1.8
                ref_img = ImageProc.adjust_gamma(ref_img, gamma)
                orig_sce_img = ImageProc.adjust_gamma(orig_sce_img, gamma)

            # get keypoints and descriptors
            ee = sm.pixel_extent(abs(self._render_z))
            ref_kp, ref_desc, self._latest_detector = KeypointAlgo.detect_features(
                ref_img,
                feat,
                maxmem=ref_max_mem,
                max_feats=KeypointAlgo.MAX_FEATURES,
                for_ref=True,
                expected_pixel_extent=ee)

        if BATCH_MODE and self.debug_filebase:
            # save start situation in log archive
            self.timer.stop()
            img1 = cv2.resize(orig_sce_img, (sm.view_width, sm.view_height))
            img2 = cv2.resize(ref_img, (sm.view_width, sm.view_height))
            cv2.imwrite(self.debug_filebase + 'a.png',
                        np.concatenate((img1, img2), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((img1, img2), axis=1))
            self.timer.start()

        # AKAZE, SIFT and SURF are truly scale invariant; couldn't get ORB to work as well
        vary_scale = vary_scale if feat == self.ORB else False

        if len(ref_kp) < KeypointAlgo.MIN_FEATURES:
            raise PositioningException(
                'Too few (%d) reference features found' % (len(ref_kp), ))

        ok = False
        for i in range(self.MAX_SCENE_SCALE_STEPS):
            try:
                # resize scene image if necessary
                sce_img_sc = (sm.view_width
                              if scale_cam_img else self._cam.width
                              ) / self._cam.width / self.SCENE_SCALE_STEP**i
                if np.isclose(sce_img_sc, 1):
                    sce_img = orig_sce_img
                else:
                    sce_img = cv2.resize(orig_sce_img,
                                         None,
                                         fx=sce_img_sc,
                                         fy=sce_img_sc,
                                         interpolation=cv2.INTER_CUBIC)

                # detect features in scene image
                sce_max_mem = KeypointAlgo.MAX_WORK_MEM - (
                    KeypointAlgo.BYTES_PER_FEATURE[feat] + 12) * len(ref_desc)
                ee = sm.pixel_extent(abs(match_mask_params[2])
                                     ) if match_mask_params is not None else 0
                sce_kp, sce_desc, self._latest_detector = KeypointAlgo.detect_features(
                    sce_img,
                    feat,
                    maxmem=sce_max_mem,
                    max_feats=KeypointAlgo.MAX_FEATURES,
                    expected_pixel_extent=ee)
                if len(sce_kp) < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'Too few (%d) scene features found' % (len(sce_kp), ))

                # match descriptors
                try:
                    mask = None
                    if match_mask_params is not None:
                        mask = KeypointAlgo.calc_match_mask(
                            sm, sce_kp, ref_kp, self._render_z, sce_img_sc,
                            ref_img_sc, *match_mask_params)
                    matches = KeypointAlgo.match_features(
                        sce_desc,
                        ref_desc,
                        self._latest_detector.defaultNorm(),
                        mask=mask,
                        method='brute')
                    error = None
                except PositioningException as e:
                    matches = []
                    error = e

                # debug by drawing matches
                if not BATCH_MODE or DEBUG:
                    print('matches: %s/%s' %
                          (len(matches), min(len(sce_kp), len(ref_kp))),
                          flush=True,
                          end=", ")
                self._draw_matches(sce_img,
                                   sce_img_sc,
                                   sce_kp,
                                   ref_img,
                                   ref_img_sc,
                                   ref_kp,
                                   matches,
                                   pause=False,
                                   show=DEBUG)

                if error is not None:
                    raise error

                # select matched scene feature image coordinates
                sce_kp_2d = np.array([
                    tuple(np.divide(sce_kp[m.queryIdx].pt, sce_img_sc))
                    for m in matches
                ],
                                     dtype='float')

                # prepare reference feature 3d coordinates (for only matched features)
                if use_feature_db:
                    ref_kp_3d = ref_kp_3d[[m.trainIdx for m in matches], :]
                    if add_noise:
                        # add noise to noiseless 3d ref points from fdb
                        self.timer.stop()
                        ref_kp_3d, self.sm_noise, _ = tools.points_with_noise(
                            ref_kp_3d,
                            only_z=True,
                            noise_lv=SHAPE_MODEL_NOISE_LV[add_noise],
                            max_rng=self._shape_model_rng)
                        self.timer.start()
                else:
                    # get feature 3d points using 3d model
                    ref_kp_3d = KeypointAlgo.inverse_project(
                        sm, [ref_kp[m.trainIdx].pt for m in matches],
                        depth_result, self._render_z, ref_img_sc)

                if KeypointAlgo.DISCARD_OFF_OBJECT_FEATURES:
                    I = np.where(np.logical_not(np.isnan(ref_kp_3d[:, 0])))[0]
                    if len(I) < self.MIN_FEATURES:
                        raise PositioningException('Too few matches found')
                    sce_kp_2d = sce_kp_2d[I, :]
                    ref_kp_3d = ref_kp_3d[I, :]
                    matches = [matches[i] for i in I]

                # finally solve pnp with ransac
                rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(
                    sm, sce_kp_2d, ref_kp_3d, self._ransac_err)

                # debug by drawing inlier matches
                self._draw_matches(sce_img,
                                   sce_img_sc,
                                   sce_kp,
                                   ref_img,
                                   ref_img_sc,
                                   ref_kp, [matches[i[0]] for i in inliers],
                                   label='c) inliers',
                                   pause=self._pause)

                inlier_count = self.count_inliers(sce_kp, ref_kp, matches,
                                                  inliers)
                if DEBUG:
                    print('inliers: %s/%s, ' % (inlier_count, len(matches)),
                          end='',
                          flush=True)
                if inlier_count < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'RANSAC algorithm was left with too few inliers')

                # don't try again if enough inliers were found
                ok = True
                break

            except PositioningException:
                if not vary_scale:
                    raise
                # else maybe try again using a scaled-down scene image

        if not ok:
            raise PositioningException(
                'Not enough inliers even if tried scaling scene image down x%.1f'
                % (1 / sce_img_sc))
        elif vary_scale:
            print('success at x%.1f' % (1 / sce_img_sc))

        self.timer.stop()

        # set model params to solved pose & pos
        self._set_sc_from_ast_rot_and_trans(rvec,
                                            tvec,
                                            self.latest_discretization_err_q,
                                            rotate_sc=adjust_sc_rot)

        # debugging
        if not BATCH_MODE or DEBUG:
            rp_err = KeypointAlgo.reprojection_error(self._cam, sce_kp_2d,
                                                     ref_kp_3d, inliers, rvec,
                                                     tvec)
            sh_err = sm.calc_shift_err()

            print(
                'repr-err: %.2f, rel-rot-err: %.2f°, dist-err: %.2f%%, lat-err: %.2f%%, shift-err: %.1fm'
                % (
                    rp_err,
                    math.degrees(sm.rel_rot_err()),
                    sm.dist_pos_err() * 100,
                    sm.lat_pos_err() * 100,
                    sh_err * 1000,
                ),
                flush=True)

        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive
            res_img = self.render(shadows=self.RENDER_SHADOWS)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase + 'd.png',
                        np.concatenate((sce_img, res_img), axis=1))
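
Finally, the reprojection error reported near the end can be computed in a few lines. A hedged sketch follows; this is not necessarily how KeypointAlgo.reprojection_error is implemented, just one standard RMS formulation:

    import cv2
    import numpy as np

    def reprojection_error(cam_mx, sce_kp_2d, ref_kp_3d, rvec, tvec):
        # RMS distance between measured 2d points and the projected 3d points
        proj, _ = cv2.projectPoints(ref_kp_3d, rvec, tvec, cam_mx, None)
        return np.sqrt(np.mean(np.sum((proj.reshape((-1, 2)) - sce_kp_2d)**2, axis=1)))

    # e.g. restricted to RANSAC inliers (inliers is the Nx1 index array from solvePnPRansac):
    # err = reprojection_error(cam_mx, sce_kp_2d[inliers[:, 0]], ref_kp_3d[inliers[:, 0]], rvec, tvec)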