def perterb_kpts(kpts, xy_std=None, invV_std=None, ori_std=None, damping=None,
                 seed=None, **kwargs):
    """ Adds normally distributed perturbations to keypoints """
    # TODO: Move to ktool
    # Get standard deviations of the perturbations
    if xy_std is None:
        xy_std = ktool.get_xys(kpts).std(1) + mtool.eps
    if invV_std is None:
        invV_std = ktool.get_invVs(kpts).std(1) + mtool.eps
    if ori_std is None:
        ori_std = ktool.get_oris(kpts).std() + mtool.eps
    xy_std = np.array(xy_std, dtype=ktool.KPTS_DTYPE)
    invV_std = np.array(invV_std, dtype=ktool.KPTS_DTYPE)
    if damping is not None:
        xy_std /= damping
        invV_std /= damping
        ori_std /= damping
    if seed is not None:
        np.random.seed(seed)
    # Create normally distributed perturbations
    xy_aug = np.random.normal(0, scale=xy_std, size=(len(kpts), 2)).astype(ktool.KPTS_DTYPE)
    try:
        invV_aug = np.random.normal(0, scale=invV_std, size=(len(kpts), 3)).astype(ktool.KPTS_DTYPE)
    except ValueError as ex:
        ut.printex(ex, key_list=[(type, 'invV_std')])
        raise
    ori_aug = np.random.normal(0, scale=ori_std, size=(len(kpts), 1)).astype(ktool.KPTS_DTYPE)
    # Augment keypoints
    aug = np.hstack((xy_aug, invV_aug, ori_aug))
    kpts_ = kpts + aug
    # Ensure keypoint feasibility
    kpts_ = force_kpts_feasibility(kpts_)
    #print(ut.dict_str({key: type(val) if not isinstance(val, np.ndarray) else val.dtype for key, val in locals().items()}))
    #assert kpts_.dtype == ktool.KPTS_DTYPE, 'bad cast somewhere kpts_.dtype=%r' % (kpts_.dtype)
    return kpts_
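# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of exercising perterb_kpts, assuming
# `dummy.get_dummy_kpts_pair` is available as in the doctests further below.
# Fixing the seed makes the Gaussian augmentation reproducible.
def _example_perterb_kpts():
    import vtool.tests.dummy as dummy
    kpts, _ = dummy.get_dummy_kpts_pair((100, 100))
    # damping > 1 shrinks every standard deviation, so the perturbation is milder
    kpts_ = perterb_kpts(kpts, damping=2.0, seed=42)
    assert kpts_.shape == kpts.shape
    return kpts_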
def kp_info(kp):
    """ Returns formatted xy, shape, and orientation strings plus the scale
    for a single keypoint row. """
    kpts = np.array([kp])
    xy_str = ktool.get_xy_strs(kpts)[0]
    shape_str = ktool.get_shape_strs(kpts)[0]
    ori_ = ktool.get_oris(kpts)[0]
    ori_str = 'ori=%.2f' % ori_
    scale = ktool.get_scales(kpts)[0]
    return xy_str, shape_str, scale, ori_str
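# Editor sketch of unpacking kp_info's return value for the first keypoint.
# `kpts` is assumed to be a standard (N, 6) keypoint array as used throughout
# this module.
def _example_kp_info(kpts):
    xy_str, shape_str, scale, ori_str = kp_info(kpts[0])
    print('%s %s scale=%.2f %s' % (xy_str, shape_str, scale, ori_str))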
def orientation_actors(kpts, H=None):
    """ creates orientation actors w.r.t. the gravity vector """
    import vtool.keypoint as ktool
    try:
        # Get the xy direction of the keypoint orientations
        _xs, _ys = ktool.get_xys(kpts)
        _iv11s, _iv21s, _iv22s = ktool.get_invVs(kpts)
        _oris = ktool.get_oris(kpts)
        # mpl's 0 ori == (-tau / 4) w.r.t GRAVITY_THETA
        abs_oris = _oris + ktool.GRAVITY_THETA
        _sins = np.sin(abs_oris)
        _coss = np.cos(abs_oris)
        # The following is essentially invV.dot(R)
        _dxs = _coss * _iv11s
        _dys = _coss * _iv21s + _sins * _iv22s
        #if H is not None:
        #    # adjust for homography
        #    import vtool as vt
        #    _xs, _ys = vt.transform_points_with_homography(H, np.vstack((_xs, _ys)))
        #    _dxs, _dys = vt.transform_points_with_homography(H, np.vstack((_dxs, _dys)))
        #head_width_list = np.log(_iv11s * _iv22s) / 5
        head_width_list = np.ones(len(_iv11s)) / 10
        kwargs = {
            'length_includes_head': True,
            'shape': 'full',
            'overhang': 0,
            'head_starts_at_zero': False,
        }
        if H is not None:
            kwargs['transform'] = HomographyTransform(H)
        ori_actors = [
            mpl.patches.FancyArrow(x, y, dx, dy, head_width=hw, **kwargs)
            for (x, y, dx, dy, hw) in zip(_xs, _ys, _dxs, _dys, head_width_list)
        ]
    except ValueError as ex:
        print('\n[mplkp.2] !!! ERROR %s: ' % str(ex))
        print('_oris.shape = %r' % (_oris.shape,))
        print('_dxs = %r' % (_dxs,))
        print('_dys = %r' % (_dys,))
        print('_xs = %r' % (_xs,))
        print('_ys = %r' % (_ys,))
        raise
    return ori_actors
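# Editor sketch of attaching the orientation arrows to a matplotlib axes.
# `ax` is any existing Axes and `kpts` an (N, 6) keypoint array; FancyArrow is
# a Patch, so it can be added with ax.add_patch.
def _example_draw_orientation_actors(ax, kpts, H=None):
    for actor in orientation_actors(kpts, H=H):
        ax.add_patch(actor)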
def test_affine_errors(H, kpts1, kpts2, fm, xy_thresh_sqrd, scale_thresh_sqrd,
                       ori_thresh):
    """ Used for refinement as opposed to initial estimation """
    kpts1_m = kpts1.take(fm.T[0], axis=0)
    kpts2_m = kpts2.take(fm.T[1], axis=0)
    invVR1s_m = ktool.get_invVR_mats3x3(kpts1_m)
    xy2_m = ktool.get_xys(kpts2_m)
    det2_m = ktool.get_sqrd_scales(kpts2_m)
    ori2_m = ktool.get_oris(kpts2_m)
    refined_inliers, refined_errors = _test_hypothesis_inliers(
        H, invVR1s_m, xy2_m, det2_m, ori2_m, xy_thresh_sqrd,
        scale_thresh_sqrd, ori_thresh)
    refined_tup1 = (refined_inliers, refined_errors, H)
    return refined_tup1
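# Editor sketch of a refinement step built on test_affine_errors: given an
# affine hypothesis `Aff` chosen during initial estimation, re-test it against
# every match and keep the inlier subset of `fm`. The helper name and its
# placement in the pipeline are assumptions, not part of the original module.
def _example_refine_with_affine(Aff, kpts1, kpts2, fm,
                                xy_thresh_sqrd, scale_thresh_sqrd, ori_thresh):
    refined_inliers, refined_errors, _ = test_affine_errors(
        Aff, kpts1, kpts2, fm, xy_thresh_sqrd, scale_thresh_sqrd, ori_thresh)
    fm_refined = fm.take(refined_inliers, axis=0)
    return fm_refined, refined_errors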
def filter_neighbors(ibs, qaid2_nns, filt2_weights, qreq):
    qaid2_nnfilt = {}
    # Configs
    filt_cfg = qreq.cfg.filt_cfg
    cant_match_sameimg = not filt_cfg.can_match_sameimg
    cant_match_samename = not filt_cfg.can_match_samename
    K = qreq.cfg.nn_cfg.K
    if NOT_QUIET:
        print('[mf] Step 3) Filter neighbors: ')
    if filt_cfg.gravity_weighting:
        # We don't have an easy way to access keypoints from nearest neighbors yet
        aid_list = np.unique(qreq.data_index.dx2_aid)  # FIXME: Highly inefficient
        kpts_list = ibs.get_annot_kpts(aid_list)
        dx2_kpts = np.vstack(kpts_list)
        dx2_oris = ktool.get_oris(dx2_kpts)
        assert len(dx2_oris) == len(qreq.data_index.dx2_data)
    # Filter matches based on config and weights
    mark_, end_ = log_prog('Filter NN: ', len(qaid2_nns))
    for count, qaid in enumerate(six.iterkeys(qaid2_nns)):
        mark_(count)  # progress
        (qfx2_dx, _) = qaid2_nns[qaid]
        qfx2_nndx = qfx2_dx[:, 0:K]
        # Get a numeric score and valid flag for each feature match
        qfx2_score, qfx2_valid = _apply_filter_scores(qaid, qfx2_nndx,
                                                      filt2_weights, filt_cfg)
        qfx2_aid = qreq.data_index.dx2_aid[qfx2_nndx]
        if VERBOSE:
            print('[mf] * %d assignments are invalid by thresh' %
                  ((~qfx2_valid).sum()))
        if filt_cfg.gravity_weighting:
            qfx2_nnori = dx2_oris[qfx2_nndx]
            qfx2_kpts = ibs.get_annot_kpts(qaid)  # FIXME: Highly inefficient
            qfx2_oris = ktool.get_oris(qfx2_kpts)
            # Get the orientation distance
            qfx2_oridist = ltool.rowwise_oridist(qfx2_nnori, qfx2_oris)
            # Normalize into a weight (close orientations are 1, far are 0)
            # NOTE: np.tau is not part of numpy; this codebase appears to
            # attach it (tau = 2 * pi) elsewhere.
            qfx2_gvweight = (np.tau - qfx2_oridist) / np.tau
            # Apply gravity vector weight to the score
            qfx2_score *= qfx2_gvweight
        # Remove Impossible Votes:
        # don't vote for yourself or another chip in the same image.
        # The explicit self check is only needed when the same-image check is
        # off, because filtering same-image matches already removes self matches.
        cant_match_self = not cant_match_sameimg
        if cant_match_self:
            qfx2_notsamechip = qfx2_aid != qaid
            ####DBG
            if VERBOSE:
                nChip_all_invalid = (~qfx2_notsamechip).sum()
                nChip_new_invalid = (qfx2_valid * ~qfx2_notsamechip).sum()
                print('[mf] * %d assignments are invalid by self' % nChip_all_invalid)
                print('[mf] * %d are newly invalidated by self' % nChip_new_invalid)
            ####
            qfx2_valid = np.logical_and(qfx2_valid, qfx2_notsamechip)
        if cant_match_sameimg:
            qfx2_gid = ibs.get_annot_gids(qfx2_aid)
            qgid = ibs.get_annot_gids(qaid)
            qfx2_notsameimg = qfx2_gid != qgid
            ####DBG
            if VERBOSE:
                nImg_all_invalid = (~qfx2_notsameimg).sum()
                nImg_new_invalid = (qfx2_valid * ~qfx2_notsameimg).sum()
                print('[mf] * %d assignments are invalid by gid' % nImg_all_invalid)
                print('[mf] * %d are newly invalidated by gid' % nImg_new_invalid)
            ####
            qfx2_valid = np.logical_and(qfx2_valid, qfx2_notsameimg)
        if cant_match_samename:
            qfx2_nid = ibs.get_annot_nids(qfx2_aid)
            qnid = ibs.get_annot_nids(qaid)
            qfx2_notsamename = qfx2_nid != qnid
            ####DBG
            if VERBOSE:
                nName_all_invalid = (~qfx2_notsamename).sum()
                nName_new_invalid = (qfx2_valid * ~qfx2_notsamename).sum()
                print('[mf] * %d assignments are invalid by nid' % nName_all_invalid)
                print('[mf] * %d are newly invalidated by nid' % nName_new_invalid)
            ####
            qfx2_valid = np.logical_and(qfx2_valid, qfx2_notsamename)
        #printDBG('[mf] * Marking %d assignments as invalid' % ((~qfx2_valid).sum()))
        qaid2_nnfilt[qaid] = (qfx2_score, qfx2_valid)
    end_()
    return qaid2_nnfilt
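# Small numeric sketch of the gravity-vector weighting used above (editor
# addition). Assuming rowwise_oridist returns distances in [0, tau / 2],
# orientation distances near 0 map to weights near 1.0, while the largest
# possible distance (half a turn) maps to 0.5, so disagreeing orientations are
# down-weighted but never zeroed out entirely.
def _example_gravity_weight():
    import numpy as np
    TAU = 2 * np.pi
    qfx2_oridist = np.array([0.0, TAU / 8, TAU / 4, TAU / 2])
    qfx2_gvweight = (TAU - qfx2_oridist) / TAU
    # -> approximately [1.0, 0.875, 0.75, 0.5]
    return qfx2_gvweight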
def test_homog_errors(H, kpts1, kpts2, fm, xy_thresh_sqrd, scale_thresh,
                      ori_thresh, full_homog_checks=True):
    r"""
    Test to see which keypoints the homography correctly maps

    Args:
        H (ndarray[float64_t, ndim=2]): homography/perspective matrix
        kpts1 (ndarray[float32_t, ndim=2]): keypoints
        kpts2 (ndarray[float32_t, ndim=2]): keypoints
        fm (list): list of feature matches as tuples (qfx, dfx)
        xy_thresh_sqrd (float):
        scale_thresh (float):
        ori_thresh (float): angle in radians
        full_homog_checks (bool):

    Returns:
        tuple: homog_tup1

    CommandLine:
        python -m vtool.spatial_verification --test-test_homog_errors:0 --show
        python -m vtool.spatial_verification --test-test_homog_errors:0 --show --rotation_invariance
        python -m vtool.spatial_verification --test-test_homog_errors:0 --show --rotation_invariance --no-affine-invariance --xy-thresh=.001
        python -m vtool.spatial_verification --test-test_homog_errors:0 --show --rotation_invariance --no-affine-invariance --xy-thresh=.001 --no-full-homog-checks
        python -m vtool.spatial_verification --test-test_homog_errors:0 --show --no-full-homog-checks
        # --------------
        # Shows (sorta) how inliers are computed
        python -m vtool.spatial_verification --test-test_homog_errors:1 --show
        python -m vtool.spatial_verification --test-test_homog_errors:1 --show --rotation_invariance
        python -m vtool.spatial_verification --test-test_homog_errors:1 --show --rotation_invariance --no-affine-invariance --xy-thresh=.001
        python -m vtool.spatial_verification --test-test_homog_errors:1 --show --rotation_invariance --xy-thresh=.001
        python -m vtool.spatial_verification --test-test_homog_errors:0 --show --rotation_invariance --xy-thresh=.001

    Example0:
        >>> # DISABLE_DOCTEST
        >>> from vtool.spatial_verification import *  # NOQA
        >>> import plottool as pt
        >>> kpts1, kpts2, fm, aff_inliers, rchip1, rchip2, xy_thresh_sqrd = testdata_matching_affine_inliers()
        >>> H = estimate_refined_transform(kpts1, kpts2, fm, aff_inliers)
        >>> scale_thresh, ori_thresh = 2.0, 1.57
        >>> full_homog_checks = not ut.get_argflag('--no-full-homog-checks')
        >>> homog_tup1 = test_homog_errors(H, kpts1, kpts2, fm, xy_thresh_sqrd, scale_thresh, ori_thresh, full_homog_checks)
        >>> homog_tup = (homog_tup1[0], homog_tup1[2])
        >>> ut.quit_if_noshow()
        >>> pt.draw_sv.show_sv(rchip1, rchip2, kpts1, kpts2, fm, homog_tup=homog_tup)
        >>> ut.show_if_requested()

    Example1:
        >>> # DISABLE_DOCTEST
        >>> from vtool.spatial_verification import *  # NOQA
        >>> import plottool as pt
        >>> kpts1, kpts2, fm_, aff_inliers, rchip1, rchip2, xy_thresh_sqrd = testdata_matching_affine_inliers()
        >>> H = estimate_refined_transform(kpts1, kpts2, fm_, aff_inliers)
        >>> scale_thresh, ori_thresh = 2.0, 1.57
        >>> full_homog_checks = not ut.get_argflag('--no-full-homog-checks')
        >>> # ----------------
        >>> # Take subset of feature matches
        >>> fm = fm_
        >>> scale_err, xy_err, ori_err = \
        ...     ut.exec_func_src(test_homog_errors, globals(), locals(),
        ...                      'scale_err, xy_err, ori_err'.split(', '))
        >>> # We only care about checking scale and orientation here; ignore bad xy points
        >>> xy_inliers_flag = np.less(xy_err, xy_thresh_sqrd)
        >>> scale_err[~xy_inliers_flag] = 0
        >>> # filter
        >>> fm = fm_[np.array(scale_err).argsort()[::-1][:10]]
        >>> # Exec sourcecode
        >>> kpts1_m, kpts2_m, off_xy1_m, off_xy1_mt, dxy1_m, dxy1_mt, xy2_m, xy1_m, xy1_mt, scale_err, xy_err, ori_err = \
        ...     ut.exec_func_src(test_homog_errors, globals(), locals(),
        ...                      'kpts1_m, kpts2_m, off_xy1_m, off_xy1_mt, dxy1_m, dxy1_mt, xy2_m, xy1_m, xy1_mt, scale_err, xy_err, ori_err'.split(', '))
        >>> # ---------------
        >>> ut.quit_if_noshow()
        >>> pt.figure(fnum=1, pnum=(1, 2, 1), title='orig points and offset point')
        >>> segments_list1 = np.array(list(zip(xy1_m.T.tolist(), off_xy1_m.T.tolist())))
        >>> pt.draw_line_segments(segments_list1, color=pt.LIGHT_BLUE)
        >>> pt.dark_background()
        >>> # ---------------
        >>> pt.figure(fnum=1, pnum=(1, 2, 2), title='transformed points and matching points')
        >>> # ---------------
        >>> # First have to make corresponding offset points
        >>> # Use reference point for scale and orientation tests
        >>> oris2_m = ktool.get_oris(kpts2_m)
        >>> scales2_m = ktool.get_scales(kpts2_m)
        >>> dxy2_m = np.vstack((np.sin(oris2_m), -np.cos(oris2_m)))
        >>> scaled_dxy2_m = dxy2_m * scales2_m[None, :]
        >>> off_xy2_m = xy2_m + scaled_dxy2_m
        >>> # Draw transformed segments
        >>> segments_list2 = np.array(list(zip(xy2_m.T.tolist(), off_xy2_m.T.tolist())))
        >>> pt.draw_line_segments(segments_list2, color=pt.GREEN)
        >>> # Draw corresponding match segments
        >>> segments_list3 = np.array(list(zip(xy1_mt.T.tolist(), off_xy1_mt.T.tolist())))
        >>> pt.draw_line_segments(segments_list3, color=pt.RED)
        >>> # Draw matches between correspondences
        >>> segments_list4 = np.array(list(zip(xy1_mt.T.tolist(), xy2_m.T.tolist())))
        >>> pt.draw_line_segments(segments_list4, color=pt.ORANGE)
        >>> pt.dark_background()
        >>> # ---------------
        >>> #vt.get_xy_axis_extents(kpts1_m)
        >>> #pt.draw_sv.show_sv(rchip1, rchip2, kpts1, kpts2, fm, homog_tup=homog_tup)
        >>> ut.show_if_requested()
    """
    kpts1_m = kpts1.take(fm.T[0], axis=0)
    kpts2_m = kpts2.take(fm.T[1], axis=0)
    # Transform all xy1 matches to xy2 space
    xy1_m = ktool.get_xys(kpts1_m)
    xy1_mt = ltool.transform_points_with_homography(H, xy1_m)
    #xy1_mt = ktool.transform_kpts_xys(H, kpts1_m)
    xy2_m = ktool.get_xys(kpts2_m)
    # --- Find (Squared) Homography Distance Error ---
    # Scale and orientation cannot be tested directly here, because a
    # projective transformation maps an ellipse to a general conic rather
    # than another ellipse.
    xy_err = dtool.L2_sqrd(xy1_mt.T, xy2_m.T)
    # Estimate final inliers
    if full_homog_checks:
        # TODO: may need to use more than one reference point
        # Use a reference point for scale and orientation tests
        oris1_m = ktool.get_oris(kpts1_m)
        scales1_m = ktool.get_scales(kpts1_m)
        # Get point offsets with unit length
        dxy1_m = np.vstack((np.sin(oris1_m), -np.cos(oris1_m)))
        scaled_dxy1_m = dxy1_m * scales1_m[None, :]
        off_xy1_m = xy1_m + scaled_dxy1_m
        # Transform the reference points
        off_xy1_mt = ltool.transform_points_with_homography(H, off_xy1_m)
        scaled_dxy1_mt = xy1_mt - off_xy1_mt
        scales1_mt = npl.norm(scaled_dxy1_mt, axis=0)
        dxy1_mt = scaled_dxy1_mt / scales1_mt
        # Adjust for the gravity vector being 0
        oris1_mt = np.arctan2(dxy1_mt[1], dxy1_mt[0]) - ktool.GRAVITY_THETA
        _det1_mt = scales1_mt ** 2
        det2_m = ktool.get_sqrd_scales(kpts2_m)
        ori2_m = ktool.get_oris(kpts2_m)
        scale_err = dtool.det_distance(_det1_mt, det2_m)
        ori_err = dtool.ori_distance(oris1_mt, ori2_m)
        # Check all thresholds
        xy_inliers_flag = np.less(xy_err, xy_thresh_sqrd)
        scale_inliers_flag = np.less(scale_err, scale_thresh)
        ori_inliers_flag = np.less(ori_err, ori_thresh)
        hypo_inliers_flag = xy_inliers_flag
        # Try to re-use memory
        np.logical_and(hypo_inliers_flag, ori_inliers_flag, out=hypo_inliers_flag)
        np.logical_and(hypo_inliers_flag, scale_inliers_flag, out=hypo_inliers_flag)
        # Seems slower due to memory:
        #hypo_inliers_flag = np.logical_and.reduce(
        #    [xy_inliers_flag, ori_inliers_flag, scale_inliers_flag])
        refined_inliers = np.where(hypo_inliers_flag)[0].astype(INDEX_DTYPE)
        refined_errors = (xy_err, ori_err, scale_err)
    else:
        refined_inliers = np.where(xy_err < xy_thresh_sqrd)[0].astype(INDEX_DTYPE)
        refined_errors = (xy_err, None, None)
    homog_tup1 = (refined_inliers, refined_errors, H)
    return homog_tup1
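# Editor sketch of the reference-point trick used in test_homog_errors: a unit
# offset placed along each keypoint's orientation is pushed through H, and the
# length/direction of the transformed offset stands in for the local scale and
# orientation change. Plain numpy only; the similarity-transform H below is a
# toy value, not data from this codebase.
def _example_reference_point_trick():
    import numpy as np

    def _transform_points(H, xy):
        # xy is (2, N); apply H in homogeneous coordinates and dehomogenize
        xyz = np.vstack((xy, np.ones(xy.shape[1])))
        xyz_t = H.dot(xyz)
        return xyz_t[0:2] / xyz_t[2]

    theta, s = np.pi / 6, 2.0  # rotate 30 degrees and double the scale
    H = np.array([[s * np.cos(theta), -s * np.sin(theta),  5.0],
                  [s * np.sin(theta),  s * np.cos(theta), -3.0],
                  [0.0,                0.0,                 1.0]])
    xy = np.array([[10.0], [20.0]])          # keypoint center
    off_xy = xy + np.array([[0.0], [1.0]])   # unit offset from the center
    xy_t = _transform_points(H, xy)
    off_xy_t = _transform_points(H, off_xy)
    d = off_xy_t - xy_t
    est_scale = np.linalg.norm(d, axis=0)[0]  # ~2.0 for this similarity H
    est_theta = np.arctan2(d[1, 0], d[0, 0])  # direction of the rotated offset
    return est_scale, est_theta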
def get_affine_inliers(kpts1, kpts2, fm, fs,
                       xy_thresh_sqrd,
                       scale_thresh_sqrd,
                       ori_thresh):
    """
    Estimates inliers deterministically using elliptical shapes

    Computes all transforms from kpts1 to kpts2 (enumerates every hypothesis).
    We transform from chip1 -> chip2.
    The determinants are squared keypoint scales.

    FROM PERDOCH 2009::
        H = inv(Aj).dot(Rj.T).dot(Ri).dot(Ai)
        H = inv(Aj).dot(Ai)
        The input invVs = perdoch.invA's

    CommandLine:
        python -m vtool.spatial_verification --test-get_affine_inliers

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.spatial_verification import *  # NOQA
        >>> import vtool.tests.dummy as dummy
        >>> import vtool.keypoint as ktool
        >>> kpts1, kpts2 = dummy.get_dummy_kpts_pair((100, 100))
        >>> fm = dummy.make_dummy_fm(len(kpts1)).astype(np.int32)
        >>> fs = np.ones(len(fm), dtype=np.float64)
        >>> xy_thresh_sqrd = ktool.KPTS_DTYPE(.009) ** 2
        >>> scale_thresh_sqrd = ktool.KPTS_DTYPE(2)
        >>> ori_thresh = ktool.KPTS_DTYPE(TAU / 4)
        >>> output = get_affine_inliers(kpts1, kpts2, fm, fs, xy_thresh_sqrd,
        >>>                             scale_thresh_sqrd, ori_thresh)
        >>> result = ut.hashstr(output)
        >>> print(result)
        89kz8nh6p+66t!+u

    Ignore::
        from vtool.spatial_verification import *  # NOQA
        import vtool.tests.dummy as dummy
        import vtool.keypoint as ktool
        kpts1, kpts2 = dummy.get_dummy_kpts_pair((100, 100))
        a = kpts1[fm.T[0]]
        b = kpts1.take(fm.T[0])
        align = fm.dtype.itemsize * fm.shape[1]
        align2 = [fm.dtype.itemsize, fm.dtype.itemsize]
        viewtype1 = np.dtype(np.void, align)
        viewtype2 = np.dtype(np.int32, align2)
        c = np.ascontiguousarray(fm).view(viewtype1)
        fm_view = np.ascontiguousarray(fm).view(viewtype1)
        qfx = fm.view(np.dtype(np.int32, np.int32.itemsize))
        dfx = fm.view(np.dtype(np.int32, np.int32.itemsize))
        d = np.ascontiguousarray(c).view(viewtype2)
        fm.view(np.dtype(np.void, align))
        np.ascontiguousarray(fm).view(np.dtype((np.void, Z.dtype.itemsize * Z.shape[1])))
    """
    # http://ipython-books.github.io/featured-01/
    kpts1_m = kpts1.take(fm.T[0], axis=0)
    kpts2_m = kpts2.take(fm.T[1], axis=0)
    # Get keypoints to project in matrix form
    #invVR2s_m = ktool.get_invV_mats(kpts2_m, with_trans=True, with_ori=True)
    #invVR1s_m = ktool.get_invV_mats(kpts1_m, with_trans=True, with_ori=True)
    invVR2s_m = ktool.get_invVR_mats3x3(kpts2_m)
    invVR1s_m = ktool.get_invVR_mats3x3(kpts1_m)
    RV1s_m = ktool.invert_invV_mats(invVR1s_m)  # 539 us
    # BUILD ALL HYPOTHESIS TRANSFORMS: the transform from kp1 to kp2 is:
    Aff_mats = matrix_multiply(invVR2s_m, RV1s_m)
    # Get components to test the projections against
    xy2_m = ktool.get_xys(kpts2_m)
    det2_m = ktool.get_sqrd_scales(kpts2_m)
    ori2_m = ktool.get_oris(kpts2_m)
    # SLOWER EQUIVALENT:
    # RV1s_m = ktool.get_V_mats(kpts1_m, with_trans=True, with_ori=True)  # 5.2 ms
    # xy2_m = ktool.get_invVR_mats_xys(invVR2s_m)
    # ori2_m = ktool.get_invVR_mats_oris(invVR2s_m)
    # assert np.all(ktool.get_oris(kpts2_m) == ktool.get_invVR_mats_oris(invVR2s_m))
    # assert np.all(ktool.get_xys(kpts2_m) == ktool.get_invVR_mats_xys(invVR2s_m))

    # The previous versions of this function were all roughly comparable.
    # The for-loop version was the slowest. This version, with no internal
    # function definition, was moderately faster and can be profiled directly.
    inliers_and_errors_list = [
        _test_hypothesis_inliers(Aff, invVR1s_m, xy2_m, det2_m, ori2_m,
                                 xy_thresh_sqrd, scale_thresh_sqrd, ori_thresh)
        for Aff in Aff_mats
    ]
    aff_inliers_list = [tup[0] for tup in inliers_and_errors_list]
    aff_errors_list = [tup[1] for tup in inliers_and_errors_list]
    return aff_inliers_list, aff_errors_list, Aff_mats
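# Editor sketch of the single-correspondence hypothesis transform enumerated by
# get_affine_inliers: Aff = invVR2 . inv(invVR1) maps keypoint 1's frame onto
# keypoint 2's frame (Perdoch 2009). The 3x3 matrices below are toy values with
# zero orientation (so the 2x2 block stays lower triangular, as ktool's invV
# layout would give); they are not real detector output.
def _example_single_hypothesis_affine():
    import numpy as np
    invVR1 = np.array([[2.0, 0.0, 10.0],
                       [0.5, 1.5, 20.0],
                       [0.0, 0.0,  1.0]])
    invVR2 = np.array([[4.0, 0.0, 40.0],
                       [1.0, 3.0, 60.0],
                       [0.0, 0.0,  1.0]])
    Aff = invVR2.dot(np.linalg.inv(invVR1))
    # The hypothesis maps keypoint 1's center exactly onto keypoint 2's center
    xy1_h = np.array([10.0, 20.0, 1.0])
    assert np.allclose(Aff.dot(xy1_h), [40.0, 60.0, 1.0])
    return Aff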