def assert_almost_eq(arr_test, arr_target, thresh=1E-11):
    r"""
    Args:
        arr_test (ndarray or list): values under test
        arr_target (ndarray or list): expected values
        thresh (scalar or ndarray or list): elementwise tolerance
    """
    if util_arg.NO_ASSERTS:
        return
    import utool as ut
    arr1 = np.array(arr_test)
    arr2 = np.array(arr_target)
    passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True)
    if not np.all(passed):
        failed_xs = np.where(np.logical_not(passed))
        failed_error = error.take(failed_xs)
        failed_arr_test = arr1.take(failed_xs)
        failed_arr_target = arr2.take(failed_xs)
        msg_list = [
            'FAILED ASSERT ALMOST EQUAL',
            ' * failed_xs = %r' % (failed_xs,),
            ' * failed_error = %r' % (failed_error,),
            ' * failed_arr_test = %r' % (failed_arr_test,),
            ' * failed_arr_target = %r' % (failed_arr_target,),
        ]
        msg = '\n'.join(msg_list)
        raise AssertionError(msg)
    return error
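For reference, `ut.almost_eq` is used throughout this section. A minimal stand-in with the behavior these callers rely on (elementwise closeness with an optional error array) might look like the sketch below; this is an assumption for illustration, not the actual utool implementation:

import numpy as np

def almost_eq_sketch(arr1, arr2, thresh=1E-11, ret_error=False):
    # hypothetical stand-in: elementwise |a - b| < thresh plus the raw error
    error = np.abs(np.asarray(arr1) - np.asarray(arr2))
    passed = error < thresh
    return (passed, error) if ret_error else passed

# Usage of assert_almost_eq itself: silent on success, AssertionError
# (with per-element diagnostics) on failure.
assert_almost_eq([1.0, 2.0], [1.0, 2.0 + 1E-13])  # within default thresh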
def test_rots(theta):
    invVR_mats = ltool.matrix_multiply(invV_mats, R_mats(theta))
    _oris = ktool.get_invVR_mats_oris(invVR_mats)
    print('________')
    print('theta = %r' % (theta % TAU,))
    print('_oris = %r' % (_oris,))
    passed, error = utool.almost_eq(_oris, theta % TAU, ret_error=True)
    try:
        assert np.all(passed)
    except AssertionError as ex:
        utool.printex(ex, 'rotation unequal', key_list=['passed', 'error'])
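`test_rots` leans on `ltool`/`ktool` helpers and module state. A self-contained sketch of the identity it exercises (an angle folded into a rotation matrix can be recovered modulo TAU) is below; `rotation_mat2x2` is written out locally since only its conventional form is assumed:

import numpy as np
TAU = 2 * np.pi

def rotation_mat2x2(theta):
    # conventional counter-clockwise rotation matrix
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s],
                     [s,  c]])

theta = 4.2
R = rotation_mat2x2(theta)
# the first column of R is (cos(theta), sin(theta)); arctan2 recovers the
# angle, and % TAU maps it into [0, TAU) for comparison
theta_ = np.arctan2(R[1, 0], R[0, 0]) % TAU
assert np.isclose(theta_, theta % TAU)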
def _compute_multiassign_weights(_idx2_wx, _idx2_wdist, massign_alpha=1.2,
                                 massign_sigma=80.0,
                                 massign_equal_weights=False):
    r"""
    Multi Assignment Weight Filtering from Improving Bag of Features

    Args:
        massign_equal_weights (bool): turns off soft weighting; gives all
            assigned vectors weight 1

    Returns:
        tuple : (idx2_wxs, idx2_maws)

    References:
        (Improving Bag of Features)
        http://lear.inrialpes.fr/pubs/2010/JDS10a/jegou_improvingbof_preprint.pdf
        (Lost in Quantization)
        http://www.robots.ox.ac.uk/~vgg/publications/papers/philbin08.ps.gz
        (A Context Dissimilarity Measure for Accurate and Efficient Image Search)
        https://lear.inrialpes.fr/pubs/2007/JHS07/jegou_cdm.pdf

    Example:
        >>> massign_alpha = 1.2
        >>> massign_sigma = 80.0
        >>> massign_equal_weights = False

    Notes:
        sigma values from \cite{philbin_lost08}
        (70 ** 2) ~= 5000, (80 ** 2) ~= 6250, (86 ** 2) ~= 7500,
    """
    if not ut.QUIET:
        print('[smk_index.assign] compute_multiassign_weights_')
    if _idx2_wx.shape[1] <= 1:
        idx2_wxs = _idx2_wx.tolist()
        idx2_maws = [[1.0]] * len(idx2_wxs)
    else:
        # Valid word assignments are beyond fraction of distance to the nearest word
        massign_thresh = _idx2_wdist.T[0:1].T.copy()
        # HACK: If the nearest word has distance 0 then this threshold is too hard
        # so we should use the distance to the second nearest word.
        EXACT_MATCH_HACK = True
        if EXACT_MATCH_HACK:
            flag_too_close = (massign_thresh == 0)
            massign_thresh[flag_too_close] = _idx2_wdist.T[1:2].T[flag_too_close]
        # Compute the threshold fraction
        epsilon = .001
        np.add(epsilon, massign_thresh, out=massign_thresh)
        np.multiply(massign_alpha, massign_thresh, out=massign_thresh)
        # Mark assignments as invalid if they are too far away from the nearest assignment
        invalid = np.greater_equal(_idx2_wdist, massign_thresh)
        if ut.VERBOSE:
            nInvalid = (invalid.size - invalid.sum(), invalid.size)
            print('[maw] + massign_alpha = %r' % (massign_alpha,))
            print('[maw] + massign_sigma = %r' % (massign_sigma,))
            print('[maw] + massign_equal_weights = %r' % (massign_equal_weights,))
            print('[maw] * Marked %d/%d assignments as invalid' % nInvalid)

        if massign_equal_weights:
            # Performance hack from jegou paper: just give everyone equal weight
            masked_wxs = np.ma.masked_array(_idx2_wx, mask=invalid)
            idx2_wxs = list(map(ut.filter_Nones, masked_wxs.tolist()))
            #ut.embed()
            if ut.DEBUG2:
                assert all([isinstance(wxs, list) for wxs in idx2_wxs])
            idx2_maws = [np.ones(len(wxs), dtype=np.float32) for wxs in idx2_wxs]
        else:
            # More natural weighting scheme
            # Weighting as in Lost in Quantization
            gauss_numer = np.negative(_idx2_wdist.astype(np.float64))
            gauss_denom = 2 * (massign_sigma ** 2)
            gauss_exp = np.divide(gauss_numer, gauss_denom)
            unnorm_maw = np.exp(gauss_exp)
            # Mask invalid multiassignment weights
            masked_unorm_maw = np.ma.masked_array(unnorm_maw, mask=invalid)
            # Normalize multiassignment weights from 0 to 1
            masked_norm = masked_unorm_maw.sum(axis=1)[:, np.newaxis]
            masked_maw = np.divide(masked_unorm_maw, masked_norm)
            masked_wxs = np.ma.masked_array(_idx2_wx, mask=invalid)
            # Remove masked weights and word indexes
            idx2_wxs = list(map(ut.filter_Nones, masked_wxs.tolist()))
            idx2_maws = list(map(ut.filter_Nones, masked_maw.tolist()))
            #with ut.EmbedOnException():
            if ut.DEBUG2:
                checksum = [sum(maws) for maws in idx2_maws]
                for x in np.where([not ut.almost_eq(val, 1) for val in checksum])[0]:
                    print(checksum[x])
                    print(_idx2_wx[x])
                    print(masked_wxs[x])
                    print(masked_maw[x])
                    print(massign_thresh[x])
                    print(_idx2_wdist[x])
                #all([ut.almost_eq(x, 1) for x in checksum])
                assert all([ut.almost_eq(val, 1) for val in checksum]), (
                    'weights did not break evenly')
    return idx2_wxs, idx2_maws
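A standalone sketch of the soft weighting above, reduced to a single feature's row of sorted word distances (the alpha threshold from Improving Bag of Features plus the Gaussian kernel from Lost in Quantization). The function name and shapes here are illustrative only, not part of the module:

import numpy as np

def soft_assign_weights(wdist, alpha=1.2, sigma=80.0, epsilon=1e-3):
    # wdist: sorted distances from one feature to its nAssign nearest words
    thresh = alpha * (wdist[0] + epsilon)   # fraction of nearest distance
    valid = wdist < thresh                  # filter far-away assignments
    maws = np.exp(-wdist.astype(np.float64) / (2 * sigma ** 2))
    maws[~valid] = 0.0
    return maws / maws.sum()                # weights sum to 1

print(soft_assign_weights(np.array([100.0, 110.0, 200.0])))
# -> roughly [0.5, 0.5, 0.0]; the third word fails the alpha threshold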
def main_smk_debug():
    """
    CommandLine:
        python -m ibeis.algo.hots.smk.smk_debug --test-main_smk_debug

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> main_smk_debug()
    """
    print('+------------')
    print('SMK_DEBUG MAIN')
    print('+------------')
    from ibeis.algo.hots import pipeline
    ibs, annots_df, taids, daids, qaids, qreq_, nWords = testdata_dataframe()
    # Query using SMK
    #qaid = qaids[0]
    nWords = qreq_.qparams.nWords
    aggregate = qreq_.qparams.aggregate
    smk_alpha = qreq_.qparams.smk_alpha
    smk_thresh = qreq_.qparams.smk_thresh
    nAssign = qreq_.qparams.nAssign
    #aggregate = ibs.cfg.query_cfg.smk_cfg.aggregate
    #smk_alpha = ibs.cfg.query_cfg.smk_cfg.smk_alpha
    #smk_thresh = ibs.cfg.query_cfg.smk_cfg.smk_thresh
    print('+------------')
    print('SMK_DEBUG PARAMS')
    print('[smk_debug] aggregate = %r' % (aggregate,))
    print('[smk_debug] smk_alpha = %r' % (smk_alpha,))
    print('[smk_debug] smk_thresh = %r' % (smk_thresh,))
    print('[smk_debug] nWords = %r' % (nWords,))
    print('[smk_debug] nAssign = %r' % (nAssign,))
    print('L------------')
    # Learn vocabulary
    #words = qreq_.words = smk_index.learn_visual_words(annots_df, taids, nWords)
    # Index a database of annotations
    #qreq_.invindex = smk_repr.index_data_annots(annots_df, daids, words, aggregate, smk_alpha, smk_thresh)
    qreq_.ibs = ibs
    # SMK Match
    print('+------------')
    print('SMK_DEBUG MATCH KERNEL')
    print('+------------')
    qaid2_scores, qaid2_chipmatch_SMK = smk_match.execute_smk_L5(qreq_)
    SVER = ut.get_argflag('--sver')
    if SVER:
        print('+------------')
        print('SMK_DEBUG SVER? YES!')
        print('+------------')
        qaid2_chipmatch_SVER_ = pipeline.spatial_verification(qaid2_chipmatch_SMK, qreq_)
        qaid2_chipmatch = qaid2_chipmatch_SVER_
    else:
        print('+------------')
        print('SMK_DEBUG SVER? NO')
        print('+------------')
        qaid2_chipmatch = qaid2_chipmatch_SMK
    print('+------------')
    print('SMK_DEBUG DISPLAY RESULT')
    print('+------------')
    cm_list = convert_smkmatch_to_chipmatch(qaid2_chipmatch, qaid2_scores)
    #filt2_meta = {}
    #qaid2_qres_ = pipeline.chipmatch_to_resdict(qaid2_chipmatch, filt2_meta, qreq_)
    qaid2_qres_ = pipeline.chipmatch_to_resdict(qreq_, cm_list)
    for count, (qaid, qres) in enumerate(six.iteritems(qaid2_qres_)):
        print('+================')
        #qres = qaid2_qres_[qaid]
        qres.show_top(ibs, fnum=count)
        for aid in qres.aid2_score.keys():
            smkscore = qaid2_scores[qaid][aid]
            sumscore = qres.aid2_score[aid]
            if not ut.almost_eq(smkscore, sumscore):
                print('scorediff aid=%r, smkscore=%r, sumscore=%r' % (aid, smkscore, sumscore))
        scores = qaid2_scores[qaid]
        #print(scores)
        print(qres.get_inspect_str(ibs))
        print('L================')
    #ut.embed()
    #print(qres.aid2_fs)
    #daid2_totalscore, cmtup_old = smk_index.query_inverted_index(annots_df, qaid, invindex)
    ## Pack into QueryResult
    #qaid2_chipmatch = {qaid: cmtup_old}
    #qaid2_qres_ = pipeline.chipmatch_to_resdict(qaid2_chipmatch, {}, qreq_)
    ## Show match
    #daid2_totalscore.sort(axis=1, ascending=False)
    #print(daid2_totalscore)
    #daid2_totalscore2, cmtup_old = query_inverted_index(annots_df, daids[0], invindex)
    #print(daid2_totalscore2)
    #display_info(ibs, invindex, annots_df)
    print('finished main')
    return locals()
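The result loop above compares the raw SMK scores against the summed chip scores. Stripped of the pipeline objects, the reconciliation pattern is just a tolerance check over two score mappings; the names below are illustrative, not the module's API:

def report_score_diffs(smk_scores, sum_scores, thresh=1e-8):
    # print any annotation whose two scores disagree beyond the tolerance
    for aid, sumscore in sum_scores.items():
        smkscore = smk_scores.get(aid, 0.0)
        if abs(smkscore - sumscore) > thresh:
            print('scorediff aid=%r, smkscore=%r, sumscore=%r'
                  % (aid, smkscore, sumscore))

report_score_diffs({1: 0.50, 2: 0.25}, {1: 0.50, 2: 0.30})  # flags aid=2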
def in_depth_ellipse(kp):
    """
    Makes sure that I understand how the ellipse is created from a keypoint
    representation. Walks through the steps I took in coming to an
    understanding.

    CommandLine:
        python -m pyhesaff.tests.test_ellipse --test-in_depth_ellipse --show --num-samples=12

    Example:
        >>> # SCRIPT
        >>> from pyhesaff.tests.test_ellipse import *  # NOQA
        >>> import pyhesaff.tests.pyhestest as pyhestest
        >>> test_data = pyhestest.load_test_data(short=True)
        >>> kpts = test_data['kpts']
        >>> kp = kpts[0]
        >>> #kp = np.array([0, 0, 10, 10, 10, 0])
        >>> print('Testing kp=%r' % (kp,))
        >>> test_locals = in_depth_ellipse(kp)
        >>> ut.quit_if_noshow()
        >>> ut.show_if_requested()
    """
    #nSamples = 12
    nSamples = ut.get_argval('--num-samples', type_=int, default=12)
    kp = np.array(kp, dtype=np.float64)
    print('kp = %r' % kp)
    #-----------------------
    # SETUP
    #-----------------------
    np.set_printoptions(precision=3)
    df2.reset()
    df2.figure(9003, docla=True, doclf=True)
    ax = df2.gca()
    ax.invert_yaxis()

    def _plotpts(data, px, color=df2.BLUE, label='', marker='.', **kwargs):
        #df2.figure(9003, docla=True, pnum=(1, 1, px))
        df2.plot2(data.T[0], data.T[1], marker, '', color=color, label=label, **kwargs)
        #df2.update()

    def _plotarrow(x, y, dx, dy, color=df2.BLUE, label=''):
        ax = df2.gca()
        arrowargs = dict(head_width=.5, length_includes_head=True, label=label)
        arrow = mpl.patches.FancyArrow(x, y, dx, dy, **arrowargs)
        arrow.set_edgecolor(color)
        arrow.set_facecolor(color)
        ax.add_patch(arrow)
        #df2.update()

    #-----------------------
    # INPUT
    #-----------------------
    # We will call Perdoch's invA = invV
    print('--------------------------------')
    print('Let V = Perdoch.A')
    print('Let Z = Perdoch.E')
    print('--------------------------------')
    print('Input from Perdoch\'s detector: ')

    # We are given the keypoint in invA format
    if len(kp) == 5:
        (ix, iy, iv11, iv21, iv22), iv12 = kp, 0
    elif len(kp) == 6:
        (ix, iy, iv11, iv21, iv22, ori), iv12 = kp, 0
    else:
        raise ValueError('expected kp to have 5 or 6 components')
    invV = np.array([[iv11, iv12, ix],
                     [iv21, iv22, iy],
                     [   0,    0,  1]])
    V = np.linalg.inv(invV)
    Z = (V.T).dot(V)

    print('invV is a transform from points on a unit-circle to the ellipse')
    ut.horiz_print('invV = ', invV)
    print('--------------------------------')
    print('V is a transformation from points on the ellipse to a unit circle')
    ut.horiz_print('V = ', V)
    print('--------------------------------')
    print('An ellipse is a special case of a conic. For any ellipse:')
    print('Points on the ellipse satisfy (x_ - x_0).T.dot(Z).dot(x_ - x_0) = 1')
    print('where Z = (V.T).dot(V)')
    ut.horiz_print('Z = ', Z)

    # Define points on a unit circle
    theta_list = np.linspace(0, TAU, nSamples)
    cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1) for t_ in theta_list])

    # Transform those points to the ellipse using invV
    ellipse_pts1 = invV.dot(cicrle_pts.T).T

    # Let's check our assertion: (x_ - x_0).T.dot(Z).dot(x_ - x_0) == 1
    x_0 = np.array([ix, iy, 1])
    checks = [(x_ - x_0).T.dot(Z).dot(x_ - x_0) for x_ in ellipse_pts1]
    try:
        # HELP: The phase is off here in the 3x3 version. I'm not sure why
        #assert all([almost_eq(1, check) for check in checks1])
        is_almost_eq_pos1 = [ut.almost_eq(1, check) for check in checks]
        is_almost_eq_neg1 = [ut.almost_eq(-1, check) for check in checks]
        assert all(is_almost_eq_pos1)
    except AssertionError as ex:
        print('circle pts = %r ' % cicrle_pts)
        print(ex)
        print(checks)
        print([ut.almost_eq(-1, check, 1E-9) for check in checks])
        raise
    else:
        #assert all([abs(1 - check) < 1E-11 for check in checks2])
        print('... all of our plotted points satisfy this')

    #=======================
    # THE CONIC SECTION
    #=======================
    # All of this was from the Perdoch paper, now lets move into conic sections
    # We will use the notation from wikipedia
    # References:
    #     http://en.wikipedia.org/wiki/Conic_section
    #     http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections

    #-----------------------
    # MATRIX REPRESENTATION
    #-----------------------
    # The matrix representation of a conic is:
    #(A, B2, B2_, C) = Z.flatten()
    #(D, E, F) = (0, 0, 1)
    (A, B2, D2, B2_, C, E2, D2_, E2_, F) = Z.flatten()
    B = B2 * 2
    D = D2 * 2
    E = E2 * 2
    assert B2 == B2_, 'matrix should be symmetric'
    assert D2 == D2_, 'matrix should be symmetric'
    assert E2 == E2_, 'matrix should be symmetric'
    print('--------------------------------')
    print('Now, using wikipedia\'s matrix representation of a conic.')
    con = np.array((('    A', 'B / 2', 'D / 2'),
                    ('B / 2', '    C', 'E / 2'),
                    ('D / 2', 'E / 2', '    F')))
    ut.horiz_print('A matrix A_Q = ', con)

    # A_Q is our conic section (aka ellipse matrix)
    A_Q = np.array(((    A, B / 2, D / 2),
                    (B / 2,     C, E / 2),
                    (D / 2, E / 2,     F)))
    ut.horiz_print('A_Q = ', A_Q)

    #-----------------------
    # DEGENERATE CONICS
    # References:
    #    http://individual.utoronto.ca/somody/quiz.html
    print('----------------------------------')
    print('As long as det(A_Q) != 0 it is not degenerate.')
    print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
    print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
    assert np.linalg.det(A_Q) != 0, 'degenerate conic'
    A_33 = np.array(((    A, B / 2),
                     (B / 2,     C)))
    ut.horiz_print('A_33 = ', A_33)

    #-----------------------
    # CONIC CLASSIFICATION
    #-----------------------
    print('----------------------------------')
    print('The determinant of the minor classifies the type of conic it is')
    print('(det == 0): parabola, (det < 0): hyperbola, (det > 0): ellipse')
    print('det(A_33) = %s' % str(np.linalg.det(A_33)))
    assert np.linalg.det(A_33) > 0, 'conic is not an ellipse'
    print('... this is indeed an ellipse')

    #-----------------------
    # CONIC CENTER
    #-----------------------
    print('----------------------------------')
    print('The center of the ellipse is obtained by: ')
    print('x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)')
    print('y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)')
    # Centers are obtained by solving for where the gradient of the quadratic
    # becomes 0. Without going through the derivation the calculation is...
    # These should be 0, 0 if we are at the origin, or our original x, y
    # coordinate specified by the keypoints. I'm doing the calculation just for
    # shits and giggles
    x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)
    y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)
    ut.horiz_print('x_center = ', x_center)
    ut.horiz_print('y_center = ', y_center)

    #-----------------------
    # MAJOR AND MINOR AXES
    #-----------------------
    # Now we are going to determine the major and minor axis
    # of this beast. It is just the center augmented by the eigenvecs
    print('----------------------------------')
    # Plot ellipse axis
    # !HELP! I DO NOT KNOW WHY I HAVE TO DIVIDE, SQUARE ROOT, AND NEGATE!!!
    (evals, evecs) = np.linalg.eig(A_33)
    l1, l2 = evals
    # The major and minor axis lengths
    b = 1 / np.sqrt(l1)
    a = 1 / np.sqrt(l2)
    v1, v2 = evecs
    # Find the transformation to align the axis
    nminor = v1
    nmajor = v2
    dx1, dy1 = (v1 * b)
    dx2, dy2 = (v2 * a)
    minor = np.array([dx1, -dy1])
    major = np.array([dx2, -dy2])
    x_axis = np.array([[1], [0]])
    cosang = (x_axis.T.dot(nmajor)).T
    # Rotation angle
    theta = np.arccos(cosang)
    print('a = ' + str(a))
    print('b = ' + str(b))
    print('theta = ' + str(theta[0] / TAU) + ' * 2pi')
    # The warped eigenvects should have the same magnitude
    # as the axis lengths
    assert ut.almost_eq(a, major.dot(ltool.rotation_mat2x2(theta))[0])
    assert ut.almost_eq(b, minor.dot(ltool.rotation_mat2x2(theta))[1])
    try:
        # HACK
        if len(theta) == 1:
            theta = theta[0]
    except Exception:
        pass

    #-----------------------
    # ECCENTRICITY
    #-----------------------
    print('----------------------------------')
    print('The eccentricity is determined by:')
    print('')
    print('            (2 * np.sqrt((A - C) ** 2 + B ** 2))     ')
    print('ecc = -----------------------------------------------')
    print('      (nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2))')
    print('')
    print('(nu is always 1 for ellipses)')
    nu = 1
    ecc_numer = (2 * np.sqrt((A - C) ** 2 + B ** 2))
    ecc_denom = (nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2))
    ecc = np.sqrt(ecc_numer / ecc_denom)
    print('ecc = ' + str(ecc))

    # Eccentricity is a little easier in axis aligned coordinates
    # Make sure they agree
    ecc2 = np.sqrt(1 - (b ** 2) / (a ** 2))
    assert ut.almost_eq(ecc, ecc2)

    #-----------------------
    # APPROXIMATE UNIFORM SAMPLING
    #-----------------------
    # We are given the keypoint in invA format
    print('----------------------------------')
    print('Approximate uniform points on an inscribed polygon boundary')

    #def next_xy(x, y, d):
    #    # References:
    #    # http://gamedev.stackexchange.com/questions/1692/what-is-a-simple-algorithm-for-calculating-evenly-distributed-points-on-an-ellip
    #    num = (b ** 2) * (x ** 2)
    #    den = ((a ** 2) * ((a ** 2) - (x ** 2)))
    #    dxdenom = np.sqrt(1 + (num / den))
    #    deltax = d / dxdenom
    #    x_ = x + deltax
    #    y_ = b * np.sqrt(1 - (x_ ** 2) / (a ** 2))
    #    return x_, y_

    def xy_fn(t):
        return np.array((a * np.cos(t), b * np.sin(t))).T

    #nSamples = 16
    #(ix, iy, iv11, iv21, iv22), iv12 = kp, 0
    #invV = np.array([[iv11, iv12, ix],
    #                 [iv21, iv22, iy],
    #                 [   0,    0,  1]])
    #theta_list = np.linspace(0, TAU, nSamples)
    #cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1) for t_ in theta_list])
    uneven_points = invV.dot(cicrle_pts.T).T[:, 0:2]
    #uneven_points2 = xy_fn(theta_list)

    def circular_distance(arr):
        dist_most_ = ((arr[0:-1] - arr[1:]) ** 2).sum(1)
        dist_end_ = ((arr[-1] - arr[0]) ** 2).sum()
        return np.sqrt(np.hstack((dist_most_, dist_end_)))

    # Calculate the distance from each point on the ellipse to the next
    dists = circular_distance(uneven_points)
    total_dist = dists.sum()
    # Get an even step size
    multiplier = 1
    step_size = total_dist / (nSamples * multiplier)
    # Walk along edge
    num_steps_list = []
    offset_list = []
    dist_walked = 0
    total_dist = step_size
    for count in range(len(dists)):
        segment_len = dists[count]
        # Find where your starting location is
        offset_list.append(total_dist - dist_walked)
        # How far can you possibly go?
        total_dist += segment_len
        # How many steps can you take?
        num_steps = int((total_dist - dist_walked) // step_size)
        num_steps_list.append(num_steps)
        # Log how much further you've gotten
        dist_walked += (num_steps * step_size)
    #print('step_size = %r' % step_size)
    #print(np.vstack((num_steps_list, dists, offset_list)).T)

    # store the percent location at each line segment where
    # the cut will be made
    cut_list = []
    for num, dist, offset in zip(num_steps_list, dists, offset_list):
        if num == 0:
            cut_list.append([])
            continue
        offset1 = (step_size - offset) / dist
        offset2 = ((num * step_size) - offset) / dist
        cut_locs = (np.linspace(offset1, offset2, num, endpoint=True))
        cut_list.append(cut_locs)
        #print(cut_locs)

    # Cut the segments into new better segments
    approx_pts = []
    nPts = len(uneven_points)
    for count, cut_locs in enumerate(cut_list):
        for loc in cut_locs:
            pt1 = uneven_points[count]
            pt2 = uneven_points[(count + 1) % nPts]
            # Linearly interpolate between points
            new_loc = ((1 - loc) * pt1) + ((loc) * pt2)
            approx_pts.append(new_loc)
    approx_pts = np.array(approx_pts)

    # Warp approx_pts to the unit circle
    print('----------------------------------')
    print('For each approximate point, find the closest point on the ellipse')
    #new_unit = V.dot(approx_pts.T).T
    ones_ = np.ones(len(approx_pts))
    new_hlocs = np.vstack((approx_pts.T, ones_))
    new_unit = V.dot(new_hlocs).T
    # normalize new_unit
    new_mag = np.sqrt((new_unit ** 2).sum(1))
    new_unorm_unit = new_unit / np.vstack([new_mag] * 3).T
    new_norm_unit = new_unorm_unit / np.vstack([new_unorm_unit[:, 2]] * 3).T
    # Get angle (might not be necessary)
    x_axis = np.array([1, 0, 0])
    arccos_list = x_axis.dot(new_norm_unit.T)
    uniform_theta_list = np.arccos(arccos_list)
    # Maybe this?
    uniform_theta_list = np.arctan2(new_norm_unit[:, 1], new_norm_unit[:, 0])
    #
    unevn_cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1)
                                 for t_ in uniform_theta_list])
    # This is the output. Approximately uniform points sampled along an ellipse
    uniform_ell_pts = invV.dot(unevn_cicrle_pts.T).T
    #uniform_ell_pts = invV.dot(new_norm_unit.T).T

    _plotpts(approx_pts, 0, df2.YELLOW, label='approx points', marker='o-')
    _plotpts(uniform_ell_pts, 0, df2.RED, label='uniform points', marker='o-')

    # Desired number of points
    #ecc = np.sqrt(1 - (b ** 2) / (a ** 2))
    # Total arclength
    #total_arclen = ellipeinc(TAU, ecc)
    #firstquad_arclen = total_arclen / 4
    # Desired arclength between points
    #d = firstquad_arclen / nSamples
    # Initial point
    #x, y = xy_fn(.001)
    #uniform_points = []
    #for count in range(nSamples):
    #    if np.isnan(x_) or np.isnan(y_):
    #        print('nan on count=%r' % count)
    #        break
    #    uniform_points.append((x_, y_))
    # The angle between the major axis and our x axis is:

    #-----------------------
    # DRAWING
    #-----------------------
    print('----------------------------------')
    # Draw the keypoint using the tried and true df2
    # Other things should subsequently align
    #df2.draw_kpts2(np.array([kp]), ell_linewidth=4,
    #               ell_color=df2.DEEP_PINK, ell_alpha=1, arrow=True, rect=True)

    # Plot ellipse points
    _plotpts(ellipse_pts1, 0, df2.LIGHT_BLUE, label='invV.dot(cicrle_pts.T).T', marker='o-')
    _plotarrow(x_center, y_center, dx1, -dy1, color=df2.GRAY, label='minor axis')
    _plotarrow(x_center, y_center, dx2, -dy2, color=df2.GRAY, label='major axis')

    # Rotate the ellipse so it is axis aligned and plot that
    rot = ltool.rotation_around_mat3x3(theta, ix, iy)
    ellipse_pts3 = rot.dot(ellipse_pts1.T).T
    #!_plotpts(ellipse_pts3, 0, df2.GREEN, label='axis aligned points')

    # Plot ellipse orientation
    ortho_basis = np.eye(3)[:, 0:2]
    orient_axis = invV.dot(ortho_basis)
    print(orient_axis)
    _dx1, _dx2, _dy1, _dy2, _1, _2 = orient_axis.flatten()
    #!_plotarrow(x_center, y_center, _dx1, _dy1, color=df2.BLUE, label='ellipse rotation')
    #!_plotarrow(x_center, y_center, _dx2, _dy2, color=df2.BLUE)

    #df2.plt.gca().set_xlim(400, 600)
    #df2.plt.gca().set_ylim(300, 500)
    xmin, ymin = ellipse_pts1.min(0)[0:2] - 1
    xmax, ymax = ellipse_pts1.max(0)[0:2] + 1
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)

    df2.legend()
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    # Hack in another view
    # It seems like the even points are not actually that even.
    # there must be a bug
    df2.figure(fnum=9003 + 1, docla=True, doclf=True, pnum=(1, 3, 1))
    _plotpts(ellipse_pts1, 0, df2.LIGHT_BLUE, label='invV.dot(cicrle_pts.T).T',
             marker='o-', title='even')
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    df2.figure(fnum=9003 + 1, pnum=(1, 3, 2))
    _plotpts(approx_pts, 0, df2.YELLOW, label='approx points', marker='o-',
             title='approx')
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    df2.figure(fnum=9003 + 1, pnum=(1, 3, 3))
    _plotpts(uniform_ell_pts, 0, df2.RED, label='uniform points', marker='o-',
             title='uniform')
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    return locals()
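The conic walkthrough above depends on plotting and detector state, but the core algebra can be checked in isolation. A condensed numpy-only recap for a toy axis-aligned keypoint (same Z = V.T.dot(V) convention; the expected semi-axes are known by construction):

import numpy as np

# toy invV: maps the unit circle to an axis-aligned ellipse with
# semi-axes (20, 10) centered at (5, 5)
invV = np.array([[20.,  0., 5.],
                 [ 0., 10., 5.],
                 [ 0.,  0., 1.]])
V = np.linalg.inv(invV)
Z = V.T.dot(V)
A, C, B2 = Z[0, 0], Z[1, 1], Z[0, 1]
A_33 = np.array([[ A, B2],
                 [B2,  C]])
assert np.linalg.det(A_33) > 0, 'should classify as an ellipse'
evals = np.linalg.eigvalsh(A_33)        # ascending eigenvalues
axes = np.sort(1.0 / np.sqrt(evals))    # semi-axis lengths, b <= a
assert np.allclose(axes, [10.0, 20.0])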