def test_detect_then_show(ax, img_fpath):
    import pyhesaff
    kpts, vecs = pyhesaff.detect_feats(img_fpath)
    print('[test_detect_then_show]')
    print('img_fpath=%r' % img_fpath)
    print('kpts=%s' % (ut.truncate_str(repr(kpts)),))
    print('vecs=%s' % (ut.truncate_str(repr(vecs)),))
    assert len(kpts) == len(vecs)
    assert len(kpts) > 0, 'no keypoints were detected!'
    img = mpl.image.imread(img_fpath)
    plt.imshow(img)
    _xs, _ys = kpts.T[0:2]
    ax.plot(_xs, _ys, 'ro', alpha=.5)

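# A minimal driver for test_detect_then_show, assuming the module-level
# imports the helper itself relies on (utool as ut, matplotlib as mpl, and
# matplotlib.pyplot as plt). The image name reuses ut.grab_test_imgpath as in
# simple_iterative_test below; this sketch is illustrative, not part of the
# original suite.
def demo_detect_then_show():
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    img_fpath = ut.grab_test_imgpath('lena.png')
    test_detect_then_show(ax, img_fpath)
    plt.show()
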
def TIME_GEN_PREPROC_IMG(ibs):
    from ibeis.algo.preproc.preproc_image import add_images_params_gen
    print('[TIME_GEN_PREPROC_IMG]')
    gid_list = ibs.get_valid_gids()
    gpath_list = ibs.get_image_paths(gid_list)

    # STABILITY: verify that the chunksize does not change the output
    if not utool.get_argflag('--nostable'):
        # TEST 1
        with utool.Timer('parallel chunksize=1'):
            output1 = list(add_images_params_gen(gpath_list, chunksize=1))
        print(utool.truncate_str(str(output1), 80))
        assert len(output1) == len(gpath_list), 'chunksize changes output'
        # TEST 2
        with utool.Timer('parallel chunksize=2'):
            output2 = list(add_images_params_gen(gpath_list, chunksize=2))
        print(utool.truncate_str(str(output2), 80))
        assert output1 == output2, 'chunksize changes output'
        # TEST N
        with utool.Timer('parallel chunksize=None'):
            outputN = list(add_images_params_gen(gpath_list, chunksize=None))
        print(utool.truncate_str(str(outputN), 80))
        assert outputN == output2, 'chunksize changes output'

    # BENCHMARK
    setup = utool.unindent(
        '''
        from ibeis.algo.preproc.preproc_image import add_images_params_gen
        genkw = dict(prog=False, verbose=True)
        gpath_list = %r
        ''' % (gpath_list,))
    print(utool.truncate_str(str(gpath_list), 80))
    print('Processing %d images' % (len(gpath_list),))
    timeit3 = partial(timeit2, setup=setup, number=3)
    timeit3('list(add_images_params_gen(gpath_list, chunksize=None, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=None, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=1, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=2, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=4, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=8, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=16, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=32, **genkw))')
    print('[/TIME_GEN_PREPROC_IMG]')
    return locals()

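# timeit2 is imported from elsewhere in this benchmark module (its definition
# is not shown here). A minimal stand-in consistent with how it is called via
# functools.partial above might look like the following; this is a sketch of
# assumed behavior, not the real helper:
def timeit2(stmt, setup='', number=3):
    import timeit
    total = timeit.timeit(stmt, setup=setup, number=number)
    print(stmt)
    print(' * timed: %r seconds' % (total,))
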
def test_pyflann_kmeans():
    """
    kmeans: (self, pts, num_clusters, max_iterations=None, dtype=None, **kwargs)
        Runs kmeans on pts with num_clusters centroids. Returns a numpy array
        of size num_clusters x dim.

        If max_iterations is not None, the algorithm terminates after the
        given number of iterations regardless of convergence. The default is
        to run until convergence.

        If dtype is None (the default), the array returned is the same type
        as pts. Otherwise, the returned array is of type dtype.

    CommandLine:
        python -m vtool.tests.test_pyflann --test-test_pyflann_kmeans

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.tests.test_pyflann import *  # NOQA
        >>> result = test_pyflann_kmeans()
        >>> print(result)
    """
    print('Kmeans')
    flann = pyflann.FLANN()
    num_clusters = 7
    pts = testdata_points(nPts=1009)
    kmeans_centroids = flann.kmeans(pts, num_clusters,
                                    max_iterations=None, dtype=None)
    print(utool.truncate_str(str(kmeans_centroids)))
    print('kmeans_centroids.shape = %r' % (kmeans_centroids.shape,))
    target_shape = (num_clusters, pts.shape[1])
    test_shape = kmeans_centroids.shape
    assert test_shape == target_shape, repr(test_shape) + ' != ' + repr(target_shape)

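# A minimal standalone sketch of the flann.kmeans contract exercised above,
# using random data instead of testdata_points (which is defined elsewhere in
# this test module). Per the docstring, the returned centroid array should be
# num_clusters x dim.
def demo_kmeans_shape():
    import numpy as np
    import pyflann
    rng = np.random.RandomState(0)
    pts = rng.rand(200, 2).astype(np.float32)  # 200 points in 2 dimensions
    flann = pyflann.FLANN()
    centroids = flann.kmeans(pts, 5)
    assert centroids.shape == (5, pts.shape[1])
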
def __nice__(self):
    import utool as ut
    if self.short_header is None:
        header_str = ', '.join([ut.truncate_str(h, maxlen=15, truncmsg='~//~')
                                for h in self.header])
    else:
        header_str = ', '.join(self.short_header)
    return '(shape=%s: cols=%s)' % (self.shape, header_str)

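# Illustrative sketch (not in the original class) of what the header
# truncation above does: ut.truncate_str cuts a string down to roughly
# maxlen characters and splices in truncmsg where text was removed, so a
# long column header becomes a short string containing '~//~'. The exact
# split between kept head and tail is utool's choice; only the length bound
# and the marker are relied on here.
#
#   import utool as ut
#   print(ut.truncate_str('a_very_long_column_header', maxlen=15, truncmsg='~//~'))
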
def add_annot_chunk(ibs_gt, ibs2, aids_chunk1, aid1_to_aid2):
    """
    Adds annotations to the temporary database and prevents duplicate
    additions.

    aids_chunk1 = aid_list1

    Args:
        ibs_gt (IBEISController):
        ibs2 (IBEISController):
        aids_chunk1 (list):
        aid1_to_aid2 (dict):

    Returns:
        list: aids_chunk2
    """
    # Visual info
    guuids_chunk1 = ibs_gt.get_annot_image_uuids(aids_chunk1)
    verts_chunk1 = ibs_gt.get_annot_verts(aids_chunk1)
    thetas_chunk1 = ibs_gt.get_annot_thetas(aids_chunk1)
    # Non-name semantic info
    species_chunk1 = ibs_gt.get_annot_species_texts(aids_chunk1)
    gids_chunk2 = ibs2.get_image_gids_from_uuid(guuids_chunk1)
    ut.assert_all_not_None(aids_chunk1, 'aids_chunk1')
    ut.assert_all_not_None(guuids_chunk1, 'guuids_chunk1')
    try:
        ut.assert_all_not_None(gids_chunk2, 'gids_chunk2')
    except Exception as ex:
        #index = ut.get_first_None_position(gids_chunk2)
        #set(ibs2.get_valid_gids()).difference(set(gids_chunk2))
        ut.printex(ex, keys=['gids_chunk2'])
        #ut.embed()
        #raise
    # Add this new unseen test case to the database
    aids_chunk2 = ibs2.add_annots(gids_chunk2,
                                  species_list=species_chunk1,
                                  vert_list=verts_chunk1,
                                  theta_list=thetas_chunk1,
                                  prevent_visual_duplicates=True)

    def register_annot_mapping(aids_chunk1, aids_chunk2, aid1_to_aid2):
        """ called by add_annot_chunk """
        # The mapping should be 1 to 1
        for aid1, aid2 in zip(aids_chunk1, aids_chunk2):
            if aid1 in aid1_to_aid2:
                assert aid1_to_aid2[aid1] == aid2
            else:
                aid1_to_aid2[aid1] = aid2
    # Register the mapping from ibs_gt to ibs2
    register_annot_mapping(aids_chunk1, aids_chunk2, aid1_to_aid2)
    print('Added: aids_chunk2=%s' % (ut.truncate_str(repr(aids_chunk2), maxlen=60),))
    return aids_chunk2

def simple_iterative_test():
    r"""
    CommandLine:
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test --show

    Example:
        >>> # GUI_DOCTEST
        >>> from pyhesaff.tests.test_pyhesaff_simple_iterative import *  # NOQA
        >>> result = simple_iterative_test()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import pyhesaff
    fpath_list = [
        ut.grab_test_imgpath('lena.png'),
        ut.grab_test_imgpath('carl.jpg'),
        ut.grab_test_imgpath('grace.jpg'),
        ut.grab_test_imgpath('ada.jpg'),
    ]
    kpts_list = []

    for img_fpath in fpath_list:
        kpts, vecs = pyhesaff.detect_feats(img_fpath)
        print('img_fpath=%r' % img_fpath)
        print('kpts=%s' % (ut.truncate_str(repr(kpts)),))
        print('vecs=%s' % (ut.truncate_str(repr(vecs)),))
        assert len(kpts) == len(vecs)
        assert len(kpts) > 0, 'no keypoints were detected!'
        kpts_list.append(kpts)

    if ut.show_was_requested():
        import matplotlib as mpl
        from matplotlib import pyplot as plt
        fig = plt.figure()
        # NOTE: zip first, then enumerate; ``enumerate(zip(...), start=1)``
        # yields (index, (fpath, kpts)) pairs, so the pair must be unpacked
        # with parentheses
        for i, (fpath, kpts) in enumerate(zip(fpath_list, kpts_list), start=1):
            ax = fig.add_subplot(2, 2, i)
            img = mpl.image.imread(fpath)
            plt.imshow(img)
            _xs, _ys = kpts.T[0:2]
            ax.plot(_xs, _ys, 'ro', alpha=.5)

def test_pyflann_hkmeans():
    """
    hkmeans:
        Clusters the data by using multiple runs of kmeans to recursively
        partition the dataset. The number of resulting clusters is given by
        (branch_size - 1) * num_branches + 1.

        This method can be significantly faster when the number of desired
        clusters is quite large (e.g. a hundred or more). Higher branch sizes
        are slower but may give better results.

        If dtype is None (the default), the array returned is the same type
        as pts. Otherwise, the returned array is of type dtype.

    CommandLine:
        python -m vtool.tests.test_pyflann --test-test_pyflann_hkmeans

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.tests.test_pyflann import *  # NOQA
        >>> result = test_pyflann_hkmeans()
        >>> print(result)
    """
    # Test parameters
    flann = pyflann.FLANN()
    branch_size = 5
    num_branches = 7
    print('HKmeans')
    pts = testdata_points(nPts=1009)
    hkmean_centroids = flann.hierarchical_kmeans(pts, branch_size, num_branches,
                                                 max_iterations=1000, dtype=None)
    print(utool.truncate_str(str(hkmean_centroids)))
    print('hkmean_centroids.shape = %r' % (hkmean_centroids.shape,))
    nHKMeansCentroids = (branch_size - 1) * num_branches + 1
    target_shape = (nHKMeansCentroids, pts.shape[1])
    test_shape = hkmean_centroids.shape
    assert test_shape == target_shape, repr(test_shape) + ' != ' + repr(target_shape)

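# Worked example of the cluster-count formula from the docstring above: with
# the test parameters branch_size=5 and num_branches=7, hierarchical kmeans
# should return (5 - 1) * 7 + 1 = 29 centroids, so hkmean_centroids is
# expected to have shape (29, pts.shape[1]).
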
def translate_ibeis_webcall(func, *args, **kwargs):
    r"""
    Called from flask request context

    Args:
        func (function): live python function

    Returns:
        tuple: (output, True, 200, None, jQuery_callback)

    CommandLine:
        python -m ibeis.control.controller_inject --exec-translate_ibeis_webcall
        python -m ibeis.control.controller_inject --exec-translate_ibeis_webcall --domain http://52.33.105.88

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.control.controller_inject import *  # NOQA
        >>> import ibeis
        >>> import time
        >>> import ibeis.web
        >>> web_ibs = ibeis.opendb_bg_web('testdb1', wait=1, start_job_queue=False)
        >>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')
        >>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuids/', aid_list=aids)
        >>> failrsp = web_ibs.send_ibeis_request('/api/annot/uuids/')
        >>> failrsp2 = web_ibs.send_ibeis_request('/api/query/chips/simple_dict//', 'get', qaid_list=[0], daid_list=[0])
        >>> log_text = web_ibs.send_ibeis_request('/api/query/chips/simple_dict/', 'get', qaid_list=[0], daid_list=[0])
        >>> time.sleep(.1)
        >>> print('\n---\nuuid_list = %r' % (uuid_list,))
        >>> print('\n---\nfailrsp =\n%s' % (failrsp,))
        >>> print('\n---\nfailrsp2 =\n%s' % (failrsp2,))
        >>> print('Finished test')
        >>> web_ibs.terminate2()

    Ignore:
        app = get_flask_app()
        with app.app_context():
            #ibs = ibeis.opendb('testdb1')
            func = ibs.get_annot_uuids
            args = tuple()
            kwargs = dict()
    """
    assert len(args) == 0, 'There should not be any args=%r' % (args,)
    # print('Calling: %r with args: %r and kwargs: %r' % (func, args, kwargs))
    ibs = flask.current_app.ibs
    funcstr = ut.func_str(func, (ibs,) + args, kwargs=kwargs, truncate=True)
    print('[TRANSLATE] Calling: %s' % (funcstr,))
    try:
        # Summarize each keyword argument: its type and a short preview
        key_list = sorted(list(kwargs.keys()))
        type_list = []
        message_list = []
        for key in key_list:
            try:
                values = kwargs[key]
                type_ = type(values).__name__
                if type_ == 'list':
                    if len(values) == 0:
                        type_ = 'empty list'
                        message_ = '[]'
                    else:
                        value = values[0]
                        type_ += ' of ' + type(value).__name__
                        length1 = len(values)
                        try:
                            length2 = len(set(values))
                        except TypeError:
                            length2 = len(set(map(str, values)))
                        length3 = min(length1, 3)
                        mod = '...' if length1 != length3 else ''
                        message_ = 'length %d with unique %d of %s%s' % (
                            length1, length2, values[:length3], mod)
                else:
                    message_ = '%s' % (values,)
            except Exception:
                type_ = 'UNKNOWN'
                message_ = 'ERROR IN PARSING'
            type_list.append(type_)
            message_list.append(message_)
        zipped = list(zip(key_list, type_list, message_list))
        if len(zipped) > 0:
            length1 = max(list(map(len, key_list)))
            length2 = max(list(map(len, type_list)))
            for key_, type_, message_ in zipped:
                key_ = key_.rjust(length1)
                type_ = type_.ljust(length2)
                try:
                    print('[TRANSLATE] \t %s (%s) : %s' % (key_, type_, message_))
                except UnicodeEncodeError:
                    print('[TRANSLATE] \t %s (%s) : UNICODE ERROR')
    except Exception:
        print('[TRANSLATE] ERROR IN KWARGS PARSING')
    try:
        # TODO, have better way to differentiate ibs funcs from other funcs
        output = func(**kwargs)
    except TypeError:
        try:
            output = func(ibs=ibs, **kwargs)
        except WebException:
            raise
        except Exception as ex2:  # NOQA
            if MICROSOFT_API_ENABLED:
                if isinstance(ex2, TypeError) and 'required positional' in str(ex2):
                    parameter = str(ex2).split(':')[1].strip().strip("'")
                    raise WebMissingInput('Missing required parameter', parameter)
                elif isinstance(ex2, WebException):
                    raise
                else:
                    raise WebRuntimeException(
                        'An unknown error has occurred, please contact the API '
                        'administrator at [email protected].'
                    )
            else:
                msg_list = []
                # msg_list.append('Error in translate_ibeis_webcall')
                msg_list.append('Expected Function Definition: ' + ut.func_defsig(func))
                msg_list.append('Received Function Definition: %s' % (funcstr,))
                msg_list.append('Received Function Parameters:')
                for key in kwargs:
                    value = kwargs[key]
                    value_str = '%r' % (value,)
                    value_str = ut.truncate_str(value_str, maxlen=256)
                    msg_list.append('\t%r: %s' % (key, value_str))
                # msg_list.append('\targs = %r' % (args,))
                # msg_list.append('flask.request.args = %r' % (flask.request.args,))
                # msg_list.append('flask.request.form = %r' % (flask.request.form,))
                msg_list.append('%s: %s' % (type(ex2).__name__, ex2))
                if WEB_DEBUG_INCLUDE_TRACE:
                    trace = str(traceback.format_exc())
                    msg_list.append(trace)
                msg = '\n'.join(msg_list)
                print(msg)
                # error_msg = ut.formatex(ex2, msg, tb=True)
                # print(error_msg)
                # error_msg = ut.strip_ansi(error_msg)
                # raise Exception(error_msg)
                raise Exception(msg)
                #raise
    resp_tup = (output, True, 200, None)
    return resp_tup

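# Illustrative example (values invented) of the kwargs summary that the
# webcall translators log before dispatching. For kwargs = {'aid_list': [1, 1, 2]}
# the parsing loop above produces:
#
#   [TRANSLATE] 	 aid_list (list of int) : length 3 with unique 2 of [1, 1, 2]
#
# i.e. the element type of the first item, the total length, the number of
# unique values, and a preview of at most 3 elements ('...' marks truncation).
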
def export(ibs, aid_pairs=None):
    """
    3 - 4 different animals
    2 views of each
    matching keypoint coordinates on each annotation
    """
    if aid_pairs is None:
        if ibs.get_dbname() == 'PZ_MOTHERS':
            aid_pair_list = MOTHERS_VIEWPOINT_EXPORT_PAIRS
        if ibs.get_dbname() == 'GZ_ALL':
            aid_pair_list = GZ_VIEWPOINT_EXPORT_PAIRS
    ibs.update_query_cfg(ratio_thresh=1.6)
    export_path = expanduser('~/Dropbox/Assignments/dataset')
    #utool.view_directory(export_path)
    # MOTHERS EG:
    for aid_pair in aid_pair_list:
        cm_list, qreq_ = ibs.query_chips(aid_pair, aid_pair)
        #ibeis.viz.show_qres(ibs, qaid2_qres.values()[1]); df2.iup()
        mrids_list = []
        mkpts_list = []
        for cm in cm_list:
            qaid = cm.qaid
            print('Getting kpts from %r' % qaid)
            #cm.show_top(ibs)
            posrid_list = utool.ensure_iterable(cm.get_classified_pos())
            mrids_list.extend([(qaid, posrid) for posrid in posrid_list])
            mkpts_list.extend(cm.get_matching_keypoints(ibs, posrid_list))

        # Group matching keypoints by the sorted (aid1, aid2) match key
        mkey2_kpts = {}
        for mrids_tup, mkpts_tup in zip(mrids_list, mkpts_list):
            assert len(mrids_tup) == 2, 'must be a match tuple'
            mrids_ = np.array(mrids_tup)
            sortx = mrids_.argsort()
            mrids_ = mrids_[sortx]
            mkpts_ = np.array(mkpts_tup)[sortx]
            mkey = tuple(mrids_.tolist())
            try:
                kpts_list = mkey2_kpts[mkey]
                print('append to mkey=%r' % (mkey,))
            except KeyError:
                print('new mkey=%r' % (mkey,))
                kpts_list = []
            kpts_list.append(mkpts_)
            mkey2_kpts[mkey] = kpts_list

        # Write one match file per match key
        mkeys_list = mkey2_kpts.keys()
        mkeys_keypoints = mkey2_kpts.values()
        for mkeys, mkpts_list in zip(mkeys_list, mkeys_keypoints):
            print(mkeys)
            print(len(mkpts_list))
            kpts1_m = np.vstack([mkpts[0] for mkpts in mkpts_list])
            kpts2_m = np.vstack([mkpts[1] for mkpts in mkpts_list])
            match_lines = [
                repr((tuple(kp1[ktool.LOC_DIMS].tolist()),
                      tuple(kp2[ktool.LOC_DIMS].tolist()))) + ', '
                for kp1, kp2 in zip(kpts1_m, kpts2_m)
            ]
            mcpaths_list = ibs.get_annot_chip_fpath(mkeys)
            fnames_list = list(map(lambda x: split(x)[1], mcpaths_list))
            for path in mcpaths_list:
                utool.copy(path, export_path)

            header_lines = [
                '# Exported keypoint matches (might be duplicates matches)',
                '# matching_aids = %r' % (mkeys,),
            ]
            header_lines += ['# img%d = %r' % (count, fname)
                             for count, fname in enumerate(fnames_list)]
            header_lines += ['# LINE FORMAT: match_pts = [(img1_xy, img2_xy) ... ]']
            header_text = '\n'.join(header_lines)
            match_text = '\n'.join(['match_pts = ['] + match_lines + [']'])
            matchfile_text = '\n'.join([header_text, match_text])
            matchfile_name = ('match_aids(%d,%d).txt' % mkeys)
            matchfile_path = join(export_path, matchfile_name)
            utool.write_to(matchfile_path, matchfile_text)
            print(header_text)
            print(utool.truncate_str(match_text, maxlen=500))

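# For reference, a hypothetical match file as written by export() (the
# coordinates are invented; real values come from kp[ktool.LOC_DIMS] of each
# matched keypoint pair):
#
#   # Exported keypoint matches (might be duplicates matches)
#   # matching_aids = (1, 2)
#   # img0 = 'chip1.png'
#   # img1 = 'chip2.png'
#   # LINE FORMAT: match_pts = [(img1_xy, img2_xy) ... ]
#   match_pts = [
#   ((12.3, 45.6), (78.9, 10.1)),
#   ((22.0, 33.5), (61.2, 48.7)),
#   ]
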
def TIME_GEN_PREPROC_FEAT(ibs):
    print('[TIME_GEN_PREPROC_FEAT]')
    from ibeis.algo.preproc.preproc_feat import extract_hesaff_sift_feats
    from six.moves import zip
    import numpy as np

    def _listeq(x1, x2):
        if isinstance(x1, np.ndarray):
            return np.all(x1 == x2)
        return x1 == x2

    aid_list = ibs.get_valid_aids()
    cid_list = ibs.get_annot_chip_rowids(aid_list)
    cfpath_list = ibs.get_chip_fpath(cid_list)

    # STABILITY: verify that the chunksize does not change the output
    if not utool.get_argflag('--nostable'):
        # TEST 1
        with utool.Timer('parallel chunksize=1'):
            output1 = list(extract_hesaff_sift_feats(cfpath_list, chunksize=1))
        print(utool.truncate_str(str(output1), 80))
        # TEST 2
        with utool.Timer('parallel chunksize=2'):
            output2 = list(extract_hesaff_sift_feats(cfpath_list, chunksize=2))
        print(utool.truncate_str(str(output2), 80))
        assert all([_listeq(*xtup) for tup in zip(output1, output2)
                    for xtup in zip(*tup)]), 'chunksize changes output'
        # TEST N
        with utool.Timer('parallel chunksize=None'):
            outputN = list(extract_hesaff_sift_feats(cfpath_list, chunksize=None))
        print(utool.truncate_str(str(outputN), 80))
        assert all([_listeq(*xtup) for tup in zip(outputN, output2)
                    for xtup in zip(*tup)]), 'chunksize changes output'

    # BENCHMARK
    setup = utool.unindent(
        '''
        from ibeis.algo.preproc.preproc_feat import extract_hesaff_sift_feats
        genkw = dict(prog=False, verbose=True)
        cfpath_list = %r
        ''' % (cfpath_list,))
    print(utool.truncate_str(str(cid_list), 80))
    print('Processing %d chips' % (len(cid_list),))
    timeit3 = partial(timeit2, setup=setup, number=1)
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=None, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=None, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=1, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=2, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=4, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=8, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=16, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=32, **genkw))')
    timeit3('list(extract_hesaff_sift_feats(cfpath_list, chunksize=64, **genkw))')
    # Example timings (1049 gen_feat_worker tasks, 7 processes):
    #   chunksize=None (auto=21): 125.17s (cold), 97.38s (warm)
    #   chunksize=1:   89.11s
    #   chunksize=2:   89.33s
    #   chunksize=4:  114.78s
    #   chunksize=8:  123.35s
    #   chunksize=16: 124.47s
    #   chunksize=32: 126.47s
    #   chunksize=64: 137.34s
    print('[/TIME_GEN_PREPROC_FEAT]')
    return locals()

def translate_wbia_webcall(func, *args, **kwargs):
    r"""
    Called from flask request context

    Args:
        func (function): live python function

    Returns:
        tuple: (output, True, 200, None, jQuery_callback)

    Example:
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> from wbia.control.controller_inject import *  # NOQA
        >>> import wbia
        >>> with wbia.opendb_with_web('testdb1') as (ibs, client):
        ...     aids = client.get('/api/annot/').json
        ...     failrsp = client.post('/api/annot/uuids/')
        ...     failrsp2 = client.get('/api/query/chips/simple_dict//', data={'qaid_list': [0], 'daid_list': [0]})
        ...     log_text = client.get('/api/query/chips/simple_dict/', data={'qaid_list': [0], 'daid_list': [0]})
        >>> print('\n---\nfailrsp =\n%s' % (failrsp.data,))
        >>> print('\n---\nfailrsp2 =\n%s' % (failrsp2.data,))
        >>> print('Finished test')
        Finished test
    """
    assert len(args) == 0, 'There should not be any args=%r' % (args,)
    # logger.info('Calling: %r with args: %r and kwargs: %r' % (func, args, kwargs))
    ibs = flask.current_app.ibs
    funcstr = ut.func_str(func, (ibs,) + args, kwargs=kwargs, truncate=True)
    if 'heartbeat' in funcstr:
        pass
    elif 'metrics' in funcstr:
        pass
    else:
        logger.info('[TRANSLATE] Calling: %s' % (funcstr,))
        try:
            # Summarize each keyword argument: its type and a short preview
            key_list = sorted(list(kwargs.keys()))
            type_list = []
            message_list = []
            for key in key_list:
                try:
                    values = kwargs[key]
                    type_ = type(values).__name__
                    if type_ == 'list':
                        if len(values) == 0:
                            type_ = 'empty list'
                            message_ = '[]'
                        else:
                            value = values[0]
                            type_ += ' of ' + type(value).__name__
                            length1 = len(values)
                            try:
                                length2 = len(set(values))
                            except TypeError:
                                length2 = len(set(map(str, values)))
                            length3 = min(length1, 3)
                            mod = '...' if length1 != length3 else ''
                            message_ = 'length %d with unique %d of %s%s' % (
                                length1, length2, values[:length3], mod)
                    else:
                        message_ = '%s' % (values,)
                except Exception:
                    type_ = 'UNKNOWN'
                    message_ = 'ERROR IN PARSING'
                type_list.append(type_)
                message_list.append(message_)
            zipped = list(zip(key_list, type_list, message_list))
            if len(zipped) > 0:
                length1 = max(list(map(len, key_list)))
                length2 = max(list(map(len, type_list)))
                for key_, type_, message_ in zipped:
                    key_ = key_.rjust(length1)
                    type_ = type_.ljust(length2)
                    try:
                        logger.info('[TRANSLATE] \t %s (%s) : %s' % (key_, type_, message_))
                    except UnicodeEncodeError:
                        logger.info('[TRANSLATE] \t %s (%s) : UNICODE ERROR')
        except Exception:
            logger.info('[TRANSLATE] ERROR IN KWARGS PARSING')
    try:
        # TODO, have better way to differentiate ibs funcs from other funcs
        output = func(**kwargs)
    except TypeError:
        try:
            output = func(ibs, **kwargs)
        except WebException:
            raise
        except Exception as ex2:  # NOQA
            if MICROSOFT_API_ENABLED:
                if isinstance(ex2, TypeError) and 'required positional' in str(ex2):
                    parameter = str(ex2).split(':')[1].strip().strip("'")
                    raise WebMissingInput('Missing required parameter', parameter)
                elif isinstance(ex2, WebException):
                    raise
                else:
                    raise WebRuntimeException(
                        'An unknown error has occurred, please contact the API '
                        'administrator at [email protected].'
                    )
            else:
                msg_list = []
                # msg_list.append('Error in translate_wbia_webcall')
                msg_list.append('Expected Function Definition: ' + ut.func_defsig(func))
                msg_list.append('Received Function Definition: %s' % (funcstr,))
                msg_list.append('Received Function Parameters:')
                for key in kwargs:
                    value = kwargs[key]
                    value_str = '%r' % (value,)
                    value_str = ut.truncate_str(value_str, maxlen=256)
                    msg_list.append('\t%r: %s' % (key, value_str))
                # msg_list.append('\targs = %r' % (args,))
                # msg_list.append('flask.request.args = %r' % (flask.request.args,))
                # msg_list.append('flask.request.form = %r' % (flask.request.form,))
                msg_list.append('%s: %s' % (type(ex2).__name__, ex2))
                if WEB_DEBUG_INCLUDE_TRACE:
                    trace = str(traceback.format_exc())
                    msg_list.append(trace)
                msg = '\n'.join(msg_list)
                logger.info(msg)
                # error_msg = ut.formatex(ex2, msg, tb=True)
                # logger.info(error_msg)
                # error_msg = ut.strip_ansi(error_msg)
                # raise Exception(error_msg)
                raise Exception(msg)
                # raise
    resp_tup = (output, True, 200, None)
    return resp_tup

def export(ibs, aid_pairs=None):
    """
    3 - 4 different animals
    2 views of each
    matching keypoint coordinates on each annotation
    """
    if aid_pairs is None:
        if ibs.get_dbname() == 'PZ_MOTHERS':
            aid_pair_list = MOTHERS_VIEWPOINT_EXPORT_PAIRS
        if ibs.get_dbname() == 'GZ_ALL':
            aid_pair_list = GZ_VIEWPOINT_EXPORT_PAIRS
    ibs.update_query_cfg(ratio_thresh=1.6)
    export_path = expanduser('~/Dropbox/Assignments/dataset')
    #utool.view_directory(export_path)
    # MOTHERS EG:
    for aid_pair in aid_pair_list:
        qaid2_qres = ibs.query_intra_encounter(aid_pair)
        #ibeis.viz.show_qres(ibs, qaid2_qres.values()[1]); df2.iup()
        mrids_list = []
        mkpts_list = []
        for qaid, qres in six.iteritems(qaid2_qres):
            print('Getting kpts from %r' % qaid)
            #qres.show_top(ibs)
            posrid_list = utool.ensure_iterable(qres.get_classified_pos())
            mrids_list.extend([(qaid, posrid) for posrid in posrid_list])
            mkpts_list.extend(qres.get_matching_keypoints(ibs, posrid_list))

        # Group matching keypoints by the sorted (aid1, aid2) match key
        mkey2_kpts = {}
        for mrids_tup, mkpts_tup in zip(mrids_list, mkpts_list):
            assert len(mrids_tup) == 2, 'must be a match tuple'
            mrids_ = np.array(mrids_tup)
            sortx = mrids_.argsort()
            mrids_ = mrids_[sortx]
            mkpts_ = np.array(mkpts_tup)[sortx]
            mkey = tuple(mrids_.tolist())
            try:
                kpts_list = mkey2_kpts[mkey]
                print('append to mkey=%r' % (mkey,))
            except KeyError:
                print('new mkey=%r' % (mkey,))
                kpts_list = []
            kpts_list.append(mkpts_)
            mkey2_kpts[mkey] = kpts_list

        # Write one match file per match key
        mkeys_list = mkey2_kpts.keys()
        mkeys_keypoints = mkey2_kpts.values()
        for mkeys, mkpts_list in zip(mkeys_list, mkeys_keypoints):
            print(mkeys)
            print(len(mkpts_list))
            kpts1_m = np.vstack([mkpts[0] for mkpts in mkpts_list])
            kpts2_m = np.vstack([mkpts[1] for mkpts in mkpts_list])
            match_lines = [
                repr((tuple(kp1[ktool.LOC_DIMS].tolist()),
                      tuple(kp2[ktool.LOC_DIMS].tolist()))) + ', '
                for kp1, kp2 in zip(kpts1_m, kpts2_m)
            ]
            mcpaths_list = ibs.get_annot_cpaths(mkeys)
            fnames_list = list(map(lambda x: split(x)[1], mcpaths_list))
            for path in mcpaths_list:
                utool.copy(path, export_path)

            header_lines = [
                '# Exported keypoint matches (might be duplicates matches)',
                '# matching_aids = %r' % (mkeys,),
            ]
            header_lines += ['# img%d = %r' % (count, fname)
                             for count, fname in enumerate(fnames_list)]
            header_lines += ['# LINE FORMAT: match_pts = [(img1_xy, img2_xy) ... ]']
            header_text = '\n'.join(header_lines)
            match_text = '\n'.join(['match_pts = ['] + match_lines + [']'])
            matchfile_text = '\n'.join([header_text, match_text])
            matchfile_name = ('match_aids(%d,%d).txt' % mkeys)
            matchfile_path = join(export_path, matchfile_name)
            utool.write_to(matchfile_path, matchfile_text)
            print(header_text)
            print(utool.truncate_str(match_text, maxlen=500))

def get_item_resource():
    """
    from zotero_helpers import *
    """
    #item_list = zotero.search('Distinctive Image Features from Scale-Invariant Keypoints')
    #item_list = zotero.search('lowe_distinctive_2004')
    zotero_fpath = get_zotero_path()
    from os.path import join
    # FIND THE BIBTEX ITEMID
    import sqlite3
    bibsql = join(zotero_fpath, 'betterbibtex.sqlite')
    con = sqlite3.connect(bibsql)
    cur = con.cursor()
    # ut.util_sqlite.get_tablenames(cur)
    #ut.util_sqlite.print_database_structure(cur)
    itemID = ut.util_sqlite.get_table_rows(cur, 'keys', 'itemID',
                                           where='citekey=?',
                                           params='lowe_distinctive_2004')
    con.close()

    ###############

    zotero = get_libzotero()
    item = zotero.index[itemID]
    cur = zotero.cur  # NOQA

    zotero.index[1434].title

    # ENTIRE DATABASE INFO
    ut.print_database_structure(cur)

    # FIND WHERE THE ATTACHMENT EXISTS
    for tablename in ut.get_tablenames(cur):
        try:
            x = ut.get_table_csv(cur, tablename).find('ijcv04.pdf')
        except Exception:
            continue
        if x != -1:
            print(tablename)
            print(x)

    tablename = 'itemDataValues'
    print(ut.truncate_str(ut.get_table_csv(cur, tablename), maxlen=5000))

    tablename = 'itemDataValues'
    column_list = ut.get_table_columns(cur, tablename)

    import six
    for column in column_list:
        for rowx, row in enumerate(column):
            if isinstance(row, six.string_types):
                if row.find('ijcv04.pdf') > -1:
                    print(rowx)
                    print(row)

    valueID = column_list[0][3003]
    value = column_list[1][3003]
    ut.util_sqlite.get_table_rows(cur, 'itemData', None,
                                  where='valueID=?', params=valueID,
                                  unpack=False)

    ###
    #ut.rrrr()
    tablename = 'itemAttachments'
    colnames = tuple(ut.get_table_columnname_list(cur, tablename))
    print(ut.get_table_csv(cur, tablename, ['path']))
    _row_list = ut.get_table_rows(cur, tablename, 'itemID', unpack=True)
    ut.get_table_rows(cur, tablename, colnames, unpack=False)
    ut.get_table_num_rows(cur, tablename)
    itemID = ut.util_sqlite.get_table_rows(cur, tablename, colnames,
                                           where='itemID=?', params=itemID,
                                           unpack=False)