def _get_qx2_besrank_iterative(ibs, qreq, nTotalQueries, nPrevQueries, cfglbl=''):
    # TODO: INCORPORATE MINIBATCH SIZE INTO MATCH_CHIPS3 AND DEPRECATE THIS
    print('[harn] querying one query at a time')
    # Make progress message
    msg = textwrap.dedent('''
    ---------------------
    [harn] TEST %d/%d
    ''' + cfglbl + '''
    ---------------------''')
    qx2_bestranks = []
    qaids = qreq.qaids
    # Query one ANNOTATION at a time
    mark_prog = utool.simple_progres_func(TESTRES_VERBOSITY, msg, '.')
    # Query Chip / Row Loop
    for qx, qaid in enumerate(qaids):
        mark_prog(qx + nPrevQueries, nTotalQueries)
        try:
            qreq.qaids = [qaid]  # hacky
            qaid2_qres = mc3.process_query_request(ibs, qreq, safe=False)
        except mf.QueryException as ex:
            utool.printex(ex, 'Harness caught Query Exception')
            qx2_bestranks.append([-1])
            if not STRICT:
                continue
            raise
        try:
            assert len(qaid2_qres) == 1, ''
        except AssertionError as ex:
            utool.printex(ex, key_list=['qaid2_qres'])
            raise
        # record the best rank from this groundtruth
        best_rank = qaid2_qres[qaid].get_best_gt_rank(ibs)
        qx2_bestranks.append([best_rank])
    qreq.qaids = qaids  # fix previous hack
    return qx2_bestranks
def ensure_text(fname, text, repo_dpath='.', force=None, locals_={}, chmod=None):
    """
    Args:
        fname (str): file name
        text (str):
        repo_dpath (str): directory path string (default = '.')
        force (bool): (default = False)
        locals_ (dict): (default = {})

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_git import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    import utool as ut
    ut.colorprint('Ensuring fname=%r' % (fname), 'yellow')
    if force is None and ut.get_argflag('--force-%s' % (fname,)):
        force = True
    fpath = join(repo_dpath, fname)
    if force or not ut.checkpath(fpath, verbose=2, n=5):
        text_ = ut.remove_codeblock_syntax_sentinals(text)
        fmtkw = locals_.copy()
        fmtkw['fname'] = fname
        text_ = text_.format(**fmtkw) + '\n'
        ut.writeto(fpath, text_)
        try:
            if chmod:
                ut.chmod(fpath, chmod)
        except Exception as ex:
            ut.printex(ex, iswarning=True)
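# A minimal usage sketch for ensure_text. The file name, template text, and
# `repo_name` format variable are all hypothetical; this only assumes a utool
# install that provides ut.codeblock and the function above.
import utool as ut  # NOQA
setup_text = ut.codeblock(
    '''
    # {fname} was generated for {repo_name}
    ''')
# Writes ./setup_notes.txt unless it already exists
# (or --force-setup_notes.txt is on the command line)
ensure_text('setup_notes.txt', setup_text, repo_dpath='.',
            locals_={'repo_name': 'myrepo'})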
def set_process_title(title):
    try:
        import setproctitle
        setproctitle.setproctitle(title)
    except ImportError as ex:
        import utool
        utool.printex(ex, iswarning=True)
def event(self, event):
    #print(event)
    #print(event.type())
    #print(ut.invert_dict(dict(QtCore.QEvent.__dict__))[event.type()])
    #print(event.spontaneous())
    #print(event.isAccepted())
    result = QtGui.QMessageBox.event(self, event)
    #print(event.isAccepted())
    #print('----')
    #if event != QtCore.QEvent.DeferredDelete:
    try:
        self.setMinimumHeight(0)
        self.setMaximumHeight(16777215)
        self.setMinimumWidth(0)
        self.setMaximumWidth(16777215)
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Expanding)
        textEdit = self.findChild(QtGui.QTextEdit)
        if textEdit is not None:
            textEdit.setMinimumHeight(0)
            textEdit.setMaximumHeight(16777215)
            textEdit.setMinimumWidth(0)
            textEdit.setMaximumWidth(16777215)
            textEdit.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
    except RuntimeError as ex:
        if ut.VERBOSE:
            msg = 'Closing seems to cause C++ errors. Unsure how to fix properly.'
            ut.printex(ex, msg, iswarning=True, keys=['event', 'event.type()'])
    return result
def get_obj(depc, tablename, root_rowids, config=None, ensure=True):
    """ Convenience function. Gets data in `tablename` as a list of objects. """
    try:
        if tablename == depc.root:
            obj_list = [depc._root_asobject(rowid) for rowid in root_rowids]
        else:
            def make_property_getter(rowid, colname):
                def wrapper():
                    return depc.get_property(
                        tablename, rowid, colnames=colname,
                        config=config, ensure=ensure)
                return wrapper
            colnames = depc[tablename].data_colnames
            obj_list = [
                ut.LazyDict({colname: make_property_getter(rowid, colname)
                             for colname in colnames})
                for rowid in root_rowids
            ]
        return obj_list
        # data_list = depc.get_property(tablename, root_rowids, config)
        # # TODO: lazy dict
        # return [dict(zip(colnames, data)) for data in data_list]
    except Exception as ex:
        ut.printex(ex, 'error in getobj',
                   keys=['tablename', 'root_rowids', 'colnames'])
        raise
def aggregate_descriptors(ibs, aid_list):
    """ Aggregates descriptors with inverted information
    Return agg_index to(2) ->
        desc (descriptor)
        aid (annotation rowid)
        fx (feature index w.r.t. aid)
    </CYTH>
    """
    print('[agg_desc] stacking descriptors from %d annotations' % len(aid_list))
    desc_list = ibs.get_annot_desc(aid_list)
    # Build inverted index of (aid, fx) pairs
    aid_nFeat_iter = izip(aid_list, imap(len, desc_list))
    nFeat_iter = imap(len, desc_list)
    # generate aid inverted index for each feature in each annotation
    _ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
    # generate featx inverted index for each feature in each annotation
    _ax2_fx = (xrange(nFeat) for nFeat in nFeat_iter)
    # Flatten generators into the inverted index
    dx2_aid = np.array(list(chain.from_iterable(_ax2_aid)))
    dx2_fx = np.array(list(chain.from_iterable(_ax2_fx)))
    try:
        # Stack descriptors into a numpy array corresponding to the inverted index
        dx2_desc = np.vstack(desc_list)
        print('[agg_desc] stacked %d descriptors from %d annotations' %
              (len(dx2_desc), len(aid_list)))
    except MemoryError as ex:
        utool.printex(ex, 'cannot build inverted index', '[!memerror]')
        raise
    return dx2_desc, dx2_aid, dx2_fx
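# A toy sketch of the inverted-index layout aggregate_descriptors produces.
# Pure numpy; the fake 2-dim "descriptors" stand in for real feature vectors.
import numpy as np
from itertools import chain
desc_list = [np.ones((3, 2)), np.zeros((2, 2))]  # two annots: 3 and 2 feats
aid_list = [7, 9]
dx2_aid = np.array(list(chain.from_iterable(
    [aid] * len(d) for aid, d in zip(aid_list, desc_list))))
dx2_fx = np.array(list(chain.from_iterable(
    range(len(d)) for d in desc_list)))
dx2_desc = np.vstack(desc_list)
assert dx2_aid.tolist() == [7, 7, 7, 9, 9]
assert dx2_fx.tolist() == [0, 1, 2, 0, 1]
assert dx2_desc.shape == (5, 2)  # stacked row dx maps back to (aid, fx)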
def all_figures_bring_to_front():
    try:
        all_figures = get_all_figures()
        for fig in iter(all_figures):
            bring_to_front(fig)
    except Exception as ex:
        ut.printex(ex)
def build_flann_inverted_index(ibs, aid_list):
    """
    Build an inverted index (using FLANN)
    </CYTH>
    """
    try:
        if len(aid_list) == 0:
            msg = ('len(aid_list) == 0\n'
                   'Cannot build inverted index without features!')
            raise AssertionError(msg)
        dx2_desc, dx2_aid, dx2_fx = aggregate_descriptors(ibs, aid_list)
    except Exception as ex:
        intostr = ibs.get_infostr()  # NOQA
        dbname = ibs.get_dbname()  # NOQA
        num_images = ibs.get_num_images()  # NOQA
        num_annotations = ibs.get_num_annotations()  # NOQA
        num_names = ibs.get_num_names()  # NOQA
        utool.printex(ex, '', 'cannot build inverted index', locals().keys())
        raise
    # Build/Load the flann index
    flann_cfgstr = get_flann_cfgstr(ibs, aid_list)
    flann_params = {'algorithm': 'kdtree', 'trees': 4}
    precomp_kwargs = {'cache_dir': ibs.get_flann_cachedir(),
                      'cfgstr': flann_cfgstr,
                      'flann_params': flann_params,
                      'force_recompute': NOCACHE_FLANN}
    flann = nntool.flann_cache(dx2_desc, **precomp_kwargs)
    return dx2_desc, dx2_aid, dx2_fx, flann
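# A minimal sketch of the FLANN kdtree build/query cycle that the function
# above wraps with caching. Assumes the pyflann package is installed; the
# data here is random, not real ibeis descriptors.
import numpy as np
from pyflann import FLANN
dataset = np.random.rand(100, 128).astype(np.float32)
queries = np.random.rand(5, 128).astype(np.float32)
flann = FLANN()
flann.build_index(dataset, algorithm='kdtree', trees=4)
# result_ids index into `dataset`; in the function above they would map
# back to annotations and features through dx2_aid / dx2_fx
result_ids, dists = flann.nn_index(queries, num_neighbors=2)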
def add_annot_chips(ibs, aid_list, qreq_=None):
    """
    FIXME: This is a dirty dirty function
    Adds chip data to the ANNOTATION. (does not create ANNOTATIONs. first use
    add_annots and then pass them here to ensure chips are computed)
    """
    # Ensure must be false, otherwise an infinite loop occurs
    from ibeis.model.preproc import preproc_chip
    cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False)
    dirty_aids = ut.get_dirty_items(aid_list, cid_list)
    if len(dirty_aids) > 0:
        if ut.VERBOSE:
            print('[ibs] adding chips')
        try:
            # FIXME: Can't be lazy until chip config / delete issue is fixed
            preproc_chip.compute_and_write_chips(ibs, aid_list)
            #preproc_chip.compute_and_write_chips_lazy(ibs, aid_list)
            params_iter = preproc_chip.add_annot_chips_params_gen(ibs, dirty_aids)
        except AssertionError as ex:
            ut.printex(ex, '[!ibs.add_annot_chips]')
            print('[!ibs.add_annot_chips] ' + ut.list_dbgstr('aid_list'))
            raise
        colnames = (ANNOT_ROWID, 'config_rowid', 'chip_uri',
                    'chip_width', 'chip_height',)
        get_rowid_from_superkey = functools.partial(
            ibs.get_annot_chip_rowids, ensure=False, qreq_=qreq_)
        cid_list = ibs.dbcache.add_cleanly(const.CHIP_TABLE, colnames,
                                           params_iter, get_rowid_from_superkey)
    return cid_list
def _fix_agraph_color(data):
    try:
        orig_color = data.get('color', None)
        alpha = data.get('alpha', None)
        color = orig_color
        if color is None and alpha is not None:
            color = [0, 0, 0]
        if color is not None:
            color = pt.ensure_nonhex_color(color)
            #if isinstance(color, np.ndarray):
            #    color = color.tolist()
            color = list(color_funcs.ensure_base255(color))
            if alpha is not None:
                if len(color) == 3:
                    color += [int(alpha * 255)]
                else:
                    color[3] = int(alpha * 255)
            color = tuple(color)
            if len(color) == 3:
                data['color'] = '#%02x%02x%02x' % color
            else:
                data['color'] = '#%02x%02x%02x%02x' % color
    except Exception as ex:
        import utool as ut
        ut.printex(ex, keys=['color', 'orig_color', 'data'])
        raise
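# An illustrative check of the RGBA -> '#RRGGBBAA' conversion used above,
# with plain Python in place of the plottool helpers (which may round
# channels slightly differently):
rgb = (0.2, 0.4, 0.6)  # base-01 color
alpha = 0.5
channels = [int(round(c * 255)) for c in rgb] + [int(alpha * 255)]
assert '#%02x%02x%02x%02x' % tuple(channels) == '#3366997f'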
def check_modules_exists():
    # Modules in this list don't really need to be inspected
    # just make sure they are there
    modname_list = [
        'simplejson',
        'flask',
        'parse',
        'tornado',
        'pandas',
        'statsmodels',
    ]
    failed_list = []
    for modname in modname_list:
        try:
            globals_ = {}
            locals_ = {}
            exec('import ' + modname, globals_, locals_)
        except ImportError:
            failed_list.append(modname)
        except Exception as ex:
            ut.printex(ex, 'Some other error happened when importing %r ' %
                       (modname,), iswarning=True)
            failed_list.append(modname)
    if len(failed_list) > 0:
        print('The following modules are not installed')
        print('\n'.join(failed_list))
    return ''
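# A lighter-weight sketch of the same check using only the standard library:
# importlib.util.find_spec locates a module without executing it, which
# avoids the exec-based import above.
import importlib.util

def modules_missing(modname_list):
    # Returns the subset of modname_list that cannot be located
    return [m for m in modname_list if importlib.util.find_spec(m) is None]

# e.g. modules_missing(['json', 'not_a_real_module']) -> ['not_a_real_module']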
def translated_call(**kwargs):
    try:
        resp_tup = translate_ibeis_webcall(func, **kwargs)
        rawreturn, success, code, message, jQuery_callback = resp_tup
    except WebException as webex:
        ut.printex(webex)
        rawreturn = ''
        if DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE:
            rawreturn = str(traceback.format_exc())
        success = False
        code = webex.code
        message = webex.message
        jQuery_callback = None
    except Exception as ex:
        ut.printex(ex)
        rawreturn = ''
        if DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE:
            rawreturn = str(traceback.format_exc())
        success = False
        code = 500
        message = 'API error, Python Exception thrown: %r' % (str(ex))
        if "'int' object is not iterable" in message:
            rawreturn = (
                'HINT: the input for this call is most likely '
                'expected to be a list.  Try adding a comma at '
                'the end of the input (to cast the conversion '
                'into a list) or encapsulate the input with '
                '[].')
        jQuery_callback = None
    webreturn = translate_ibeis_webreturn(rawreturn, success, code,
                                          message, jQuery_callback)
    return flask.make_response(webreturn, code)
def cached_wraper(*args, **kwargs):
    try:
        if True:
            print('[utool] computing cached function fname_=%s' % (fname_,))
        # Implicitly adds use_cache to kwargs
        cfgstr = get_cfgstr_from_args(func, args, kwargs, key_argx,
                                      key_kwds, kwdefaults, argnames)
        assert cfgstr is not None, 'cfgstr=%r cannot be None' % (cfgstr,)
        if kwargs.get('use_cache', use_cache_):
            # Make cfgstr from specified input
            data = cacher.tryload(cfgstr)
            if data is not None:
                return data
        # Cache missed: compute the function
        data = func(*args, **kwargs)
        # Cache save
        cacher.save(data, cfgstr)
        return data
    except Exception as ex:
        import utool
        _dbgdict2 = dict(key_argx=key_argx, lenargs=len(args),
                         lenkw=len(kwargs),)
        msg = '\n'.join([
            '+--- UTOOL --- ERROR IN CACHED FUNCTION',
            #'dbgdict = ' + utool.dict_str(_dbgdict),
            'dbgdict2 = ' + utool.dict_str(_dbgdict2),
        ])
        utool.printex(ex, msg)
        raise
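# A self-contained sketch of the same try-load / compute / save pattern,
# assuming only the standard library: shelve stands in for the utool Cacher,
# and `cfgstr` is whatever key uniquely describes the inputs.
import shelve

def cached_call(func, cfgstr, cache_path='cache.shelf', use_cache=True):
    with shelve.open(cache_path) as cache:
        if use_cache and cfgstr in cache:
            return cache[cfgstr]   # cache hit
        data = func()              # cache miss: compute
        cache[cfgstr] = data       # save for next time
        return data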
def assert_base255(channels):
    try:
        tests255 = _test_base255(channels)
        assert tests255['is_255'], 'channels must be in 0-255'
    except AssertionError as ex:
        ut.printex(ex, key_list=['channels', 'tests255'])
        raise
def assert_cache_hits(ibs, ismiss_list, rowid_list, kwargs_hash, **kwargs):
    cached_rowid_list = ut.filterfalse_items(rowid_list, ismiss_list)
    cache_ = ibs.table_cache[tblname][colname][kwargs_hash]
    # Load cached values for each rowid
    cache_vals_list = ut.dict_take_list(cache_, cached_rowid_list, None)
    db_vals_list = getter_func(ibs, cached_rowid_list, **kwargs)
    # Assert everything is valid
    msg_fmt = ut.codeblock(
        """
        [assert_cache_hits] tblname = %r
        [assert_cache_hits] colname = %r
        [assert_cache_hits] cfgkeys = %r
        [assert_cache_hits] CACHE INVALID: %r != %r
        """
    )
    msg = msg_fmt % (tblname, colname, cfgkeys, cache_vals_list, db_vals_list)
    try:
        list1 = cache_vals_list
        list2 = db_vals_list
        assert ut.lists_eq(list1, list2), msg
        # if isinstance(db_vals_list, list):
        #     assert cache_vals_list == db_vals_list, msg
        # else:
        #     assert np.all(cache_vals_list == db_vals_list), msg
    except AssertionError as ex:
        raise ex
    except Exception as ex2:
        print(type(cache_vals_list))
        print(type(db_vals_list))
        ut.printex(ex2)
        ut.embed()
        raise
def _get_data(model, qtindex, **kwargs):
    #row = qtindex.row()
    col = qtindex.column()
    row_id = model._get_row_id(qtindex)  # row_id w.r.t. sorting
    getter = model.col_getter_list[col]  # getter for this column
    # Using this getter may not be thread safe
    try:
        # Should this work around decorators?
        #data = getter((row_id,), **kwargs)[0]
        data = getter(row_id, **kwargs)
    except Exception as ex:
        ut.printex(
            ex,
            '[api_item_model] problem getting in column %r' % (col,),
            keys=['model.name', 'getter', 'row_id', 'col', 'qtindex'])
        #getting from: %r' % ut.util_str.get_callable_name(getter))
        raise
    # <HACK: MODEL_CACHE>
    #cachekey = (row_id, col)
    #try:
    #    if True:  # Cache is disabled
    #        raise KeyError('')
    #    #data = model.cache[cachekey]
    #except KeyError:
    #    data = getter(row_id)
    #    #model.cache[cachekey] = data
    # </HACK: MODEL_CACHE>
    return data
def update_query_cfg(query_cfg, **cfgdict):
    # Each config parameter should be unique,
    # so updating them all should not cause conflicts
    # FIXME: Should be able to infer all the children that need updates
    #
    # apply codename before updating subconfigs
    query_cfg.apply_codename(cfgdict.get('codename', None))
    # update subconfigs
    query_cfg.nn_cfg.update(**cfgdict)
    query_cfg.nnweight_cfg.update(**cfgdict)
    query_cfg.sv_cfg.update(**cfgdict)
    query_cfg.agg_cfg.update(**cfgdict)
    query_cfg.flann_cfg.update(**cfgdict)
    query_cfg.smk_cfg.update(**cfgdict)
    query_cfg.smk_cfg.vocabassign_cfg.update(**cfgdict)
    query_cfg.smk_cfg.vocabtrain_cfg.update(**cfgdict)
    query_cfg.rrvsone_cfg.update(**cfgdict)
    query_cfg._featweight_cfg.update(**cfgdict)
    query_cfg._featweight_cfg._feat_cfg.update(**cfgdict)
    query_cfg._featweight_cfg._feat_cfg._chip_cfg.update(**cfgdict)
    query_cfg.update(**cfgdict)
    # Ensure feasibility of the configuration
    try:
        query_cfg.make_feasible()
    except AssertionError as ex:
        print(ut.dict_str(cfgdict, sorted_=True))
        ut.printex(ex)
        raise
def debug_depcache(ibs):
    r"""
    CommandLine:
        python -m ibeis_flukematch.plugin --exec-debug_depcache
        python -m ibeis_flukematch.plugin --exec-debug_depcache --show --no-cnn
        python -m ibeis_flukematch.plugin --exec-debug_depcache --clear-all-depcache --db humpbacks
        python -m ibeis_flukematch.plugin --exec-debug_depcache --show --no-cnn --db humpbacks
        python -m ibeis_flukematch.plugin --exec-preproc_notch_tips --db humpbacks --no-cnn --show

    Example:
        >>> # SCRIPT
        >>> from ibeis_flukematch.plugin import *  # NOQA
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> debug_depcache(ibs)
        >>> ut.show_if_requested()
    """
    print(ibs.depc)
    nas_notch_deps = ibs.depc.get_dependencies('Has_Notch')
    print('nas_notch_deps = %r' % (nas_notch_deps,))
    te_deps = ibs.depc.get_dependencies('Trailing_Edge')
    print('te_deps = %r' % (te_deps,))
    notch_tip_deps = ibs.depc.get_dependencies('Notch_Tips')
    print('notch_tip_deps = %r' % (notch_tip_deps,))
    ibs.depc.print_schemas()
    try:
        ibs.depc.show_graph()
    except Exception as ex:
        ut.printex(ex, iswarning=True)
    all_aids = ibs.get_valid_aids()
    isvalid = ibs.depc.get('Has_Notch', all_aids, 'flag')
    aid_list = ut.compress(all_aids, isvalid)
    aid_list = aid_list[0:10]
    ibs.depc.print_config_tables()
def package_installer():
    """
    system dependent post pyinstaller step
    """
    print('[installer] +--- PACKAGE_INSTALLER ---')
    #build_win32_inno_installer()
    cwd = get_setup_dpath()
    # Build the os-appropriate package
    if sys.platform.startswith('win32'):
        installer_src = build_win32_inno_installer()
        installer_fname_fmt = 'ibeis-win32-install-{timestamp}.exe'
    elif sys.platform.startswith('darwin'):
        installer_src = build_osx_dmg_installer()
        installer_fname_fmt = 'ibeis-osx-install-{timestamp}.dmg'
    elif sys.platform.startswith('linux'):
        installer_src = build_linux_zip_binaries()
        installer_fname_fmt = 'ibeis-linux-binary-{timestamp}.zip'
        #try:
        #    raise NotImplementedError('no linux packager (rpm or deb) supported. try running with --build')
        #except Exception as ex:
        #    ut.printex(ex)
        #pass
    # timestamp the installer name
    installer_fname = installer_fname_fmt.format(timestamp=ut.get_timestamp())
    installer_dst = join(cwd, 'dist', installer_fname)
    try:
        ut.move(installer_src, installer_dst)
    except Exception as ex:
        ut.printex(ex, 'error moving setups', iswarning=True)
    print('[installer] L___ FINISH PACKAGE_INSTALLER ___')
def test_ignore_exec_traceback():
    r"""
    CommandLine:
        python -m utool.util_decor --test-test_ignore_exec_traceback

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_decor import *  # NOQA
        >>> result = test_ignore_exec_traceback()
        >>> print(result)
    """
    import utool as ut

    @ut.indent_func
    def foobar():
        print('foobar')
        raise Exception('foobar')

    try:
        print('printing foobar')
        foobar()
    except Exception as ex:
        #import sys
        #exc_type, exc_value, exc_traceback = sys.exc_info()
        #print(exc_traceback)
        # TODO: ensure decorators are not printed in stack trace
        ut.printex(ex, tb=True)
def touch(fname, times=None, verbose=True):
    """
    Args:
        fname (str)
        times (None):
        verbose (bool):

    Example:
        >>> from utool.util_path import *  # NOQA
        >>> fname = '?'
        >>> times = None
        >>> verbose = True
        >>> result = touch(fname, times, verbose)
        >>> print(result)

    References:
        http://stackoverflow.com/questions/1158076/implement-touch-using-python
    """
    try:
        if verbose:
            print('[util_path] touching %r' % fname)
        with open(fname, 'a'):
            os.utime(fname, times)
    except Exception as ex:
        import utool
        utool.printex(ex, 'touch %s' % fname)
        raise
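# A minimal usage sketch for touch (Python 3; tempfile keeps the example
# self-contained and avoids writing into the working directory):
import os
import tempfile
with tempfile.TemporaryDirectory() as dpath:
    fpath = os.path.join(dpath, 'stamp.txt')
    touch(fpath, verbose=False)   # creates the file if missing
    assert os.path.exists(fpath)
    touch(fpath, verbose=False)   # updates mtime on an existing file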
def get_cfgstr_list(cfg, ignore_keys=None, **kwargs):
    """ default get_cfgstr_list, can be overridden by a config object """
    if hasattr(cfg, 'get_param_info_list'):
        if ignore_keys is not None:
            itemstr_list = [pi.get_itemstr(cfg)
                            for pi in cfg.get_param_info_list()
                            if pi.varname not in ignore_keys]
        else:
            itemstr_list = [pi.get_itemstr(cfg)
                            for pi in cfg.get_param_info_list()]
    else:
        try:
            item_list = parse_config_items(cfg)
            assert item_list is not None
            if ignore_keys is None:
                itemstr_list = [key + '=' + str(val)
                                for key, val in item_list]
            else:
                itemstr_list = [key + '=' + str(val)
                                for key, val in item_list
                                if key not in ignore_keys]
        except Exception as ex:
            print(ignore_keys is None)
            print(ignore_keys)
            ut.printex(ex, keys=['item_list', 'ignore_keys'])
            raise
    filtered_itemstr_list = list(filter(len, itemstr_list))
    config_name = cfg.get_config_name()
    body = ','.join(filtered_itemstr_list)
    cfgstr = ''.join(['_', config_name, '(', body, ')'])
    return cfgstr
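# What the resulting cfgstr format looks like for a toy config (illustrative
# only; the config name and items are hypothetical, and real configs supply
# them via get_config_name / parse_config_items):
items = [('K', 4), ('ratio_thresh', 0.8)]
body = ','.join(key + '=' + str(val) for key, val in items)
cfgstr = ''.join(['_', 'NNConfig', '(', body, ')'])
assert cfgstr == '_NNConfig(K=4,ratio_thresh=0.8)'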
def compute_or_read_annotation_chips(ibs, aid_list, ensure=True):
    """ Reads chips and tries to compute them if they do not exist """
    #print('[preproc_chip] compute_or_read_chips')
    if ensure:
        try:
            utool.assert_all_not_None(aid_list, 'aid_list')
        except AssertionError as ex:
            utool.printex(ex, key_list=['aid_list'])
            raise
    cfpath_list = get_annot_cfpath_list(ibs, aid_list)
    try:
        if ensure:
            chip_list = [gtool.imread(cfpath) for cfpath in cfpath_list]
        else:
            chip_list = [None if cfpath is None else gtool.imread(cfpath)
                         for cfpath in cfpath_list]
    except IOError as ex:
        if not utool.QUIET:
            utool.printex(ex, '[preproc_chip] Handling Exception: ')
        ibs.add_chips(aid_list)
        try:
            chip_list = [gtool.imread(cfpath) for cfpath in cfpath_list]
        except IOError:
            print('[preproc_chip] cache must have been deleted from disk')
            compute_and_write_chips_lazy(ibs, aid_list)
            # Try just one more time
            chip_list = [gtool.imread(cfpath) for cfpath in cfpath_list]
    return chip_list
def show_matches(qres, ibs, aid, qreq_=None, *args, **kwargs):
    from ibeis.viz import viz_matches
    try:
        return viz_matches.show_matches(ibs, qres, aid, *args,
                                        qreq_=qreq_, **kwargs)
    except Exception as ex:
        ut.printex(ex, 'failed in qres.show_matches', keys=['aid', 'qreq_'])
        raise
def _new_image_hesaff(img, **kwargs):
    """ Creates new detector object which reads the image """
    hesaff_params = _make_hesaff_cpp_params(kwargs)
    if __DEBUG__:
        print('[hes] New Hesaff')
        print('[hes] hesaff_params=%r' % (hesaff_params,))
    hesaff_args = hesaff_params.values()  # pass all parameters to HESAFF_CLIB
    rows, cols = img.shape[0:2]
    if len(img.shape) == 2:
        channels = 1
    else:
        channels = img.shape[2]
    try:
        hesaff_ptr = HESAFF_CLIB.new_hesaff_image(
            img, rows, cols, channels, *hesaff_args)
    except Exception as ex:
        msg = ('hesaff_ptr = '
               'HESAFF_CLIB.new_hesaff_image(img_realpath, *hesaff_args)')
        print(msg)
        print('hesaff_args = ')
        print(hesaff_args)
        import utool
        utool.printex(ex, msg, keys=['hesaff_args'])
        raise
    return hesaff_ptr
def new_ibeis_windex(ibs, daid_list):
    """
    IBEIS interface into word_index

    >>> from ibeis.algo.hots.word_index import *  # NOQA
    >>> windex, qreq_, ibs = test_windex()
    """
    daids_hashid = ibs.get_annot_hashid_visual_uuid(daid_list, 'D')
    flann_cfgstr = ibs.cfg.query_cfg.flann_cfg.get_cfgstr()
    feat_cfgstr = ibs.cfg.query_cfg._feat_cfg.get_cfgstr()
    indexer_cfgstr = daids_hashid + flann_cfgstr + feat_cfgstr
    try:
        # Grab the keypoints names and image ids before query time
        flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
        # Get annotation descriptors that will be searched
        # FIXME; qreq_
        vecs_list = ibs.get_annot_vecs(daid_list)
        flann_cachedir = ibs.get_flann_cachedir()
        windex = new_word_index(
            daid_list, vecs_list, flann_params, flann_cachedir,
            indexer_cfgstr, hash_rowids=False, use_params_hash=False)
        return windex
    except Exception as ex:
        utool.printex(ex, True, msg_='cannot build inverted index',
                      key_list=['ibs.get_infostr()'])
        raise
def paint(dgt, painter, option, qtindex):
    """
    TODO: prevent recursive paint
    """
    view = dgt.parent()
    offset = view.verticalOffset() + option.rect.y()
    # Check if still in viewport
    if view_would_not_be_visible(view, offset):
        return None
    try:
        thumb_path = dgt.get_thumb_path_if_exists(view, offset, qtindex)
        if thumb_path is not None:
            # Check if still in viewport
            if view_would_not_be_visible(view, offset):
                return None
            # Read the precomputed thumbnail
            qimg = read_thumb_as_qimg(thumb_path)
            width, height = qimg.width(), qimg.height()
            # Adjust the cell size to fit the image
            dgt.adjust_thumb_cell_size(qtindex, width, height)
            # Check if still in viewport
            if view_would_not_be_visible(view, offset):
                return None
            # Paint image on an item in some view
            painter.save()
            painter.setClipRect(option.rect)
            painter.translate(option.rect.x(), option.rect.y())
            painter.drawImage(QtCore.QRectF(0, 0, width, height), qimg)
            painter.restore()
    except Exception as ex:
        # PSA: Always report errors on Exceptions!
        print('Error in APIThumbDelegate')
        ut.printex(ex, 'Error in APIThumbDelegate')
        painter.save()
        painter.restore()
def get_annot_texts(ibs, aid_list, **kwargs):
    """ Add each type of text_list to the strings list """
    try:
        ibsfuncs.assert_valid_aids(ibs, aid_list)
        assert utool.isiterable(aid_list), 'input must be iterable'
        assert all([isinstance(aid, int) for aid in aid_list]), 'invalid input'
    except AssertionError as ex:
        utool.printex(ex, 'invalid input', 'viz', key_list=['aid_list'])
        raise
    texts_list = []  # list of lists of texts
    if kwargs.get('show_aidstr', True):
        aidstr_list = get_aidstrs(aid_list)
        texts_list.append(aidstr_list)
    if kwargs.get('show_gname', False):
        gname_list = ibs.get_annot_gnames(aid_list)
        texts_list.append(['gname=%s' % gname for gname in gname_list])
    if kwargs.get('show_name', True):
        name_list = ibs.get_annot_names(aid_list)
        texts_list.append(['name=%s' % name for name in name_list])
    if kwargs.get('show_exemplar', True):
        flag_list = ibs.get_annot_exemplar_flag(aid_list)
        texts_list.append(['EX' if flag else '' for flag in flag_list])
    # zip them up to get a tuple for each chip and join the fields
    if len(texts_list) > 0:
        annotation_text_list = [', '.join(tup) for tup in izip(*texts_list)]
    else:
        # no texts were specified; return an empty string for each input
        annotation_text_list = [''] * len(aid_list)
    return annotation_text_list
def _image_view(sel_aids=sel_aids, **_kwargs):
    try:
        viz.show_image(ibs, gid, sel_aids=sel_aids, fnum=self.fnum, **_kwargs)
        df2.set_figtitle('Image View')
    except TypeError as ex:
        ut.printex(ex, ut.dict_str(_kwargs))
        raise
def setData(model, index, var, role=Qt.EditRole):
    """ Sets the role data for the item at index to var.
    var is a QVariant (called data in documentation) """
    print("[model] setData: %r" % (str(qtype.qindexinfo(index))))
    try:
        if not index.isValid():
            return None
        flags = model.flags(index)
        if not (flags & Qt.ItemIsEditable or flags & Qt.ItemIsUserCheckable):
            return None
        if role == Qt.CheckStateRole:
            type_ = "QtCheckState"
            data = var == Qt.Checked
        elif role != Qt.EditRole:
            return False
        else:
            # Cast var into datatype
            type_ = model.get_coltype(index.column())
            data = qtype.cast_from_qt(var, type_)
        # Do actual setting of data
        print(" * new_data = %s(%r)" % (utool.type_str(type_), data))
        model.set_data(index, data)
        # Emit that data was changed and return success
        model.dataChanged.emit(index, index)
        return True
    except Exception as ex:
        var_ = str(var.toString())  # NOQA
        utool.printex(ex, "ignoring setData", "[model]", key_list=["var_"])
        # raise
        # print(' * ignoring setData: %r' % locals().get('var', None))
        return False
def make_annotation_uuids(image_uuid_list, bbox_list, theta_list,
                          deterministic=True):
    try:
        # Check to make sure bbox input is a tuple-list, not a list-list
        if len(bbox_list) > 0:
            try:
                assert isinstance(bbox_list[0], tuple), \
                    'Bounding boxes must be tuples of ints!'
                assert isinstance(bbox_list[0][0], int), \
                    'Bounding boxes must be tuples of ints!'
            except AssertionError as ex:
                ut.printex(ex)
                print('bbox_list = %r' % (bbox_list,))
                raise
        annotation_uuid_list = [
            ut.augment_uuid(img_uuid, bbox, theta)
            for img_uuid, bbox, theta in
            zip(image_uuid_list, bbox_list, theta_list)]
        if not deterministic:
            # Augment deterministic uuid with a random uuid to ensure randomness
            # (this should be ensured in all hardware situations)
            annotation_uuid_list = [ut.augment_uuid(ut.random_uuid(), _uuid)
                                    for _uuid in annotation_uuid_list]
    except Exception as ex:
        ut.printex(ex, 'Error building annotation_uuids', '[add_annot]',
                   key_list=['image_uuid_list'])
        raise
    return annotation_uuid_list
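# A standard-library sketch of the same idea: derive a deterministic child
# UUID from a parent image UUID plus annotation geometry. uuid5 stands in for
# ut.augment_uuid (which hashes the pickled arguments instead); the names and
# values below are purely illustrative.
import uuid
img_uuid = uuid.uuid5(uuid.NAMESPACE_URL, 'example-image')
bbox, theta = (10, 20, 100, 50), 0.0
annot_uuid = uuid.uuid5(img_uuid, repr((bbox, theta)))
# Same inputs always produce the same annotation uuid
assert annot_uuid == uuid.uuid5(img_uuid, repr((bbox, theta)))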
def draw_image_overlay(ibs, ax, gid, sel_aids, draw_lbls=True, annote=True):
    try:
        raise NotImplementedError('use pt.viz_image2.draw_image_overlay')
        # draw chips in the image
        aid_list = ibs.get_image_aids(gid)
        bbox_list = ibs.get_annot_bboxes(aid_list)
        theta_list = ibs.get_annot_thetas(aid_list)
        text_list = vh.get_annot_text(ibs, aid_list, draw_lbls)
        annotation_centers = vh.get_bbox_centers(bbox_list)
        sel_list = [aid in sel_aids for aid in aid_list]
        viz_image2.draw_image_overlay(ax, bbox_list, theta_list, text_list,
                                      sel_list, draw_lbls, annote)
        # Draw all chip indexes in the image
        if annote:
            annotation_iter = zip(bbox_list, theta_list, text_list, sel_list)
            for bbox, theta, lbl, is_sel in annotation_iter:
                viz_image2.draw_chip_overlay(ax, bbox, theta, lbl, is_sel)
        # Put annotation centers in the axis
        ph.set_plotdat(ax, 'annotation_centers', np.array(annotation_centers))
        ph.set_plotdat(ax, 'annotation_bbox_list', bbox_list)
        ph.set_plotdat(ax, 'aid_list', aid_list)
    except Exception as ex:
        ut.printex(ex, 'error drawing image overlay',
                   key_list=['ibs', 'ax', 'gid', 'sel_aids'])
        raise
def _get_row_id(model, qtindex=QtCore.QModelIndex()):
    """ returns the id (specified by iders, i.e. a wbia rowid) from qtindex """
    if qtindex is not None and qtindex.isValid():
        node = qtindex.internalPointer()
        if ut.USE_ASSERT:
            try:
                assert isinstance(node, _atn.TreeNode), (
                    'type(node)=%r, node=%r' % (type(node), node,))
            except AssertionError as ex:
                ut.printex(ex, 'error in _get_row_id',
                           keys=['model', 'qtindex', 'node'])
                raise
        try:
            id_ = node.get_id()
        except AttributeError as ex:
            ut.printex(ex, key_list=['node', 'model', 'qtindex'])
            raise
        return id_
def _set_data(model, qtindex, value):
    """ The setter function should be of the following format:

        def setter(column_name, row_id, value)

    column_name is the key or SQL-like name for the column.
    row_id is the corresponding row key or SQL-like id that the row
    callback returned.
    value is the value that needs to be stored.

    The setter function should return a boolean indicating whether setting
    the value was successful or not.
    """
    col = qtindex.column()
    row_id = model._get_row_id(qtindex)
    # <HACK: MODEL_CACHE>
    #cachekey = (row_id, col)
    #try:
    #    del model.cache[cachekey]
    #except KeyError:
    #    pass
    # </HACK: MODEL_CACHE>
    setter = model.col_setter_list[col]
    if VERBOSE:
        print('[model] Setting data: row_id=%r, setter=%r' % (row_id, setter))
    try:
        return setter(row_id, value)
    except Exception as ex:
        ut.printex(ex, 'ERROR: setting data: row_id=%r, setter=%r' %
                   (row_id, setter))
        raise
def precompute_akmeans(data, num_clusters, max_iters=5, flann_params={},
                       cache_dir=None, force_recomp=False, use_data_hash=True,
                       cfgstr='', refine=False, akmeans_cfgstr=None):
    """ precompute approximate kmeans with builtin caching """
    print('[akmeans] pre_akmeans()')
    # filename prefix constants
    assert cache_dir is not None, 'choose a cache directory'
    # Build a cfgstr if the full one is not specified
    if akmeans_cfgstr is None:
        # compute a hashstr based on the data
        akmeans_cfgstr = nn.get_flann_cfgstr(data, flann_params, cfgstr,
                                             use_data_hash)
    try:
        # Try and load a previous clustering
        if force_recomp:
            raise UserWarning('forcing recompute')
        centroids = ut.load_cache(cache_dir, CLUSTERS_FNAME, akmeans_cfgstr)
        datax2_clusterx = ut.load_cache(cache_dir, DATAX2CL_FNAME,
                                        akmeans_cfgstr)
        print('[akmeans.precompute] load successful')
        if refine:
            # Refines the cluster centers if specified
            (datax2_clusterx, centroids) = refine_akmeans(
                data, datax2_clusterx, centroids, max_iters=max_iters,
                flann_params=flann_params, cache_dir=cache_dir,
                akmeans_cfgstr=akmeans_cfgstr)
        return (datax2_clusterx, centroids)
    except IOError as ex:
        ut.printex(ex, 'cache miss', iswarning=True)
    except UserWarning:
        pass
    # First time computation
    print('[akmeans.precompute] pre_akmeans(): calling akmeans')
    (datax2_clusterx, centroids) = akmeans(data, num_clusters, max_iters,
                                           flann_params)
    print('[akmeans.precompute] save and return')
    ut.save_cache(cache_dir, CLUSTERS_FNAME, akmeans_cfgstr, centroids)
    ut.save_cache(cache_dir, DATAX2CL_FNAME, akmeans_cfgstr, datax2_clusterx)
    return (datax2_clusterx, centroids)
def remove_prefix_hack(cfg, cfgtype, cfg_options, alias_keys):
    if cfgtype is not None and cfgtype in ['qcfg', 'dcfg']:
        for key in list(cfg_options.keys()):
            # check if key is nonstandard
            if not (key in cfg or key in alias_keys):
                # does removing the prefix make it standard?
                prefix = cfgtype[0]
                if key.startswith(prefix):
                    key_ = key[len(prefix):]
                    if key_ in cfg or key_ in alias_keys:
                        # remove prefix
                        cfg_options[key_] = cfg_options[key]
                try:
                    assert key[1:] in cfg or key[1:] in alias_keys, (
                        'key=%r, key[1:] =%r' % (key, key[1:]))
                except AssertionError as ex:
                    ut.printex(
                        ex, 'Parse Error Customize Cfg Base ',
                        keys=['key', 'cfg', 'alias_keys',
                              'cfgstr_options', 'cfgtype'],
                    )
                    raise
                del cfg_options[key]
def get_cfgstr_list(cfg, ignore_keys=None, **kwargs):
    """ default get_cfgstr_list, can be overridden by a config object """
    if hasattr(cfg, 'get_param_info_list'):
        if ignore_keys is not None:
            itemstr_list = [
                pi.get_itemstr(cfg)
                for pi in cfg.get_param_info_list()
                if pi.varname not in ignore_keys
            ]
        else:
            itemstr_list = [
                pi.get_itemstr(cfg) for pi in cfg.get_param_info_list()
            ]
    else:
        try:
            item_list = parse_config_items(cfg)
            assert item_list is not None
            if ignore_keys is None:
                itemstr_list = [
                    key + '=' + six.text_type(val) for key, val in item_list
                ]
            else:
                itemstr_list = [
                    key + '=' + six.text_type(val)
                    for key, val in item_list
                    if key not in ignore_keys
                ]
        except Exception as ex:
            logger.info(ignore_keys is None)
            logger.info(ignore_keys)
            ut.printex(ex, keys=['item_list', 'ignore_keys'])
            raise
    filtered_itemstr_list = list(filter(len, itemstr_list))
    config_name = cfg.get_config_name()
    body = ','.join(filtered_itemstr_list)
    cfgstr = ''.join(['_', config_name, '(', body, ')'])
    return cfgstr
def read_thumb_size(thumb_path):
    import vtool as vt
    if VERBOSE_THUMB:
        print('[ThumbDelegate] Reading thumb size')
    # npimg = vt.imread(thumb_path, delete_if_corrupted=True)
    # (height, width) = npimg.shape[0:2]
    # del npimg
    try:
        width, height = vt.open_image_size(thumb_path)
    except IOError as ex:
        if ut.checkpath(thumb_path, verbose=True):
            ut.printex(
                ex,
                'image=%r seems corrupted. Needs deletion' % (thumb_path,),
                iswarning=True,
            )
            ut.delete(thumb_path)
        else:
            # NOTE: the original passed the tuple as a separate positional
            # argument instead of %-formatting the message
            ut.printex(ex, 'image=%r does not exist' % (thumb_path,),
                       iswarning=True)
        raise
    return width, height
def setData(model, index, var, role=Qt.EditRole):
    """ Sets the role data for the item at index to var.
    var is a QVariant (called data in documentation) """
    print('[model] setData: %r' % (str(qtype.qindexinfo(index))))
    try:
        if not index.isValid():
            return None
        flags = model.flags(index)
        if not (flags & Qt.ItemIsEditable or flags & Qt.ItemIsUserCheckable):
            return None
        if role == Qt.CheckStateRole:
            type_ = 'QtCheckState'
            data = var == Qt.Checked
        elif role != Qt.EditRole:
            return False
        else:
            # Cast var into datatype
            type_ = model.get_coltype(index.column())
            data = qtype.cast_from_qt(var, type_)
        # Do actual setting of data
        print(' * new_data = %s(%r)' % (utool.type_str(type_), data,))
        model.set_data(index, data)
        # Emit that data was changed and return success
        model.dataChanged.emit(index, index)
        return True
    except Exception as ex:
        var_ = str(var.toString())  # NOQA
        utool.printex(ex, 'ignoring setData', '[model]', key_list=['var_'])
        # raise
        # print(' * ignoring setData: %r' % locals().get('var', None))
        return False
def _load_singles(qreq_):
    # Find existing cached chip matches
    # Try loading as many as possible
    fpath_list = qreq_.get_chipmatch_fpaths(qreq_.qaids)
    exists_flags = [exists(fpath) for fpath in fpath_list]
    qaids_hit = ut.compress(qreq_.qaids, exists_flags)
    fpaths_hit = ut.compress(fpath_list, exists_flags)
    # First, try a fast reload assuming no errors
    fpath_iter = ut.ProgIter(fpaths_hit, length=len(fpaths_hit),
                             enabled=len(fpaths_hit) > 1,
                             label='loading cache hits', adjust=True, freq=1)
    try:
        qaid_to_hit = {
            qaid: chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
            for qaid, fpath in zip(qaids_hit, fpath_iter)
        }
    except chip_match.NeedRecomputeError as ex:
        # Fallback to a slow reload
        ut.printex(ex, 'Some cached results need to recompute',
                   iswarning=True)
        qaid_to_hit = _load_singles_fallback(fpaths_hit)
    return qaid_to_hit
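# The fast-pass / slow-fallback load pattern above, sketched with only the
# standard library. Pickle files stand in for the ChipMatch caches and the
# `fallback` callable is hypothetical.
import pickle

def _load_one(fpath):
    with open(fpath, 'rb') as file_:
        return pickle.load(file_)

def load_all_or_fallback(fpaths, fallback):
    try:
        # optimistic pass: assume every cache file is readable
        return {fpath: _load_one(fpath) for fpath in fpaths}
    except (EOFError, pickle.UnpicklingError):
        # one bad file aborts the fast pass; retry item-by-item
        return fallback(fpaths)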
def event(self, event):
    # print(event)
    # print(event.type())
    # print(ut.invert_dict(dict(QtCore.QEvent.__dict__))[event.type()])
    # print(event.spontaneous())
    # print(event.isAccepted())
    result = QtWidgets.QMessageBox.event(self, event)
    # print(event.isAccepted())
    # print('----')
    # if event != QtCore.QEvent.DeferredDelete:
    try:
        self.setMinimumHeight(0)
        self.setMaximumHeight(16777215)
        self.setMinimumWidth(0)
        self.setMaximumWidth(16777215)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        textEdit = self.findChild(QtWidgets.QTextEdit)
        if textEdit is not None:
            textEdit.setMinimumHeight(0)
            textEdit.setMaximumHeight(16777215)
            textEdit.setMinimumWidth(0)
            textEdit.setMaximumWidth(16777215)
            textEdit.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
    except RuntimeError as ex:
        if ut.VERBOSE:
            msg = 'Closing seems to cause C++ errors. Unsure how to fix properly.'
            ut.printex(ex, msg, iswarning=True, keys=['event', 'event.type()'])
    return result
def update_query_cfg(query_cfg, **cfgdict):
    # Each config parameter should be unique,
    # so updating them all should not cause conflicts
    # FIXME: Should be able to infer all the children that need updates
    #
    # apply codename before updating subconfigs
    query_cfg.apply_codename(cfgdict.get('codename', None))
    # update subconfigs
    query_cfg.nn_cfg.update(**cfgdict)
    query_cfg.nnweight_cfg.update(**cfgdict)
    query_cfg.sv_cfg.update(**cfgdict)
    query_cfg.agg_cfg.update(**cfgdict)
    query_cfg.flann_cfg.update(**cfgdict)
    query_cfg._featweight_cfg.update(**cfgdict)
    query_cfg._featweight_cfg._feat_cfg.update(**cfgdict)
    query_cfg._featweight_cfg._feat_cfg._chip_cfg.update(**cfgdict)
    query_cfg.update(**cfgdict)
    # Ensure feasibility of the configuration
    try:
        query_cfg.make_feasible()
    except AssertionError as ex:
        logger.info(ut.repr2(cfgdict, sorted_=True))
        ut.printex(ex)
        raise
def build_win32_inno_installer():
    """ win32 self-executable package """
    print('[installer] +--- BUILD_WIN32_INNO_INSTALLER ---')
    assert ut.WIN32, 'Can only build INNO on windows'
    # Get inno executable
    inno_fpath = ensure_inno_isinstalled()
    # Get IBEIS inno script
    iss_script_fpath = ensure_inno_script()
    print('Trying to run ' + ' '.join(['"' + inno_fpath + '"',
                                       '"' + iss_script_fpath + '"']))
    try:
        command_args = ' '.join((inno_fpath, iss_script_fpath))
        ut.cmd(command_args)
    except Exception as ex:
        ut.printex(ex, 'error running script')
        raise
    # Move the installer into dist and make a timestamped version
    # Uninstall exe in case we need to cleanup
    #uninstall_ibeis_exe = 'unins000.exe'
    cwd = get_setup_dpath()
    installer_fpath = join(cwd, '_installers', 'Output',
                           'ibeis-win32-setup.exe')
    print('[installer] L___ BUILD_WIN32_INNO_INSTALLER ___')
    return installer_fpath
def wx_len_stats(wx2_xxx):
    """
    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> from ibeis.algo.hots.smk import smk_repr
        >>> ibs, annots_df, taids, daids, qaids, qreq_, nWords = smk_debug.testdata_dataframe()
        >>> qreq_ = query_request.new_ibeis_query_request(ibs, qaids, daids)
        >>> qparams = qreq_.qparams
        >>> invindex = smk_repr.index_data_annots(annots_df, daids, words)
        >>> qaid = qaids[0]
        >>> wx2_qrvecs, wx2_qaids, wx2_qfxs, query_sccw = smk_repr.new_qindex(annots_df, qaid, invindex, qparams)
        >>> print(ut.dict_str(wx_len_stats(wx2_qrvecs)))
    """
    import utool as ut
    if wx2_xxx is None:
        return 'None'
    if isinstance(wx2_xxx, dict):
        #len_list = [len(xxx) for xxx in ]
        val_list = wx2_xxx.values()
    else:
        val_list = wx2_xxx
    try:
        len_list = [len(xxx) for xxx in val_list]
        statdict = ut.get_stats(len_list)
        return ut.dict_str(statdict, strvals=True, newlines=False)
    except Exception as ex:
        ut.printex(ex)
        # NOTE: the original iterated wx2_xxx directly, which unpacks dict
        # keys; enumerating the values is the intended debug loop
        for count, xxx in enumerate(val_list):
            try:
                len(xxx)
            except Exception:
                print('failed on count=%r' % (count,))
                print('failed on xxx=%r' % (xxx,))
                pass
        raise
def try_svd(M):
    """
    CommandLine:
        python -m vtool.spatial_verification try_svd

    Example:
        >>> # SLOW_DOCTEST
        >>> from vtool.spatial_verification import *  # NOQA
        >>> import vtool.tests.dummy as dummy
        >>> rng = np.random.RandomState(42)
        >>> num = 1000
        >>> xy1_mn = rng.randn(2, num)
        >>> xy2_mn = rng.randn(2, num)
        >>> M = build_lstsqrs_Mx9(xy1_mn, xy2_mn)
        >>> print('M.shape = %r' % (M.shape,))
        >>> USV = npl.svd(M, full_matrices=True, compute_uv=True)
        >>> USV = try_svd(M)

    Example:
        >>> # SLOW_DOCTEST
        >>> from vtool.spatial_verification import *  # NOQA
        >>> import vtool.tests.dummy as dummy
        >>> num = np.ceil(np.sqrt(2000))
        >>> kpts1, kpts2 = dummy.get_dummy_kpts_pair(wh_num=(num, num))
        >>> xy1_mn = ktool.get_xys(kpts1).astype(np.float64)
        >>> xy2_mn = ktool.get_xys(kpts2).astype(np.float64)
        >>> M = build_lstsqrs_Mx9(xy1_mn, xy2_mn)
        >>> print('M.shape = %r' % (M.shape,))
        >>> USV = npl.svd(M, full_matrices=True, compute_uv=True)
        >>> USV = try_svd(M)
    """
    #if M.shape[0] > 2500:
    #    # hack to prevent bug in lapack
    #    M = M[:2500]
    try:
        USV = npl.svd(M, full_matrices=True, compute_uv=True)
    except MemoryError as ex:
        ut.printex(ex, '[sver] Caught MemErr during full SVD. Trying sparse SVD.')
        M_sparse = sps.lil_matrix(M)
        USV = spsl.svds(M_sparse)
    except npl.LinAlgError as ex:
        ut.printex(ex, '[sver] svd did not converge')
        raise
    except Exception as ex:
        ut.printex(ex, '[sver] svd error')
        raise
    return USV
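# Why the SVD is here: in spatial verification the homography is recovered
# from the least-squares Mx9 system as the right singular vector with the
# smallest singular value. A minimal numpy illustration with a toy M (not a
# real constraint matrix):
import numpy as np
M = np.random.RandomState(0).randn(20, 9)
U, s, Vt = np.linalg.svd(M, full_matrices=True)
h = Vt[-1]           # minimizes ||M h|| subject to ||h|| = 1
H = h.reshape(3, 3)  # candidate 3x3 homography
assert np.allclose(np.linalg.norm(h), 1.0)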
def _init_sqldbstaging(ibs, request_stagingversion=None):
    """
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.control.IBEISControl import *  # NOQA
        >>> import wbia  # NOQA
        >>> #ibs = wbia.opendb('PZ_MTEST')
        >>> #ibs = wbia.opendb('PZ_Master0')
        >>> ibs = wbia.opendb('testdb1')
        >>> #ibs = wbia.opendb('PZ_Master0')

    Ignore:
        aid_list = ibs.get_valid_aids()
        #ibs.update_annot_visual_uuids(aid_list)
        vuuid_list = ibs.get_annot_visual_uuids(aid_list)
        aid_list2 = ibs.get_annot_aids_from_visual_uuid(vuuid_list)
        assert aid_list2 == aid_list
        # v1.3.0 testdb1:264us, PZ_MTEST:3.93ms, PZ_Master0:11.6s
        %timeit ibs.get_annot_aids_from_visual_uuid(vuuid_list)
        # v1.3.1 testdb1:236us, PZ_MTEST:1.83ms, PZ_Master0:140ms

        ibs.print_imageset_table(exclude_columns=['imageset_uuid'])
    """
    from wbia.control import _sql_helpers
    from wbia.control import STAGING_SCHEMA
    # Before load, ensure database has been backed up for the day
    backup_idx = ut.get_argval('--loadbackup-staging', type_=int,
                               default=None)
    sqlstaging_fpath = None
    if backup_idx is not None:
        backups = _sql_helpers.get_backup_fpaths(ibs)
        logger.info('backups = %r' % (backups,))
        sqlstaging_fpath = backups[backup_idx]
        logger.info('CHOSE BACKUP sqlstaging_fpath = %r' % (sqlstaging_fpath,))
    # HACK
    if backup_idx is None and ibs._needs_backup():
        try:
            _sql_helpers.ensure_daily_database_backup(
                ibs.get_ibsdir(), ibs.sqlstaging_fname, ibs.backupdir)
        except IOError as ex:
            ut.printex(
                ex,
                ('Failed making daily backup. '
                 'Run with --nobackup to disable'),
            )
            raise
    # IBEIS SQL State Database
    if request_stagingversion is None:
        ibs.staging_version_expected = '1.2.0'
    else:
        ibs.staging_version_expected = request_stagingversion
    # TODO: add this functionality to SQLController
    if backup_idx is None:
        new_version, new_fname = dtool.sql_control.dev_test_new_schema_version(
            ibs.get_dbname(),
            ibs.get_ibsdir(),
            ibs.sqlstaging_fname,
            ibs.staging_version_expected,
            version_next='1.2.0',
        )
        ibs.staging_version_expected = new_version
        ibs.sqlstaging_fname = new_fname
    if sqlstaging_fpath is None:
        assert backup_idx is None
        sqlstaging_fpath = join(ibs.get_ibsdir(), ibs.sqlstaging_fname)
        readonly = None
    else:
        readonly = True
    ibs.staging = dtool.SQLDatabaseController(
        fpath=sqlstaging_fpath,
        inmemory=False,
        readonly=readonly,
        always_check_metadata=False,
    )
    ibs.readonly = ibs.staging.readonly
    if backup_idx is None:
        # Ensure correct schema versions
        _sql_helpers.ensure_correct_version(
            ibs,
            ibs.staging,
            ibs.staging_version_expected,
            STAGING_SCHEMA,
            verbose=ut.VERBOSE,
        )
def show_matches2(ibs, aid1, aid2, fm=None, fs=None, fm_norm=None, sel_fm=[],
                  H1=None, H2=None, qreq_=None, **kwargs):
    """
    TODO: DEPRECATE and use special case of show_name_matches
    Integrate ChipMatch

    Used in:
        Found 1 line(s) in '/home/joncrall/code/ibeis_cnn/ibeis_cnn/ingest_ibeis.py':
        ingest_ibeis.py : 827 | >>> ibeis.viz.viz_matches.show_matches2(ibs, aid1, aid2, fm=None, kpts1=kpts1, kpts2=kpts2)
        ----------------------
        Found 4 line(s) in '/home/joncrall/code/ibeis/ibeis/viz/viz_matches.py':
        viz_matches.py : 423 |def show_matches2(ibs, aid1, aid2, fm=None, fs=None, fm_norm=None, sel_fm=[],
        viz_matches.py : 430 |        python -m ibeis.viz.viz_matches --exec-show_matches2 --show
        viz_matches.py : 431 |        python -m ibeis --tf ChipMatch.ishow_single_annotmatch show_matches2 --show
        viz_matches.py : 515 |    return show_matches2(ibs, aid1, aid2, fm, fs, qreq_=qreq_, **kwargs)
        ----------------------
        Found 1 line(s) in '/home/joncrall/code/ibeis/ibeis/viz/interact/interact_matches.py':
        interact_matches.py : 372 |        tup = viz.viz_matches.show_matches2(ibs, self.qaid, self.daid,
        ----------------------
        Found 1 line(s) in '/home/joncrall/code/ibeis/ibeis/algo/hots/vsone_pipeline.py':
        vsone_pipeline.py : 1339 |    viz_matches.show_matches2(ibs, qaid, daid, fm=fm, fs=fs, fm_norm=fm_norm, ori=True,
        ----------------------
        Found 2 line(s) in '/home/joncrall/code/ibeis/ibeis/algo/hots/chip_match.py':
        chip_match.py : 204 |        viz_matches.show_matches2(qreq_.ibs, cm.qaid, daid, qreq_=qreq_,
        chip_match.py : 219 |        ibeis.viz.viz_matches.show_matches2
        ----------------------
        Found 1 line(s) in '/home/joncrall/code/ibeis/ibeis/algo/hots/scoring.py':
        scoring.py : 562 |        viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs,
        ----------------------

    CommandLine:
        python -m ibeis.viz.viz_matches --exec-show_matches2 --show
        python -m ibeis --tf ChipMatch.ishow_single_annotmatch show_matches2 --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.chip_match import *  # NOQA
        >>> import ibeis
        >>> cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST', default_qaids=[18])
        >>> cm.score_nsum(qreq_)
        >>> daid = cm.get_top_aids()[0]
        >>> cm.show_single_annotmatch(qreq_, daid)
        >>> ut.show_if_requested()
    """
    if qreq_ is None:
        print('[viz_matches] WARNING: qreq_ is None')
    kwargs = kwargs.copy()
    in_image = kwargs.get('in_image', False)
    draw_fmatches = kwargs.pop('draw_fmatches', True)
    # Read query and result info (chips, names, ...)
    rchip1, rchip2, kpts1, kpts2 = _get_annot_pair_info(
        ibs, aid1, aid2, qreq_, draw_fmatches, **kwargs)
    ut.delete_keys(kwargs, ['kpts1', 'kpts2'])
    if fm is None:
        assert len(kpts1) == len(kpts2), \
            'keypoints should be in correspondence'
        import numpy as np
        fm = np.vstack((np.arange(len(kpts1)), np.arange(len(kpts1)))).T
    # Build annotation strings / colors
    lbl1 = 'q' + vh.get_aidstrs(aid1)
    lbl2 = vh.get_aidstrs(aid2)
    if in_image:  # HACK!
        lbl1 = None
        lbl2 = None
    # Draws the chips and keypoint matches
    try:
        ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2,
                                              fm, fs=fs, fm_norm=fm_norm,
                                              H1=H1, H2=H2, lbl1=lbl1,
                                              lbl2=lbl2, sel_fm=sel_fm,
                                              **kwargs)
    except Exception as ex:
        ut.printex(ex, 'consider qr.remove_corrupted_queries',
                   '[viz_matches]')
        print('')
        raise
    # Moved the code into show_chipmatch
    #if len(sel_fm) > 0:
    #    # Draw any selected matches
    #    sm_kw = dict(rect=True, colors=pt.BLUE)
    #    pt.plot_fmatch(xywh1, xywh2, kpts1, kpts2, sel_fm, **sm_kw)
    (x1, y1, w1, h1) = xywh1
    (x2, y2, w2, h2) = xywh2
    offset1 = (x1, y1)
    offset2 = (x2, y2)
    annotate_matches2(ibs, aid1, aid2, fm, fs, xywh2=xywh2, xywh1=xywh1,
                      offset1=offset1, offset2=offset2, **kwargs)
    return ax, xywh1, xywh2
def new_cpd(self, parents=None, pmf_func=None):
    """
    Makes a new random variable that is an instance of this template

    parents : only used to define the name of this node.
    """
    if pmf_func is None:
        pmf_func = self.pmf_func

    # --- MAKE VARIABLE ID
    def _getid(obj):
        if isinstance(obj, int):
            return str(obj)
        elif isinstance(obj, six.string_types):
            return obj
        else:
            return obj._template_id

    if not ut.isiterable(parents):
        parents = [parents]

    template_ids = [_getid(cpd) for cpd in parents]
    HACK_SAME_IDS = True
    # TODO: keep track of parent index inheritance
    # then rectify uniqueness based on that
    if HACK_SAME_IDS and ut.allsame(template_ids):
        _id = template_ids[0]
    else:
        _id = ''.join(template_ids)
    variable = ''.join([self.varpref, _id])
    # variable = '_'.join([self.varpref, '{' + _id + '}'])
    # variable = '$%s$' % (variable,)

    evidence_cpds = [cpd for cpd in parents if hasattr(cpd, 'ttype')]
    if len(evidence_cpds) == 0:
        evidence_cpds = None

    variable_card = len(self.basis)
    statename_dict = {
        variable: self.basis,
    }
    if self.evidence_ttypes is not None:
        # NOTE: the original compared zip(evidence_cpds, evidence_cpds),
        # which can never differ; checking against the template evidence
        # types is the intended validation
        if any(cpd.ttype != tcpd.ttype
               for cpd, tcpd in zip(evidence_cpds, self.evidence_ttypes)):
            raise ValueError('Evidence is not of appropriate type')
        evidence_bases = [cpd.variable_statenames for cpd in evidence_cpds]
        evidence_card = list(map(len, evidence_bases))
        evidence_states = list(ut.iprod(*evidence_bases))

        for cpd in evidence_cpds:
            _dict = ut.dict_subset(cpd.statename_dict, [cpd.variable])
            statename_dict.update(_dict)

        evidence = [cpd.variable for cpd in evidence_cpds]
    else:
        if evidence_cpds is not None:
            raise ValueError('Gave evidence for evidence-less template')
        evidence = None
        evidence_card = None

    # --- MAKE TABLE VALUES
    if pmf_func is not None:
        if isinstance(pmf_func, list):
            values = np.array(pmf_func)
        else:
            values = np.array([
                [pmf_func(vstate, *estates) for estates in evidence_states]
                for vstate in self.basis
            ])
        ensure_normalized = True
        if ensure_normalized:
            values = values / values.sum(axis=0)
    else:
        # assume uniform
        fill_value = 1.0 / variable_card
        if evidence_card is None:
            values = np.full((1, variable_card), fill_value)
        else:
            values = np.full([variable_card] + list(evidence_card),
                             fill_value)

    try:
        cpd = pgmpy.factors.TabularCPD(
            variable=variable,
            variable_card=variable_card,
            values=values,
            evidence=evidence,
            evidence_card=evidence_card,
            # statename_dict=statename_dict,
            state_names=statename_dict,
        )
    except Exception as ex:
        ut.printex(
            ex,
            'Failed to create TabularCPD',
            keys=[
                'variable',
                'variable_card',
                'statename_dict',
                'evidence_card',
                'evidence',
                'values.shape',
            ],
        )
        ut.embed()
        raise

    cpd.ttype = self.ttype
    cpd._template_ = self
    cpd._template_id = _id
    return cpd
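# A toy TabularCPD analogous to what new_cpd builds. This assumes a modern
# pgmpy where TabularCPD lives in pgmpy.factors.discrete and accepts
# state_names; the variable names and probabilities are invented. Column j
# of `values` is P(rain | season=j), so columns must sum to 1.
from pgmpy.factors.discrete import TabularCPD
cpd = TabularCPD(
    variable='rain', variable_card=2,
    values=[[0.8, 0.4],    # P(rain=no  | season)
            [0.2, 0.6]],   # P(rain=yes | season)
    evidence=['season'], evidence_card=[2],
    state_names={'rain': ['no', 'yes'], 'season': ['dry', 'wet']},
)
print(cpd)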
def run_tests():
    """
    >>> from ibeis.tests.run_tests import *  # NOQA
    """
    # DONT USE THESE FLAGS
    #print('--testall and --testslow give you more tests')
    # starts logging for tests
    import ibeis
    ibeis._preload()
    # Build module list and run tests
    import sys
    ensure_testing_data()
    if False:
        ut.change_term_title('RUN IBEIS TESTS')
    exclude_doctests_fnames = set([
        '_autogen_explicit_controller',
        'template_definitions.py',
        'autogen_test_script.py',
    ])
    exclude_dirs = [
        '_broken', 'old', 'tests', 'timeits',
        '_scripts', '_timeits', '_doc', 'notebook',
    ]
    if ut.in_pyinstaller_package():
        from os.path import dirname
        dpath_list = [dirname(ibeis.__file__)]
        # Run tests for installer
        doctest_modname_list_ = [
            'ibeis.ibsfuncs',
            'ibeis.viz.interact.interact_qres2',
            'ibeis.viz.interact.interact_matches',
            'ibeis.viz.interact.interact_annotations2',
            'ibeis.viz.interact.interact_name',
            'ibeis.viz.interact.interact_query_decision',
            'ibeis.viz.interact.interact_chip',
            'ibeis.viz.interact.interact_qres',
            'ibeis.algo.Config',
            'ibeis.algo.hots._pipeline_helpers',
            'ibeis.algo.hots.name_scoring',
            'ibeis.algo.hots.devcases',
            'ibeis.algo.hots.neighbor_index',
            'ibeis.algo.hots.automated_helpers',
            'ibeis.algo.hots.hots_query_result',
            'ibeis.algo.hots.automated_oracle',
            'ibeis.algo.hots.nn_weights',
            'ibeis.algo.hots.pipeline',
            'ibeis.algo.hots.automated_params',
            'ibeis.algo.hots.vsone_pipeline',
            'ibeis.algo.hots.automatch_suggestor',
            'ibeis.algo.hots.score_normalization',
            'ibeis.algo.hots.query_request',
            'ibeis.algo.hots.chip_match',
            'ibeis.algo.hots.multi_index',
            'ibeis.algo.hots.qt_inc_automatch',
            'ibeis.algo.hots.query_params',
            'ibeis.algo.hots.precision_recall',
            'ibeis.algo.hots.hstypes',
            'ibeis.algo.hots.match_chips4',
            'ibeis.algo.hots.distinctiveness_normalizer',
            'ibeis.algo.hots.automated_matcher',
            'ibeis.algo.hots.special_query',
            'ibeis.algo.hots.scoring',
            'ibeis.algo.preproc.preproc_annot',
            'ibeis.algo.preproc.preproc_imageset',
            'ibeis.algo.preproc.preproc_image',
            'ibeis.algo.preproc.preproc_residual',
            'ibeis.algo.detect.grabmodels',
            'ibeis.control.manual_annot_funcs',
            'ibeis.control.manual_chip_funcs',
            'ibeis.control.manual_species_funcs',
            'ibeis.control.manual_ibeiscontrol_funcs',
            'ibeis.control._autogen_party_funcs',
            'ibeis.control.manual_garelate_funcs',
            'ibeis.control.manual_name_funcs',
            'ibeis.control._sql_helpers',
            'ibeis.control.manual_wildbook_funcs',
            'ibeis.control.controller_inject',
            'ibeis.control.manual_lblimage_funcs',
            'ibeis.control.IBEISControl',
            'ibeis.control._autogen_featweight_funcs',
            'ibeis.control.manual_imageset_funcs',
            'ibeis.control.manual_feat_funcs',
            'ibeis.control.manual_gsgrelate_funcs',
            'ibeis.control._autogen_annotmatch_funcs',
            'ibeis.control.manual_meta_funcs',
            'ibeis.control.manual_lblannot_funcs',
            'ibeis.control.DB_SCHEMA',
            'ibeis.control.manual_lbltype_funcs',
            'ibeis.control.SQLDatabaseControl',
            'ibeis.control.manual_image_funcs',
            'ibeis.control.manual_annotgroup_funcs',
            'ibeis.control.DBCACHE_SCHEMA',
            'ibeis.init.main_helpers',
            'ibeis.init.sysres',
            'ibeis.gui.clock_offset_gui',
            'ibeis.dbio.export_subset',
            'ibeis.dbio.export_hsdb',
            'ibeis.dbio.ingest_database',
        ]
    else:
        dpath_list = ['ibeis']
        doctest_modname_list_ = ut.find_doctestable_modnames(
            dpath_list, exclude_doctests_fnames, exclude_dirs)
    exclude_doctest_pattern = ut.get_argval(
        ('--exclude-doctest-patterns', '--x'), type_=list, default=[])
    if exclude_doctest_pattern is not None:
        import re
        is_ok = [all([re.search(pat, name) is None
                      for pat in exclude_doctest_pattern])
                 for name in doctest_modname_list_]
        doctest_modname_list = ut.compress(doctest_modname_list_, is_ok)
    else:
        doctest_modname_list = doctest_modname_list_

    doctest_modname_list2 = []
    for modname in doctest_modname_list:
        try:
            exec('import ' + modname, globals(), locals())
        except ImportError as ex:
            ut.printex(ex, iswarning=True)
            if not ut.in_pyinstaller_package():
                raise
        else:
            doctest_modname_list2.append(modname)

    module_list = [sys.modules[name] for name in doctest_modname_list2]
    nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list)
    if nPass != nTotal:
        return 1
    else:
        return 0
def define_flann_bindings(binding_name): """ Define the binding names for flann """ # default c source c_source = None optional_args = None c_source_part = None py_source = None py_alias = None py_args = None pydoc = None cpp_param_doc = { 'cols': 'number of columns in the dataset (feature dimensionality)', 'dataset': 'pointer to a data set stored in row major order', 'dists': ut.packtext( '''pointer to matrix for the distances of the nearest neighbors of the testset features in the dataset'''), 'flann_params': 'generic flann parameters', 'index_ptr': 'the index (constructed previously using flann_build_index)', 'nn': 'how many nearest neighbors to return', 'rebuild_threshold': ut.packtext( '''reallocs index when it grows by factor of `rebuild_threshold`. A smaller value results is more space efficient but less computationally efficient. Must be greater than 1.'''), 'result_ids': ut.packtext( '''pointer to matrix for the indices of the nearest neighbors of the testset features in the dataset (must have tcount number of rows and nn number of columns)'''), 'rows': 'number of rows (features) in the dataset', 'tcount': ut.packtext('''number of rows (features) in the query dataset (same dimensionality as features in the dataset)'''), 'testset': 'pointer to a query set stored in row major order', 'level': 'verbosity level' } standard_csource = ut.codeblock(r''' try {{ if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; return index->{cpp_binding_name}(); }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); throw; }} ''') return_doc = None cpp_binding_name = binding_name zero_success = 'zero or a number <0 for error' if binding_name == 'clean_removed_points': cpp_binding_name = ut.to_camel_case(binding_name) return_type = 'void' docstr = 'Deletes removed points in index?' binding_argnames = ['index_ptr'] c_source = standard_csource elif binding_name == 'veclen': return_type = 'int' docstr = 'Returns number of features in this index' binding_argnames = ['index_ptr'] c_source = standard_csource elif binding_name == 'size': return_type = 'int' docstr = 'returns The dimensionality of the features in this index.' binding_argnames = ['index_ptr'] c_source = standard_csource elif binding_name == 'getType': return_type = 'flann_algorithm_t' docstr = 'returns The index type (kdtree, kmeans,...)' binding_argnames = ['index_ptr'] c_source = standard_csource elif binding_name == 'used_memory': docstr = ut.codeblock(''' Returns the amount of memory (in bytes) used by the index index_ptr = pointer to pre-built index. 
Returns: int ''') c_source = ut.codeblock(r''' try {{ if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; return index->usedMemory(); }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return -1; }} ''') py_source = ut.codeblock(''' if self.__curindex is None: return 0 return flann.used_memory[self.__curindex_type](self.__curindex) ''') binding_argnames = ['index_ptr'] return_type = 'int' elif binding_name == 'add_points': c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; try {{ if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; Matrix<ElementType> points = Matrix<ElementType>(points, rows, index->veclen()); index->addPoints(points, rebuild_threshold); return 0; }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return -1; }} ''') py_source = ut.codeblock(''' if new_pts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % new_pts.dtype) if new_pts.dtype != self.__curindex_type: raise FLANNException('New points must have the same type') new_pts = ensure_2d_array(new_pts, default_flags) rows = new_pts.shape[0] flann.add_points[self.__curindex_type](self.__curindex, new_pts, rows, rebuild_threshold) return self.__added_data.append(new_pts) ''') #return_type = 'void' return_type = 'int' docstr = 'Adds points to pre-built index.' if False: binding_argnames = [ 'index_ptr', 'points', 'rows', 'cols', # TODO: can remove 'rebuild_threshold', ] else: binding_argnames = [ 'index_ptr', 'points', 'rows', 'rebuild_threshold', ] return_doc = '0 if success otherwise -1' py_args = ['new_pts', 'rebuild_threshold=2.'] cpp_param_doc['points'] = 'pointer to array of points' elif binding_name == 'remove_point': c_source = ut.codeblock(r''' size_t point_id(point_id_uint); try {{ if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; index->removePoint(point_id); return 0; }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return -1; }} ''') py_source = ut.codeblock(''' flann.remove_point[self.__curindex_type](self.__curindex, point_id) self.__removed_ids.append(point_id) ''') #return_type = 'void' return_type = 'int' docstr = 'Removes a point from the index' return_doc = zero_success cpp_param_doc['point_id'] = 'point id to be removed' cpp_param_doc['index_ptr'] = 'The index that should be modified' binding_argnames = ['index_ptr', 'point_id'] elif binding_name == 'remove_points': c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; try {{ if (index_ptr==NULL) {{ thow FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; index->removePoints(id_list, num); return; }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return; }} ''') py_source = ut.codeblock(''' id_list = np.array(id_list, dtype=np.int32) num = len(id_list) flann.remove_points[self.__curindex_type](self.__curindex, id_list, num) self.__removed_ids.extend(id_list) ''') cpp_param_doc['index_ptr'] = 'The index that should be modified' cpp_param_doc['id_list'] = 'list of point ids to be removed' cpp_param_doc['num'] = 'number of points in id_list' docstr = 'Removes multiple points from the index' return_doc = 'void' py_args = ['id_list'] return_type = 'void' binding_argnames = ['index_ptr', 
'id_list', 'num'] elif binding_name == 'compute_cluster_centers': docstr = ut.textblock(r''' Clusters the features in the dataset using a hierarchical kmeans clustering approach. This is significantly faster than using a flat kmeans clustering for a large number of clusters. ''') c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; try { init_flann_parameters(flann_params); Matrix<ElementType> inputData(dataset,rows,cols); KMeansIndexParams params(flann_params->branching, flann_params->iterations, flann_params->centers_init, flann_params->cb_index); Matrix<DistanceType> centers(result_centers, clusters,cols); int clusterNum = hierarchicalClustering<Distance>(inputData, centers, params, d); return clusterNum; } catch (std::runtime_error& e) { Logger::error("Caught exception: %s\n",e.what()); return -1; } '''.replace('{', '{{').replace('}', '}}')) py_source = ut.codeblock(''' # First verify the parameters are sensible. if pts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % pts.dtype) if int(branch_size) != branch_size or branch_size < 2: raise FLANNException('branch_size must be an integer >= 2.') branch_size = int(branch_size) if int(num_branches) != num_branches or num_branches < 1: raise FLANNException('num_branches must be an integer >= 1.') num_branches = int(num_branches) if max_iterations is None: max_iterations = -1 else: max_iterations = int(max_iterations) # init the arrays and starting values pts = ensure_2d_array(pts, default_flags) npts, dim = pts.shape num_clusters = (branch_size - 1) * num_branches + 1 if pts.dtype.type == np.float64: result = np.empty((num_clusters, dim), dtype=np.float64) else: result = np.empty((num_clusters, dim), dtype=np.float32) # set all the parameters appropriately self.__ensureRandomSeed(kwargs) params = {'iterations': max_iterations, 'algorithm': 'kmeans', 'branching': branch_size, 'random_seed': kwargs['random_seed']} self.__flann_parameters.update(params) numclusters = flann.compute_cluster_centers[pts.dtype.type]( pts, npts, dim, num_clusters, result, pointer(self.__flann_parameters)) if numclusters <= 0: raise FLANNException('Error occurred during clustering procedure.') if dtype is None: return result else: return dtype(result) ''').replace('}', '}}').replace('{', '{{') return_doc = ut.packtext( '''number of clusters computed or a number <0 for error. This number can be different than the number of clusters requested, due to the way hierarchical clusters are computed. The number of clusters returned will be the highest number of the form (branch_size-1)*K+1 smaller than the number of clusters requested.''') cpp_param_doc['clusters'] = 'number of clusters to compute' cpp_param_doc[ 'result_centers'] = 'memory buffer where the output cluster centers are stored' cpp_param_doc[ 'flann_params'] = 'generic flann parameters and index_params used to specify the kmeans tree parameters (branching factor, max number of iterations to use)' return_type = 'int' binding_argnames = [ 'dataset', 'rows', 'cols', 'clusters', 'result_centers', 'flann_params' ] optional_args = ['Distance d = Distance()'] py_alias = 'hierarchical_kmeans' py_args = 'pts, branch_size, num_branches, max_iterations=None, dtype=None, **kwargs'.split( ', ') elif binding_name == 'radius_search': docstr = ut.codeblock(r''' Performs a radius search using an already constructed index.
In case of radius search, instead of always returning a predetermined number of nearest neighbours (for example the 10 nearest neighbours), the search will return all the neighbours found within a search radius of the query point. The check parameter in the FLANNParameters below sets the level of approximation for the search by only visiting "checks" number of features in the index (the same way as for the KNN search). A lower value for checks will give a higher search speedup at the cost of potentially not returning all the neighbours in the specified radius. ''') c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; try {{ init_flann_parameters(flann_params); if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; Matrix<int> m_result_ids(result_ids, 1, max_nn); Matrix<DistanceType> m_dists(dists1d, 1, max_nn); SearchParams search_params = create_search_params(flann_params); int count = index->radiusSearch(Matrix<ElementType>(query1d, 1, index->veclen()), m_result_ids, m_dists, radius, search_params ); return count; }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return -1; }} ''') py_source = ut.codeblock(''' if self.__curindex is None: raise FLANNException( 'build_index(...) method not called first or current index deleted.') if query.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % query.dtype) if self.__curindex_type != query.dtype.type: raise FLANNException('Index and query must have the same type') npts, dim = self.get_indexed_shape() assert(query.shape[0] == dim), 'data and query must have the same dims' result = np.empty(npts, dtype=index_type) if self.__curindex_type == np.float64: dists = np.empty(npts, dtype=np.float64) else: dists = np.empty(npts, dtype=np.float32) self.__flann_parameters.update(kwargs) nn = flann.radius_search[ self.__curindex_type]( self.__curindex, query, result, dists, npts, radius, pointer(self.__flann_parameters)) return (result[0:nn], dists[0:nn]) ''') cpp_param_doc['index_ptr'] = 'the index' cpp_param_doc['query1d'] = 'query point' cpp_param_doc['dists1d'] = 'similar, but for storing distances' cpp_param_doc[ 'result_ids'] = 'array for storing the indices found (will be modified)' cpp_param_doc['max_nn'] = 'size of arrays result_ids and dists1d' cpp_param_doc[ 'radius'] = 'search radius (squared radius for euclidean metric)' return_doc = 'number of neighbors found or <0 for an error' return_type = 'int' binding_argnames = [ 'index_ptr', 'query1d', 'result_ids', 'dists1d', 'max_nn', 'radius', 'flann_params', ] py_alias = 'nn_radius' py_args = 'query, radius, **kwargs'.split(', ') elif binding_name == 'find_nearest_neighbors_index': c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; try { init_flann_parameters(flann_params); if (index_ptr==NULL) { throw FLANNException("Invalid index"); } Index<Distance>* index = (Index<Distance>*)index_ptr; Matrix<int> m_indices(result_ids,tcount, nn); Matrix<DistanceType> m_dists(dists, tcount, nn); SearchParams search_params = create_search_params(flann_params); index->knnSearch(Matrix<ElementType>(testset, tcount, index->veclen()), m_indices, m_dists, nn, search_params ); return 0; } catch (std::runtime_error& e) { Logger::error("Caught exception: %s\n",e.what()); return -1; } return -1; ''').replace('{', '{{').replace('}',
'}}') py_source = ut.codeblock(''' if self.__curindex is None: raise FLANNException( 'build_index(...) method not called first or current index deleted.') if qpts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % qpts.dtype) if self.__curindex_type != qpts.dtype.type: raise FLANNException('Index and query must have the same type') qpts = ensure_2d_array(qpts, default_flags) npts, dim = self.get_indexed_shape() if qpts.size == dim: qpts = qpts.reshape(1, dim) nqpts = qpts.shape[0] assert qpts.shape[1] == dim, 'data and query must have the same dims' assert npts >= num_neighbors, 'more neighbors than there are points' result = np.empty((nqpts, num_neighbors), dtype=index_type) if self.__curindex_type == np.float64: dists = np.empty((nqpts, num_neighbors), dtype=np.float64) else: dists = np.empty((nqpts, num_neighbors), dtype=np.float32) self.__flann_parameters.update(kwargs) flann.find_nearest_neighbors_index[ self.__curindex_type]( self.__curindex, qpts, nqpts, result, dists, num_neighbors, pointer(self.__flann_parameters)) if num_neighbors == 1: return (result.reshape(nqpts), dists.reshape(nqpts)) else: return (result, dists) ''') docstr = 'Searches for nearest neighbors using the index provided' return_doc = zero_success return_type = 'int' # optional_args = ['Distance d = Distance()'] binding_argnames = [ 'index_ptr', 'testset', 'tcount', 'result_ids', 'dists', 'nn', 'flann_params', ] py_alias = 'nn_index' py_args = ['qpts', 'num_neighbors=1', '**kwargs'] elif binding_name == 'find_nearest_neighbors': c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; try {{ init_flann_parameters(flann_params); if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} Index<Distance>* index = (Index<Distance>*)index_ptr; Matrix<int> m_indices(result_ids,tcount, nn); Matrix<DistanceType> m_dists(dists, tcount, nn); SearchParams search_params = create_search_params(flann_params); index->knnSearch(Matrix<ElementType>(testset, tcount, index->veclen()), m_indices, m_dists, nn, search_params ); return 0; }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return -1; }} return -1; ''') py_source = ut.codeblock(''' if pts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % pts.dtype) if qpts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % qpts.dtype) if pts.dtype != qpts.dtype: raise FLANNException('Data and query must have the same type') pts = ensure_2d_array(pts, default_flags) qpts = ensure_2d_array(qpts, default_flags) npts, dim = pts.shape nqpts = qpts.shape[0] assert qpts.shape[1] == dim, 'data and query must have the same dims' assert npts >= num_neighbors, 'more neighbors than there are points' result = np.empty((nqpts, num_neighbors), dtype=index_type) if pts.dtype == np.float64: dists = np.empty((nqpts, num_neighbors), dtype=np.float64) else: dists = np.empty((nqpts, num_neighbors), dtype=np.float32) self.__flann_parameters.update(kwargs) flann.find_nearest_neighbors[ pts.dtype.type]( pts, npts, dim, qpts, nqpts, result, dists, num_neighbors, pointer(self.__flann_parameters)) if num_neighbors == 1: return (result.reshape(nqpts), dists.reshape(nqpts)) else: return (result, dists) ''') docstr = 'Builds an index and uses it to find nearest neighbors.'
return_doc = zero_success py_alias = 'nn' py_args = ['pts', 'qpts', 'num_neighbors=1', '**kwargs'] return_type = 'int' binding_argnames = [ 'dataset', 'rows', 'cols', 'testset', 'tcount', 'result_ids', 'dists', 'nn', 'flann_params' ] optional_args = ['Distance d = Distance()'] elif binding_name == 'load_index': c_source_part = ut.codeblock(r''' Index<Distance>* index = new Index<Distance>(Matrix<typename Distance::ElementType>(dataset,rows,cols), SavedIndexParams(filename), d); return index; ''') py_source = ut.codeblock(''' if pts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % pts.dtype) pts = ensure_2d_array(pts, default_flags) npts, dim = pts.shape if self.__curindex is not None: flann.free_index[self.__curindex_type]( self.__curindex, pointer(self.__flann_parameters)) self.__curindex = None self.__curindex_data = None self.__added_data = [] self.__curindex_type = None self.__curindex = flann.load_index[pts.dtype.type]( c_char_p(to_bytes(filename)), pts, npts, dim) if self.__curindex is None: raise FLANNException( ('Error loading the FLANN index with filename=%r.' ' C++ may have thrown more detailed errors') % (filename,)) self.__curindex_data = pts self.__added_data = [] self.__removed_ids = [] self.__curindex_type = pts.dtype.type ''') docstr = 'Loads a previously saved index from a file.' return_doc = 'index_ptr' cpp_param_doc['dataset'] = 'The dataset corresponding to the index' cpp_param_doc['filename'] = 'File to load the index from' py_args = ['filename', 'pts'] return_type = 'flann_index_t' binding_argnames = ['filename', 'dataset', 'rows', 'cols'] optional_args = ['Distance d = Distance()'] elif binding_name == 'save_index': docstr = 'Saves the index to a file. Only the index is saved into the file, the dataset corresponding to the index is not saved.' cpp_param_doc['index_ptr'] = 'The index that should be saved' cpp_param_doc['filename'] = 'The filename the index should be saved to' return_doc = 'Returns 0 on success, negative value on error' c_source_part = ut.codeblock(r''' Index<Distance>* index = (Index<Distance>*)index_ptr; index->save(filename); return 0; ''') py_source = ut.codeblock(''' if self.__curindex is not None: flann.save_index[self.__curindex_type]( self.__curindex, c_char_p(to_bytes(filename))) ''') return_type = 'int' binding_argnames = ['index_ptr', 'filename'] py_alias = None py_args = None elif binding_name == 'build_index': docstr = ut.codeblock(''' Builds and returns an index. It uses autotuning if the target_precision field of index_params is between 0 and 1, or the parameters specified if it's -1. ''') pydoc = ut.codeblock(''' This builds and internally stores an index to be used for future nearest neighbor matchings. It erases any previously stored indexes, so use multiple instances of this class to work with multiple stored indices. Use nn_index(...) to find the nearest neighbors in this index. pts is a 2d numpy array or matrix. All the computation is done in np.float32 type, but pts may be any type that is convertible to np.float32.
''') c_source = ut.codeblock(r''' typedef typename Distance::ElementType ElementType; try { init_flann_parameters(flann_params); if (flann_params == NULL) { throw FLANNException("The flann_params argument must be non-null"); } IndexParams params = create_parameters(flann_params); Index<Distance>* index = new Index<Distance>(Matrix<ElementType>(dataset,rows,cols), params, d); index->buildIndex(); if (flann_params->algorithm==FLANN_INDEX_AUTOTUNED) { IndexParams params = index->getParameters(); update_flann_parameters(params,flann_params); SearchParams search_params = get_param<SearchParams>(params,"search_params"); *speedup = get_param<float>(params,"speedup"); flann_params->checks = search_params.checks; flann_params->eps = search_params.eps; flann_params->cb_index = get_param<float>(params,"cb_index",0.0); } return index; } catch (std::runtime_error& e) { Logger::error("Caught exception: %s\n",e.what()); return NULL; } ''').replace('{', '{{').replace('}', '}}') py_source = ut.codeblock(''' if pts.dtype.type not in allowed_types: raise FLANNException('Cannot handle type: %s' % pts.dtype) pts = ensure_2d_array(pts, default_flags) npts, dim = pts.shape self.__ensureRandomSeed(kwargs) self.__flann_parameters.update(kwargs) if self.__curindex is not None: flann.free_index[self.__curindex_type]( self.__curindex, pointer(self.__flann_parameters)) self.__curindex = None speedup = c_float(0) self.__curindex = flann.build_index[pts.dtype.type]( pts, npts, dim, byref(speedup), pointer(self.__flann_parameters)) self.__curindex_data = pts self.__curindex_type = pts.dtype.type params = dict(self.__flann_parameters) params['speedup'] = speedup.value return params ''') # binding_argnames = ['dataset', 'rows', 'cols', 'speedup', 'flann_params'] return_doc = 'the newly created index or a number <0 for error' cpp_param_doc[ 'speedup'] = 'speedup over linear search, estimated if using autotuning, output parameter' optional_args = ['Distance d = Distance()'] return_type = 'flann_index_t' py_args = ['pts', '**kwargs'] binding_argnames = [ 'dataset', 'rows', 'cols', 'speedup', 'flann_params' ] elif binding_name == 'free_index': docstr = 'Deletes an index and releases the memory used by it.' pydoc = ut.codeblock(''' Deletes the current index freeing all the memory it uses. The memory used by the dataset that was indexed is not freed unless there are no other references to those numpy arrays. ''') c_source_part = ut.codeblock(r''' Index<Distance>* index = (Index<Distance>*)index_ptr; delete index; return 0; ''') py_source = ut.codeblock(''' self.__flann_parameters.update(kwargs) if self.__curindex is not None: flann.free_index[self.__curindex_type]( self.__curindex, pointer(self.__flann_parameters)) self.__curindex = None self.__curindex_data = None self.__added_data = [] self.__removed_ids = [] ''') return_doc = zero_success return_type = 'int' binding_argnames = ['index_ptr', 'flann_params'] cpp_param_doc['flann_params'] = ut.textblock( '''generic flann params (only used to specify verbosity)''') py_alias = 'delete_index' py_args = ['**kwargs'] elif binding_name == 'get_point': docstr = 'Gets a point from a given index position.' return_doc = 'pointer to datapoint or NULL on miss' binding_argnames = ['index_ptr', 'point_id'] cpp_param_doc['point_id'] = 'index of datapoint to get.'
return_type = 'Distance::ElementType*' elif binding_name == 'flann_get_distance_order': docstr = ut.textblock( '''Gets the distance order in use throughout FLANN (only applicable if minkowski distance is in use).''') binding_argnames = [] return_type = 'int' else: dictdef = { '_template_new': { 'docstr': '', 'binding_argnames': [], 'return_type': 'int', }, 'flann_get_distance_type': { 'docstr': '', 'binding_argnames': [], 'return_type': 'int', }, 'flann_log_verbosity': { 'docstr': ut.codeblock(''' Sets the log level used for all flann functions (unless specified in FLANNParameters for each call '''), 'binding_argnames': ['level'], 'return_type': 'void', }, } if binding_name in dictdef: docstr = dictdef[binding_name].get('docstr', '') binding_argnames = dictdef[binding_name]['binding_argnames'] return_type = dictdef[binding_name]['return_type'] else: raise NotImplementedError('Unknown binding name %r' % (binding_name, )) if c_source is None: if c_source_part is not None: try_ = ut.codeblock(''' try {{ ''') throw_ = '\n' + ut.indent( ut.codeblock(''' if (index_ptr==NULL) {{ throw FLANNException("Invalid index"); }} '''), ' ' * 4) if 'index_ptr' not in binding_argnames: throw_ = '' if 'flann_params' in binding_argnames: part1 = try_ + '\n' + ' init_flann_parameters(flann_params);' + throw_ else: part1 = try_ + throw_ if return_type == 'int': default_return = '-1' else: default_return = 'NULL' part2 = ut.codeblock(r''' }} catch (std::runtime_error& e) {{ Logger::error("Caught exception: %s\n",e.what()); return ''' + default_return + '''; }} ''') c_source = part1 + '\n' + ut.indent(c_source_part, ' ' * 4) + '\n' + part2 else: c_source = ut.codeblock(''' TODO: IMPLEMENT THIS FUNCTION WRAPPER ''') try: docstr_cpp = docstr[:] if return_doc is not None: param_docs = ut.dict_take(cpp_param_doc, binding_argnames) cpp_param_docblock = '\n'.join([ '%s = %s' % (name, doc) for name, doc in zip(binding_argnames, param_docs) ]) docstr_cpp += '\n\n' + 'Params:\n' + ut.indent( cpp_param_docblock, ' ') docstr_cpp += '\n\n' + 'Returns: ' + return_doc if pydoc is None: docstr_py = docstr[:] else: docstr_py = pydoc[:] if py_args: py_param_doc = cpp_param_doc.copy() py_param_doc['pts'] = py_param_doc['dataset'].replace( 'pointer to ', '') py_param_doc['qpts'] = ( py_param_doc['testset'].replace('pointer to ', '') + ' (may be a single point)') py_param_doc['num_neighbors'] = py_param_doc['nn'] py_param_doc['**kwargs'] = py_param_doc['flann_params'] py_args_ = [a.split('=')[0] for a in py_args] param_docs = ut.dict_take(py_param_doc, py_args_, '') # py_types = py_param_docblock = '\n'.join([ '%s: %s' % (name, doc) for name, doc in zip(py_args_, param_docs) ]) docstr_py += '\n\n' + 'Params:\n' + ut.indent( py_param_docblock, ' ') except Exception as ex: ut.printex(ex, keys=['binding_name']) raise pass binding_def = { 'cpp_binding_name': cpp_binding_name, 'docstr_cpp': docstr_cpp, 'docstr_py': docstr_py, 'return_type': return_type, 'binding_argnames': binding_argnames, 'c_source': c_source, 'optional_args': optional_args, 'py_source': py_source, 'py_args': py_args, 'py_alias': py_alias, } return binding_def
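def _demo_define_flann_bindings():
    """
    Hedged usage sketch (added for illustration; this helper is not part of
    the original module): shows the shape of the binding_def dict that
    define_flann_bindings returns for a simple binding.
    """
    binding_def = define_flann_bindings('used_memory')
    assert binding_def['return_type'] == 'int'
    assert binding_def['binding_argnames'] == ['index_ptr']
    # docstr_cpp / docstr_py carry the generated documentation text
    print(binding_def['docstr_cpp'])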
def request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr=None, verbose=True, force_rebuild=False, memtrack=None, prog_hook=None): r""" builds new NeighborIndexer which will try to use a disk cached flann if available Args: qreq_ (QueryRequest): query request object with hyper-parameters daid_list (list): nnindex_cfgstr (?): verbose (bool): Returns: NeighborIndexer: nnindexer CommandLine: python -m ibeis.algo.hots.neighbor_index_cache --test-request_diskcached_ibeis_nnindexer Example: >>> # DISABLE_DOCTEST >>> from ibeis.algo.hots.neighbor_index_cache import * # NOQA >>> import ibeis >>> # build test data >>> ibs = ibeis.opendb('testdb1') >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN) >>> qreq_ = ibs.new_query_request(daid_list, daid_list) >>> nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list) >>> verbose = True >>> # execute function >>> nnindexer = request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr, verbose) >>> # verify results >>> result = str(nnindexer) >>> print(result) """ if nnindex_cfgstr is None: nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list) cfgstr = nnindex_cfgstr cachedir = qreq_.ibs.get_flann_cachedir() flann_params = qreq_.qparams.flann_params flann_params['checks'] = qreq_.qparams.checks #if memtrack is not None: # memtrack.report('[PRE SUPPORT]') # Get annot descriptors to index if prog_hook is not None: prog_hook.set_progress(1, 3, 'Loading support data for indexer') print('[nnindex] Loading support data for indexer') vecs_list, fgws_list, fxs_list = get_support_data(qreq_, daid_list) if memtrack is not None: memtrack.report('[AFTER GET SUPPORT DATA]') try: nnindexer = new_neighbor_index(daid_list, vecs_list, fgws_list, fxs_list, flann_params, cachedir, cfgstr=cfgstr, verbose=verbose, force_rebuild=force_rebuild, memtrack=memtrack, prog_hook=prog_hook) except Exception as ex: ut.printex(ex, True, msg_='cannot build inverted index', key_list=['ibs.get_infostr()']) raise # Record these uuids in the disk based uuid map so they can be augmented if # needed min_reindex_thresh = qreq_.qparams.min_reindex_thresh if len(daid_list) > min_reindex_thresh: uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_) daids_hashid = get_data_cfgstr(qreq_.ibs, daid_list) visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list) UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid) if memtrack is not None: memtrack.report('[AFTER WRITE_UUID_MAP]') return nnindexer
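def _demo_cfgstr_disk_cache():
    """
    Illustrative sketch (not from the original source; all names here are
    hypothetical) of the cfgstr-keyed disk-cache pattern that
    request_diskcached_ibeis_nnindexer relies on: the cache entry is
    addressed by a config string, so changed hyperparameters can never hit
    a stale index.
    """
    from os.path import join, exists

    def load_or_build(cachedir, cfgstr, build_func):
        # the config string is baked into the cache filename
        fpath = join(cachedir, 'flann_index_%s.cache' % (cfgstr,))
        if exists(fpath):
            return fpath  # a real implementation would deserialize here
        build_func(fpath)  # a real implementation would build and save here
        return fpath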
pip install networkx wget http://www.graphviz.org/pub/graphviz/stable/windows/graphviz-2.38.msi graphviz-2.38.msi """ from __future__ import absolute_import, division, print_function, unicode_literals import utool as ut import vtool as vt import numpy as np # NOQA import itertools #import sys #from os.path import join try: import networkx as nx except ImportError as ex: ut.printex(ex, 'Cannot import networkx. pip install networkx', iswarning=True) def get_name_rowid_edges_from_nids(ibs, nids): aids_list = ibs.get_name_aids(nids) import itertools unflat_edges = (list(itertools.product(aids, aids)) for aids in aids_list) aid_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]] aids1 = ut.get_list_column(aid_pairs, 0) aids2 = ut.get_list_column(aid_pairs, 1) return aids1, aids2 def get_name_rowid_edges_from_aids(ibs, aid_list): aids_list, nids = ibs.group_annots_by_name(aid_list)
    # NOTE: the remainder of this function was truncated in the source; the
    # completion below is inferred from the parallel nids variant above.
    unflat_edges = (list(itertools.product(aids, aids)) for aids in aids_list)
    aid_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]]
    aids1 = ut.get_list_column(aid_pairs, 0)
    aids2 = ut.get_list_column(aid_pairs, 1)
    return aids1, aids2
def draw_keypoints(ax, kpts_, scale_factor=1.0, offset=(0.0, 0.0), rotation=0.0, ell=True, pts=False, rect=False, eig=False, ori=False, sifts=None, siftkw={}, H=None, **kwargs): """ draws keypoints extracted by pyhesaff onto a matplotlib axis FIXME: There is probably a matplotlib bug here. If you specify two different alphas in a collection, whatever the last alpha was gets applied to everything Args: ax (mpl.Axes): kpts (ndarray): keypoints [[x, y, a, c, d, theta], ...] scale_factor (float): offset (tuple): rotation (float): ell (bool): pts (bool): rect (bool): eig (bool): ori (bool): sifts (None): References: http://stackoverflow.com/questions/28401788/transforms-non-affine-patch CommandLine: python -m wbia.plottool.mpl_keypoint draw_keypoints --show Example: >>> # ENABLE_DOCTEST >>> from wbia.plottool.mpl_keypoint import * # NOQA >>> from wbia.plottool.mpl_keypoint import _draw_patches, _draw_pts # NOQA >>> import wbia.plottool as pt >>> import vtool as vt >>> imgBGR = vt.get_star_patch(jitter=True) >>> H = np.array([[1, 0, 0], [.5, 2, 0], [0, 0, 1]]) >>> H = np.array([[.8, 0, 0], [0, .8, 0], [0, 0, 1]]) >>> H = None >>> TAU = 2 * np.pi >>> kpts_ = vt.make_test_image_keypoints(imgBGR, scale=.5, skew=2, theta=TAU / 8.0) >>> scale_factor=1.0 >>> #offset=(0.0, -4.0) >>> offset=(0.0, 0.0) >>> rotation=0.0 >>> ell=True >>> pts=True >>> rect=True >>> eig=True >>> ori=True >>> # make random sifts >>> sifts = mpl_sift.testdata_sifts() >>> siftkw = {} >>> kwargs = dict(ori_color=[0, 1, 0], rect_color=[0, 0, 1], >>> eig_color=[1, 1, 0], pts_size=.1) >>> w, h = imgBGR.shape[0:2][::-1] >>> imgBGR_ = imgBGR if H is None else vt.warpAffine( >>> imgBGR, H, (int(w * .8), int(h * .8))) >>> fig, ax = pt.imshow(imgBGR_ * 255) >>> draw_keypoints(ax, kpts_, scale_factor, offset, rotation, ell, pts, ... 
rect, eig, ori, sifts, siftkw, H=H, **kwargs) >>> pt.iup() >>> pt.show_if_requested() """ import vtool.keypoint as ktool if kpts_.shape[1] == 2: # pad out structure if only xy given kpts = np.zeros((len(kpts_), 6)) kpts[:, 0:2] = kpts_ kpts[:, 2] = 1 kpts[:, 4] = 1 kpts_ = kpts if scale_factor is None: scale_factor = 1.0 # print('[mpl_keypoint.draw_keypoints] kwargs = ' + ut.repr2(kwargs)) # ellipse and point properties pts_size = kwargs.get('pts_size', 2) pts_alpha = kwargs.get('pts_alpha', 1.0) ell_alpha = kwargs.get('ell_alpha', 1.0) ell_linewidth = kwargs.get('ell_linewidth', 2) ell_color = kwargs.get('ell_color', None) if ell_color is None: ell_color = [1, 0, 0] # colors pts_color = kwargs.get('pts_color', ell_color) rect_color = kwargs.get('rect_color', ell_color) eig_color = kwargs.get('eig_color', ell_color) ori_color = kwargs.get('ori_color', ell_color) # linewidths eig_linewidth = kwargs.get('eig_linewidth', ell_linewidth) rect_linewidth = kwargs.get('rect_linewidth', ell_linewidth) ori_linewidth = kwargs.get('ori_linewidth', ell_linewidth) # Offset keypoints assert len(kpts_) > 0, 'cannot draw no keypoints1' kpts = ktool.offset_kpts(kpts_, offset, scale_factor) assert len(kpts) > 0, 'cannot draw no keypoints2' # Build list of keypoint shape transforms from unit circles to ellipses invVR_aff2Ds = get_invVR_aff2Ds(kpts, H=H) try: if sifts is not None: # SIFT descriptors pass_props( kwargs, siftkw, 'bin_color', 'arm1_color', 'arm2_color', 'arm1_lw', 'arm2_lw', 'stroke', 'arm_alpha', 'arm_alpha', 'multicolored_arms', ) mpl_sift.draw_sifts(ax, sifts, invVR_aff2Ds, **siftkw) if rect: # Bounding Rectangles rect_patches = rectangle_actors(invVR_aff2Ds) _draw_patches(ax, rect_patches, rect_color, ell_alpha, rect_linewidth) if ell: # Keypoint shape ell_patches = ellipse_actors(invVR_aff2Ds) _draw_patches(ax, ell_patches, ell_color, ell_alpha, ell_linewidth) if eig: # Shape eigenvectors eig_patches = eigenvector_actors(invVR_aff2Ds) _draw_patches(ax, eig_patches, eig_color, ell_alpha, eig_linewidth) if ori: # Keypoint orientation ori_patches = orientation_actors(kpts, H=H) _draw_patches(ax, ori_patches, ori_color, ell_alpha, ori_linewidth, ori_color) if pts: # Keypoint locations _xs, _ys = ktool.get_xys(kpts) if H is not None: # adjust for homography import vtool as vt _xs, _ys = vt.transform_points_with_homography( H, np.vstack((_xs, _ys))) pts_patches = _draw_pts(ax, _xs, _ys, pts_size, pts_color, pts_alpha) if pts_patches is not None: _draw_patches(ax, pts_patches, 'none', pts_alpha, pts_size, pts_color) except ValueError as ex: ut.printex(ex, '\n[mplkp] !!! ERROR') # print('_oris.shape = %r' % (_oris.shape,)) # print('_xs.shape = %r' % (_xs.shape,)) # print('_iv11s.shape = %r' % (_iv11s.shape,)) raise
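def _demo_kpts_xy_padding():
    """
    Sketch (added for illustration; not part of the original module) of the
    xy-only padding logic at the top of draw_keypoints: 2-column (x, y)
    keypoints are padded out to the 6-component [x, y, a, c, d, theta] form
    with an identity shape matrix.
    """
    import numpy as np
    kpts_xy = np.array([[10.0, 20.0], [30.0, 40.0]])
    kpts = np.zeros((len(kpts_xy), 6))
    kpts[:, 0:2] = kpts_xy
    kpts[:, 2] = 1  # a = 1
    kpts[:, 4] = 1  # d = 1
    assert kpts[0].tolist() == [10.0, 20.0, 1.0, 0.0, 1.0, 0.0]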
def query_chips(ibs, qaid_list=None, daid_list=None, cfgdict=None, use_cache=None, use_bigcache=None, qreq_=None, return_request=False, verbose=pipeline.VERB_PIPELINE, save_qcache=None, prog_hook=None, return_cm_dict=False, return_cm_simple_dict=False): r""" Submits a query request to the hotspotter recognition pipeline. Returns a list of ChipMatch objects. Args: qaid_list (list): a list of annotation ids to be submitted as queries daid_list (list): a list of annotation ids used as the database that will be searched cfgdict (dict): dictionary of configuration options used to create a new QueryRequest if not already specified use_cache (bool): turns on/off chip match cache (default: True) use_bigcache (bool): turns on/off chunked chip match cache (default: True) qreq_ (QueryRequest): optional, a QueryRequest object that overrides all previous settings return_request (bool): returns the request which will be created if one is not already specified verbose (bool): default=False, turns on verbose printing Returns: list: a list of ChipMatch objects containing the matching annotations, scores, and feature matches Returns(2): tuple: (cm_list, qreq_) - a list of query results and optionally the QueryRequest object used RESTful: Method: PUT URL: /api/query/chips/ CommandLine: python -m ibeis.web.apis_query --test-query_chips # Test speed of single query python -m ibeis --tf IBEISController.query_chips --db PZ_Master1 \ -a default:qindex=0:1,dindex=0:500 --nocache-hs python -m ibeis --tf IBEISController.query_chips --db PZ_Master1 \ -a default:qindex=0:1,dindex=0:3000 --nocache-hs python -m ibeis.web.apis_query --test-query_chips:1 --show python -m ibeis.web.apis_query --test-query_chips:2 --show Example: >>> # SLOW_DOCTEST >>> from ibeis.control.IBEISControl import * # NOQA >>> import ibeis >>> qreq_ = ibeis.testdata_qreq_() >>> ibs = qreq_.ibs >>> cm_list = qreq_.execute() >>> cm = cm_list[0] >>> ut.quit_if_noshow() >>> cm.ishow_analysis(qreq_) >>> ut.show_if_requested() Example: >>> # SLOW_DOCTEST >>> #from ibeis.all_imports import * # NOQA >>> import ibeis >>> from ibeis.control.IBEISControl import * # NOQA >>> qaid_list = [1] >>> daid_list = [1, 2, 3, 4, 5] >>> ibs = ibeis.test_main(db='testdb1') >>> qreq_ = ibs.new_query_request(qaid_list, daid_list) >>> cm = ibs.query_chips(qaid_list, daid_list, use_cache=False, qreq_=qreq_)[0] >>> ut.quit_if_noshow() >>> cm.ishow_analysis(qreq_) >>> ut.show_if_requested() Example1: >>> # SLOW_DOCTEST >>> #from ibeis.all_imports import * # NOQA >>> import ibeis >>> from ibeis.control.IBEISControl import * # NOQA >>> qaid_list = [1] >>> daid_list = [1, 2, 3, 4, 5] >>> ibs = ibeis.test_main(db='testdb1') >>> cfgdict = {'pipeline_root':'BC_DTW'} >>> qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict=cfgdict, verbose=True) >>> cm = ibs.query_chips(qaid_list, daid_list, use_cache=False, qreq_=qreq_)[0] >>> ut.quit_if_noshow() >>> cm.ishow_analysis(qreq_) >>> ut.show_if_requested() """ from ibeis.algo.hots import match_chips4 as mc4 # The qaid and daid objects are allowed to be None if qreq_ is # specified if qaid_list is None: qaid_list = qreq_.qaids if daid_list is None: if qreq_ is not None: daid_list = qreq_.daids else: daid_list = ibs.get_valid_aids() qaid_list, was_scalar = ut.wrap_iterable(qaid_list) # Check for empty queries try: assert len(daid_list) > 0, 'there are no database chips' assert len(qaid_list) > 0, 'there are no query chips' except AssertionError as ex: ut.printex(ex, 'Impossible query request', iswarning=True, keys=['qaid_list',
'daid_list']) if ut.SUPER_STRICT: raise cm_list = [None for qaid in qaid_list] else: # Check for consistency if qreq_ is not None: ut.assert_lists_eq(qreq_.qaids, qaid_list, 'qaids do not agree with qreq_', verbose=True) ut.assert_lists_eq(qreq_.daids, daid_list, 'daids do not agree with qreq_', verbose=True) if qreq_ is None: qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict=cfgdict, verbose=verbose) if isinstance(qreq_, dtool.BaseRequest): # Dtool has a new-ish way of doing requests. Eventually requests # will be deprecated and all of this will go away though. cm_list = qreq_.execute() else: # Send query to hotspotter (runs the query) cm_list = mc4.submit_query_request(ibs, qaid_list, daid_list, use_cache, use_bigcache, cfgdict=cfgdict, qreq_=qreq_, verbose=verbose, save_qcache=save_qcache, prog_hook=prog_hook) assert isinstance(cm_list, list), 'Chip matches were not returned as a list' if return_cm_dict or return_cm_simple_dict: # Convert ChipMatch objects to dictionary output if return_cm_simple_dict: for cm in cm_list: cm.qauuid = ibs.get_annot_uuids(cm.qaid) cm.dauuid_list = ibs.get_annot_uuids(cm.daid_list) keys = ['qauuid', 'dauuid_list'] cm_list = [cm.as_simple_dict(keys) for cm in cm_list] elif return_cm_dict: cm_list = [cm.as_dict() for cm in cm_list] if was_scalar: # hack for scalar input assert len(cm_list) == 1 cm_list = cm_list[0] if return_request: return cm_list, qreq_ else: return cm_list
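def _demo_scalar_unwrap():
    """
    Standalone sketch (added for illustration; assumption: mirrors the
    ut.wrap_iterable semantics relied on by query_chips) of the
    scalar-in/scalar-out convention: a bare qaid is wrapped into a list for
    processing and the single result is unwrapped on return.
    """
    def wrap_iterable(obj):
        was_scalar = not isinstance(obj, (list, tuple))
        return ([obj] if was_scalar else list(obj)), was_scalar
    qaid_list, was_scalar = wrap_iterable(1)
    cm_list = ['cm-for-aid%d' % aid for aid in qaid_list]
    if was_scalar:
        cm_list = cm_list[0]
    assert cm_list == 'cm-for-aid1'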
def parse_cfgstr_list2(cfgstr_list, named_defaults_dict=None, cfgtype=None, alias_keys=None, valid_keys=None, expand_nested=True, strict=True, special_join_dict=None, is_nestedcfgtype=False, metadata=None): r""" Parses config strings by looking up names in a dict of configs. DEPRICATE Args: cfgstr_list (list): named_defaults_dict (dict): (default = None) cfgtype (None): (default = None) alias_keys (None): (default = None) valid_keys (None): (default = None) expand_nested (bool): (default = True) strict (bool): (default = True) is_nestedcfgtype - used for annot configs so special joins aren't geometrically combined Note: Normal Case: --flag name Custom Argument Cases: --flag name:custom_key1=custom_val1,custom_key2=custom_val2 Multiple Config Case: --flag name1:custom_args1 name2:custom_args2 Multiple Config (special join) Case: (here name2 and name3 have some special interaction) --flag name1:custom_args1 name2:custom_args2::name3:custom_args3 Varied Argument Case: --flag name:key1=[val1,val2] Returns: list: cfg_combos_list CommandLine: python -m ibeis.expt.cfghelpers --exec-parse_cfgstr_list2 python -m ibeis.expt.cfghelpers --test-parse_cfgstr_list2 Example: >>> # ENABLE_DOCTEST >>> from ibeis.expt.cfghelpers import * # NOQA >>> cfgstr_list = ['name', 'name:f=1', 'name:b=[1,2]', 'name1:f=1::name2:f=1,b=2'] >>> #cfgstr_list = ['name', 'name1:f=1::name2:f=1,b=2'] >>> named_defaults_dict = None >>> cfgtype = None >>> alias_keys = None >>> valid_keys = None >>> expand_nested = True >>> strict = False >>> special_join_dict = {'joined': True} >>> cfg_combos_list = parse_cfgstr_list2(cfgstr_list, named_defaults_dict, >>> cfgtype, alias_keys, valid_keys, >>> expand_nested, strict, >>> special_join_dict) >>> print('cfg_combos_list = %s' % (ut.list_str(cfg_combos_list, nl=2),)) >>> print(ut.depth_profile(cfg_combos_list)) >>> cfg_list = ut.flatten(cfg_combos_list) >>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list]) >>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list)) >>> print(result) ['name:', 'name:f=1', 'name:b=1', 'name:b=2', 'name1:f=1,joined=True', 'name2:b=2,f=1,joined=True'] """ with ut.Indenter(' '): cfg_combos_list = [] for cfgstr in cfgstr_list: cfg_combos = [] # Parse special joined cfg case if cfgstr.find('::') > -1: special_cfgstr_list = cfgstr.split('::') special_combo_list = parse_cfgstr_list2( special_cfgstr_list, named_defaults_dict=named_defaults_dict, cfgtype=cfgtype, alias_keys=alias_keys, valid_keys=valid_keys, strict=strict, expand_nested=expand_nested, is_nestedcfgtype=False, metadata=metadata) OLD = False if OLD: special_combo = ut.flatten(special_combo_list) if special_join_dict is not None: for cfg in special_combo: cfg.update(special_join_dict) else: if special_join_dict is not None: for special_combo in special_combo_list: for cfg in special_combo: cfg.update(special_join_dict) if is_nestedcfgtype: cfg_combo = tuple([combo for combo in special_combo_list]) else: # not sure if this is right cfg_combo = special_combo_list # FIXME DUPLICATE CODE if expand_nested: cfg_combos.extend(cfg_combo) else: #print('Appending: ' + str(ut.depth_profile(cfg_combo))) #if ut.depth_profile(cfg_combo) == [1, 9]: # ut.embed() cfg_combos_list.append(cfg_combo) else: cfgname, cfgopt_strs, subx = ut.parse_cfgstr_name_options(cfgstr) # -- # Lookup named default settings try: base_cfg_list = ut.lookup_base_cfg_list(cfgname, named_defaults_dict, metadata=metadata) except Exception as ex: ut.printex(ex, keys=['cfgstr_list']) raise # -- for base_cfg in
base_cfg_list: cfg_combo = customize_base_cfg( cfgname, cfgopt_strs, base_cfg, cfgtype, alias_keys, valid_keys, strict=strict, offset=len(cfg_combos)) if is_nestedcfgtype: cfg_combo = [cfg_combo] if expand_nested: cfg_combos.extend(cfg_combo) else: cfg_combos_list.append(cfg_combo) # SUBX Cannot work here because of acfg hackiness #if subx is not None: # cfg_combo = ut.take(cfg_combo, subx) if expand_nested: cfg_combos_list.append(cfg_combos) # print('Updated to: ' + str(ut.depth_profile(cfg_combos_list))) #print('Returning len(cfg_combos_list) = %r' % (len(cfg_combos_list),)) return cfg_combos_list
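def _demo_cfgstr_grammar():
    """
    Minimal standalone sketch (added for illustration; this is not the real
    parser) of the cfgstr grammar handled by parse_cfgstr_list2 above:
    '<name>:key1=val1,key2=val2', with '::' joining multiple
    specially-interacting configs.
    """
    cfgstr = 'name1:f=1,b=2::name2:f=3'
    for part in cfgstr.split('::'):
        name, _, optstr = part.partition(':')
        opts = dict(kv.split('=', 1) for kv in optstr.split(',') if kv)
        print(name, opts)  # -> name1 {'f': '1', 'b': '2'}; name2 {'f': '3'}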
def arrptr_to_np_OLD(c_arrptr, shape, arr_t, dtype): """ Casts an array pointer from C to numpy Args: c_arrptr (uint64): a pointer to an array returned from C shape (tuple): shape of the underlying array being pointed to arr_t (PyCSimpleType): the ctypes datatype of c_arrptr dtype (dtype): numpy datatype the array will be to cast into CommandLine: python2 -m pyhesaff._pyhesaff --test-detect_feats_list:0 --rebuild-hesaff python2 -m pyhesaff._pyhesaff --test-detect_feats_list:0 python3 -m pyhesaff._pyhesaff --test-detect_feats_list:0 """ try: byte_t = ctypes.c_char itemsize_ = dtype().itemsize #import utool #utool.printvar2('itemsize_') ###--------- #dtype_t1 = C.c_voidp * itemsize_ #dtype_ptr_t1 = C.POINTER(dtype_t1) # size of each item #dtype_ptr_t = dtype_ptr_t1 ###--------- if True or six.PY2: # datatype of array elements dtype_t = byte_t * itemsize_ dtype_ptr_t = C.POINTER(dtype_t) # size of each item #typed_c_arrptr = c_arrptr.astype(C.c_long) typed_c_arrptr = c_arrptr.astype(int) c_arr = C.cast(typed_c_arrptr, dtype_ptr_t) # cast to ctypes #raise Exception('fuuu. Why does 2.7 work? Why does 3.4 not!?!!!') else: dtype_t = C.c_char * itemsize_ dtype_ptr_t = C.POINTER(dtype_t) # size of each item #typed_c_arrptr = c_arrptr.astype(int) #typed_c_arrptr = c_arrptr.astype(C.c_size_t) typed_c_arrptr = c_arrptr.astype(int) c_arr = C.cast(c_arrptr.astype(C.c_size_t), dtype_ptr_t) # cast to ctypes c_arr = C.cast(c_arrptr.astype(int), dtype_ptr_t) # cast to ctypes c_arr = C.cast(c_arrptr, dtype_ptr_t) # cast to ctypes #typed_c_arrptr = c_arrptr.astype(int) #, order='C', casting='safe') #utool.embed() #typed_c_arrptr = c_arrptr.astype(dtype_t) #typed_c_arrptr = c_arrptr.astype(ptr_t2) #typed_c_arrptr = c_arrptr.astype(C.c_uint8) #typed_c_arrptr = c_arrptr.astype(C.c_void_p) #typed_c_arrptr = c_arrptr.astype(C.c_int) #typed_c_arrptr = c_arrptr.astype(C.c_char) # WORKS BUT WRONG #typed_c_arrptr = c_arrptr.astype(bytes) # WORKS BUT WRONG #typed_c_arrptr = c_arrptr.astype(int) #typed_c_arrptr = c_arrptr #typed_c_arrptr = c_arrptr.astype(np.int64) #typed_c_arrptr = c_arrptr.astype(int) """ ctypes.cast(arg1, arg2) Input: arg1 - a ctypes object that is or can be converted to a pointer of some kind arg2 - a ctypes pointer type. 
Output: It returns an instance of the second argument, which references the same memory block as the first argument """ c_arr = C.cast(typed_c_arrptr, dtype_ptr_t) # cast to ctypes np_arr = np.ctypeslib.as_array(c_arr, shape) # cast to numpy np_arr.dtype = dtype # fix numpy dtype except Exception as ex: import utool as ut #utool.embed() varnames = sorted(list(locals().keys())) vartypes = [(type, name) for name in varnames] spaces = [None for name in varnames] c_arrptr_dtype = c_arrptr.dtype # NOQA #key_list = list(zip(varnames, vartypes, spaces)) key_list = ['c_arrptr_dtype' ] + 'c_arrptr, shape, arr_t, dtype'.split(', ') print('itemsize(float) = %r' % np.dtype(float).itemsize) print('itemsize(c_char) = %r' % np.dtype(C.c_char).itemsize) print('itemsize(c_wchar) = %r' % np.dtype(C.c_wchar).itemsize) print('itemsize(c_char_p) = %r' % np.dtype(C.c_char_p).itemsize) print('itemsize(c_wchar_p) = %r' % np.dtype(C.c_wchar_p).itemsize) print('itemsize(c_int) = %r' % np.dtype(C.c_int).itemsize) print('itemsize(c_int32) = %r' % np.dtype(C.c_int32).itemsize) print('itemsize(c_int64) = %r' % np.dtype(C.c_int64).itemsize) print('itemsize(int) = %r' % np.dtype(int).itemsize) print('itemsize(float32) = %r' % np.dtype(np.float32).itemsize) print('itemsize(float64) = %r' % np.dtype(np.float64).itemsize) ut.printex(ex, keys=key_list) ut.embed() raise return np_arr
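def _demo_arrptr_to_np():
    """
    Self-contained sketch (added for illustration; not part of the original
    module) of the core cast pattern in arrptr_to_np_OLD: view a raw integer
    address of a C buffer as a numpy array via ctypes, without copying.
    """
    import ctypes as C
    import numpy as np
    src = np.arange(6, dtype=np.float32)
    c_arrptr = src.ctypes.data                   # integer address of buffer
    dtype_ptr_t = C.POINTER(C.c_float)
    c_arr = C.cast(c_arrptr, dtype_ptr_t)        # cast to ctypes
    np_arr = np.ctypeslib.as_array(c_arr, shape=(2, 3))  # cast to numpy
    assert np.all(np_arr.ravel() == src)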
def compute_vocab(depc, fid_list, config): r""" Depcache method for computing a new visual vocab CommandLine: python -m wbia.core_annots --exec-compute_neighbor_index --show python -m wbia show_depc_annot_table_input --show --tablename=neighbor_index python -m wbia.algo.smk.vocab_indexer --exec-compute_vocab:0 python -m wbia.algo.smk.vocab_indexer --exec-compute_vocab:1 # FIXME make util_tests register python -m wbia.algo.smk.vocab_indexer compute_vocab:0 Ignore: >>> # Lev Oxford Debug Example >>> import wbia >>> ibs = wbia.opendb('Oxford') >>> depc = ibs.depc >>> table = depc['vocab'] >>> # Check what currently exists in vocab table >>> table.print_configs() >>> table.print_table() >>> table.print_internal_info() >>> # Grab aids used to compute vocab >>> from wbia.expt.experiment_helpers import get_annotcfg_list >>> expanded_aids_list = get_annotcfg_list(ibs, ['oxford'])[1] >>> qaids, daids = expanded_aids_list[0] >>> vocab_aids = daids >>> config = {'num_words': 64000} >>> exists = depc.check_rowids('vocab', [vocab_aids], config=config) >>> print('exists = %r' % (exists,)) >>> vocab_rowid = depc.get_rowids('vocab', [vocab_aids], config=config)[0] >>> print('vocab_rowid = %r' % (vocab_rowid,)) >>> vocab = table.get_row_data([vocab_rowid], 'words')[0] >>> print('vocab = %r' % (vocab,)) Example: >>> # DISABLE_DOCTEST >>> from wbia.algo.smk.vocab_indexer import * # NOQA >>> # Test depcache access >>> import wbia >>> ibs, aid_list = wbia.testdata_aids('testdb1') >>> depc = ibs.depc_annot >>> input_tuple = [aid_list] >>> rowid_kw = {} >>> tablename = 'vocab' >>> vocabid_list = depc.get_rowids(tablename, input_tuple, **rowid_kw) >>> vocab = depc.get(tablename, input_tuple, 'words')[0] >>> assert vocab.wordflann is not None >>> assert vocab.wordflann._FLANN__curindex_data is not None >>> assert vocab.wordflann._FLANN__curindex_data is vocab.wx_to_word Example: >>> # DISABLE_DOCTEST >>> from wbia.algo.smk.vocab_indexer import * # NOQA >>> import wbia >>> ibs, aid_list = wbia.testdata_aids('testdb1') >>> depc = ibs.depc_annot >>> fid_list = depc.get_rowids('feat', aid_list) >>> config = VocabConfig() >>> vocab, train_vecs = ut.exec_func_src(compute_vocab, keys=['vocab', 'train_vecs']) >>> idx_to_vec = depc.d.get_feat_vecs(aid_list)[0] >>> self = vocab >>> ut.quit_if_noshow() >>> data = train_vecs >>> centroids = vocab.wx_to_word >>> import wbia.plottool as pt >>> vt.plot_centroids(data, centroids, num_pca_dims=2) >>> ut.show_if_requested() >>> #config = ibs.depc_annot['vocab'].configclass() """ logger.info('[IBEIS] COMPUTE_VOCAB:') vecs_list = depc.get_native('feat', fid_list, 'vecs') train_vecs = np.vstack(vecs_list).astype(np.float32) num_words = config['num_words'] logger.info( '[smk_index] Train Vocab(nWords=%d) using %d annots and %d descriptors' % (num_words, len(fid_list), len(train_vecs))) if config['algorithm'] == 'kdtree': flann_params = vt.get_flann_params(random_seed=42) kwds = dict(max_iters=20, flann_params=flann_params) words = vt.akmeans(train_vecs, num_words, **kwds) elif config['algorithm'] == 'minibatch': logger.info('Using minibatch kmeans') import sklearn.cluster rng = np.random.RandomState(config['random_seed']) n_init = config['n_init'] with warnings.catch_warnings(): warnings.simplefilter('ignore') init_size = int(num_words * 4) batch_size = 1000 n_batches = ut.get_num_chunks(train_vecs.shape[0], batch_size) minibatch_params = dict( n_clusters=num_words, init='k-means++', init_size=init_size, n_init=n_init, max_iter=30000 // n_batches, batch_size=batch_size, tol=0.0, 
max_no_improvement=10, reassignment_ratio=0.01, ) logger.info('minibatch_params = %s' % (ut.repr4(minibatch_params), )) clusterer = sklearn.cluster.MiniBatchKMeans(compute_labels=False, random_state=rng, verbose=2, **minibatch_params) try: clusterer.fit(train_vecs) except (Exception, KeyboardInterrupt) as ex: ut.printex(ex, tb=True) if ut.is_developer(): ut.embed() else: raise words = clusterer.cluster_centers_ logger.info('Finished clustering') # if False: # flann_params['checks'] = 64 # flann_params['trees'] = 4 # num_words = 128 # centroids = vt.initialize_centroids(num_words, train_vecs, 'akmeans++') # words, hist = vt.akmeans_iterations( # train_vecs, centroids, max_iters=1000, monitor=True, # flann_params=flann_params) logger.info('Constructing vocab') vocab = VisualVocab(words) logger.info('Building vocab index') vocab.build() logger.info('Returning vocab') return (vocab, )
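def _demo_minibatch_vocab():
    """
    Hedged sketch (added for illustration) of the 'minibatch' branch of
    compute_vocab above, run on random data; the parameter values here are
    illustrative, not the production config.
    """
    import numpy as np
    import sklearn.cluster
    rng = np.random.RandomState(42)
    train_vecs = rng.rand(1000, 128).astype(np.float32)
    clusterer = sklearn.cluster.MiniBatchKMeans(
        n_clusters=64, batch_size=256, compute_labels=False, n_init=1,
        random_state=rng)
    clusterer.fit(train_vecs)
    words = clusterer.cluster_centers_  # the visual vocabulary
    assert words.shape == (64, 128)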
def openworkdirs_test(): """ problems: PZ_DanExt_All PZ_DanExt_Test GZ_March2012 Wildebeest_ONLY_MATCHES python dev.py --convert --dbdir /raid/work/PZ_Marianne --force-delete python dev.py --convert --dbdir /raid/work/SL_Siva --force-delete python dev.py --convert --dbdir /raid/work/PZ_SweatwaterSmall --force-delete """ canskip = [ '/raid/work/NAUT_test2', '/raid/work/WD_Siva', '/raid/work/PZ_FlankHack', '/raid/work/PZ_Mothers', '/raid/work/GZ_Foals', '/raid/work/PZ_MTEST', '/raid/work/GIR_Tanya', '/raid/work/GZ_Siva', '/raid/work/Wildebeest', '/raid/work/sonograms', '/raid/work/MISC_Jan12', '/raid/work/GZ_Master0', '/raid/work/LF_OPTIMIZADAS_NI_V_E', '/raid/work/LF_Bajo_bonito', '/raid/work/Frogs', '/raid/work/GZ_ALL', '/raid/work/JAG_Kelly', '/raid/work/NAUT_test (copy)', '/raid/work/WS_hard', '/raid/work/WY_Toads', '/raid/work/NAUT_Dan', '/raid/work/LF_WEST_POINT_OPTIMIZADAS', '/raid/work/Seals', '/raid/work/Rhinos_Stewart', '/raid/work/Elephants_Stewart', '/raid/work/NAUT_test', ] import ibeis from ibeis.init import sysres import os import utool as ut # NOQA from os.path import join from ibeis.dbio import ingest_hsdb import ibeis.other.dbinfo ibeis.other.dbinfo.rrr() workdir = sysres.get_workdir() dbname_list = os.listdir(workdir) dbpath_list = [join(workdir, name) for name in dbname_list] is_hsdb_list = list(map(ingest_hsdb.is_hsdb, dbpath_list)) hsdb_list = ut.compress(dbpath_list, is_hsdb_list) #is_ibs_cvt_list = np.array(list(map(is_succesful_convert, dbpath_list))) regen_cmds = [] for hsdb_dpath in hsdb_list: if hsdb_dpath in canskip: continue try: ibs = ibeis.opendb(hsdb_dpath) # NOQA print('Successfully opened hsdb: ' + hsdb_dpath) print(ibs.get_dbinfo_str()) except Exception as ex: ut.printex(ex, 'Failed to convert hsdb: ' + hsdb_dpath) regen_cmd = 'python dev.py --convert --dbdir ' + hsdb_dpath regen_cmds.append(regen_cmd) print('\n'.join(regen_cmds))
def make_header(tblname): """ Args: table_name - the internal table name """ tblnice = TABLE_NICE[tblname] colnames = TABLE_COLNAMES[tblname] editset = TABLE_EDITSET[tblname] tblgetters = getters[tblname] tblsetters = setters[tblname] #if levels aren't found, we're not dealing with a tree, so everything is at level 0 collevel_dict = TABLE_TREE_LEVELS.get(tblname, ut.ddict(lambda: 0)) collevels = [collevel_dict[colname] for colname in colnames] hiddencols = TABLE_HIDDEN_LIST.get( tblname, [False for _ in range(len(colnames))]) numstripes = TABLE_STRIPE_LIST.get(tblname, 1) colwidths_dict = widths.get(tblname, {}) colwidths = [colwidths_dict.get(colname, 100) for colname in colnames] def get_column_data(colname): try: coldef_tup = COL_DEF[colname] coltype, colnice = coldef_tup except KeyError as ex: strict = False ut.printex(ex, 'Need to add type info for colname=%r to COL_DEF' % colname, iswarning=not strict) if strict: raise else: # default coldef to give a string type and nice=colname coltype, colnice = (str, colname) coledit = colname in editset colgetter = tblgetters[colname] colsetter = None if not coledit else tblsetters.get(colname, None) return (coltype, colnice, coledit, colgetter, colsetter) try: _tuplist = list(zip(*list(map(get_column_data, colnames)))) (coltypes, colnices, coledits, colgetters, colsetters) = _tuplist except KeyError as ex: ut.printex(ex, key_list=['tblname', 'colnames']) raise header = { 'name': tblname, 'nice': tblnice, 'iders': iders[tblname], 'col_name_list': colnames, 'col_type_list': coltypes, 'col_nice_list': colnices, 'col_edit_list': coledits, 'col_getter_list': colgetters, 'col_setter_list': colsetters, 'col_level_list': collevels, 'col_hidden_list': hiddencols, 'num_duplicates': numstripes, 'get_thumb_size': lambda: ibs.cfg.other_cfg.thumb_size, 'col_width_list': colwidths, # TODO } return header
def run(thread): try: thread._run() except Exception as ex: ut.printex(ex, 'thread failed', tb=True)