def ibeis_user_profile():
    import utool as ut
    import sys
    from os.path import dirname
    addpath = True
    module_fpath = ut.truepath('~/local/init/REPOS1.py')
    if addpath:
        module_dpath = dirname(module_fpath)
        sys.path.append(module_dpath)
    REPOS1 = ut.import_module_from_fpath(module_fpath)
    self = UserProfile()
    #self.project_dpaths = REPOS1.PROJECT_REPOS
    self.project_dpaths = REPOS1.IBEIS_REPOS
    self.project_dpaths += [ut.truepath('~/latex/crall-candidacy-2015/')]
    self.project_include_patterns = [
        '*.py', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.vim',
    ]
    self.project_exclude_dirs = [
        '_graveyard', '_broken', 'CompilerIdCXX', 'CompilerIdC', 'build',
        'old', '_old_qt_hs_matcher',
    ]
    self.project_exclude_patterns = [
        '_grave*', '_autogen_explicit_controller*',
    ]
    return self


def change_doctestcommand_to_use_dashm_flag():
    r"""
    VimRegex: # not sure how to execute the replace command in vim in one line
    %s/python\s*\([A-Za-z_]+[\\/]\S*\)\.py\(.*\)/python -m \1 \2
    """
    # http://stackoverflow.com/questions/18737863/passing-a-function-to-re-sub-in-python
    # NOTE: cannot use [^ ] in this pattern; use \S instead
    regex_list = [r'python [A-Za-z_]+[\\/]\S* --allexamples']
    dpath_list = [
        ut.ensure_crossplat_path(ut.truepath('~/code/utool/utool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/ibeis/ibeis')),
        ut.ensure_crossplat_path(ut.truepath('~/code/vtool/vtool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/plottool/plottool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/guitool/guitool')),
    ]
    res = ut.grep(regex_list, recursive=True, dpath_list=dpath_list, verbose=True)
    found_filestr_list, found_lines_list, found_lxs_list = res

    import re
    keypat_list = [
        ('prefix', r'python\s*'),
        ('modrelpath', r'[A-Za-z_]+[\\/]\S*'),
        ('suffix', '.*'),
    ]
    namedregex = ut.named_field_regex(keypat_list)

    # Define the function passed to re.sub
    def replmodpath(matchobj):
        groupdict_ = matchobj.groupdict()
        relpath = groupdict_['modrelpath']
        prefix = groupdict_['prefix']
        suffix = groupdict_['suffix']
        modname = relpath
        modname = modname.replace('\\', '.')
        modname = modname.replace('/', '.')
        modname = modname.replace('.py', '')
        return prefix + '-m ' + modname + suffix

    for fpath in found_filestr_list:
        text = ut.read_from(fpath)
        # Perform the replacement and write the result back
        newtext = re.sub(namedregex, replmodpath, text)
        ut.write_to(fpath, newtext)


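# A minimal standalone sketch of the same rewrite using plain `re` named
# groups (no utool); the sample command string below is made up for
# illustration:
def _demo_dashm_rewrite():
    import re
    namedregex = r'(?P<prefix>python\s*)(?P<modrelpath>[A-Za-z_]+[\\/]\S*)(?P<suffix>.*)'

    def replmodpath(matchobj):
        modname = matchobj.group('modrelpath')
        modname = modname.replace('\\', '.').replace('/', '.').replace('.py', '')
        return matchobj.group('prefix') + '-m ' + modname + matchobj.group('suffix')

    text = 'python utool/util_path.py --allexamples'
    print(re.sub(namedregex, replmodpath, text))
    # -> python -m utool.util_path --allexamples

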
def gen_cap():
    fpaths = [
        ut.truepath('~/latex/crall-thesis-2017/figdef1.tex'),
        ut.truepath('~/latex/crall-thesis-2017/figdef2.tex'),
        ut.truepath('~/latex/crall-thesis-2017/figdef3.tex'),
        ut.truepath('~/latex/crall-thesis-2017/figdef4.tex'),
        ut.truepath('~/latex/crall-thesis-2017/figdef5.tex'),
    ]
    for fpath in fpaths:
        context = {'fpath': fpath}
        for sent in caption_sentences(fpath):
            yield sent, context


def set_workdir(work_dir=None, allow_gui=ALLOW_GUI):
    """
    Sets the work directory for this computer

    Args:
        work_dir (None): (default = None)
        allow_gui (bool): (default = True)

    CommandLine:
        python -c "import ibeis; ibeis.sysres.set_workdir('/raid/work2')"
        python -c "import ibeis; ibeis.sysres.set_workdir('/raid/work')"
        python -m ibeis.init.sysres --exec-set_workdir --workdir

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.init.sysres import *  # NOQA
        >>> print('current_work_dir = %s' % (str(get_workdir(False)),))
        >>> work_dir = ut.get_argval('--workdir', type_=str, default=None)
        >>> allow_gui = True
        >>> result = set_workdir(work_dir, allow_gui)
    """
    if work_dir is None:
        if allow_gui:
            try:
                work_dir = guiselect_workdir()
            except ImportError:
                allow_gui = False
        if not allow_gui:
            work_dir = ut.truepath(input('specify a workdir: '))
    if work_dir is None or not exists(work_dir):
        raise AssertionError('invalid workdir=%r' % work_dir)
    _ibeis_cache_write(WORKDIR_CACHEID, work_dir)


def models_cnn(
    ibs,
    config_dict,
    parse_classes_func,
    parse_line_func,
    check_hash=False,
    hidden_models=[],
    **kwargs,
):
    # NOTE: `import urllib` alone does not guarantee the error submodule is
    # loaded; import it explicitly
    import urllib.error

    model_dict = {}
    for config_tag in config_dict:
        if config_tag in hidden_models:
            continue
        try:
            config_url = config_dict[config_tag]
            classes_url = parse_classes_func(config_url)
            try:
                classes_filepath = ut.grab_file_url(
                    classes_url, appname='wbia', check_hash=check_hash
                )
                assert exists(classes_filepath)
            except (urllib.error.HTTPError, AssertionError):
                continue
            classes_filepath = ut.truepath(classes_filepath)
            line_list = parse_line_func(classes_filepath)
            model_dict[config_tag] = line_list
        except Exception:
            # Skip models whose classes file cannot be fetched or parsed
            pass
    return model_dict


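# A hypothetical usage sketch of models_cnn; the config tag, URL, and parser
# callables below are made up for illustration:
def _demo_models_cnn(ibs):
    config_dict = {'v1': 'https://example.com/models/v1.cfg'}

    def parse_classes_func(config_url):
        # Assume the classes file lives next to the config
        return config_url.replace('.cfg', '.classes')

    def parse_line_func(classes_filepath):
        return ut.read_from(classes_filepath).splitlines()

    return models_cnn(ibs, config_dict, parse_classes_func, parse_line_func)

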
def check_language(fpath):
    """
    fpath = 'outline_chapter2-related-work.md'
    """
    jarpath = ut.truepath(LANGTOOL_JAR)
    base_args = ['java', '-jar', jarpath]
    args = base_args[:]
    args += ['-l', 'en-US']
    if DISABLE_RULES:
        args += ['--disable', ','.join(DISABLE_RULES)]
    args += [fpath]
    print('Checking fpath = %r' % (fpath,))
    assert ut.checkpath(fpath)
    info = ut.cmd2(' '.join(args))
    out = info['out']
    items = out.split('\n\n')
    # Strip LanguageTool's preamble lines from the report
    if items and items[0].startswith('No language specified, using'):
        items = items[1:]
    if items and items[0].startswith('Expected text language:'):
        items = items[1:]
    if items and items[0].startswith('Working on '):
        items = items[1:]
    print('Found %d errors' % (len(items),))
    for item in items:
        if not should_ignore(item):
            print('\n')
            print(item)
    print('Done checking fpath = %r' % (fpath,))


def get_bibtex_dict():
    import utool as ut
    # HACK: custom current bibtex file
    possible_bib_fpaths = [
        ut.truepath('./My_Library_clean.bib'),
        #ut.truepath('~/latex/crall-thesis-2017/My_Library_clean.bib'),
    ]
    bib_fpath = None
    for bib_fpath_ in possible_bib_fpaths:
        if exists(bib_fpath_):
            bib_fpath = bib_fpath_
            break
    if bib_fpath is None:
        raise Exception('cant find bibtex file')
    # import bibtexparser
    from bibtexparser import bparser
    parser = bparser.BibTexParser()
    parser.ignore_nonstandard_types = True
    bib_text = ut.read_from(bib_fpath)
    bibtex_db = parser.parse(bib_text)
    bibtex_dict = bibtex_db.get_entry_dict()
    return bibtex_dict


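# A minimal usage sketch, assuming a My_Library_clean.bib file exists in the
# current directory; 'Lowe2004' is a hypothetical entry key:
def _demo_get_bibtex_dict():
    bibtex_dict = get_bibtex_dict()
    print('parsed %d entries' % (len(bibtex_dict),))
    entry = bibtex_dict.get('Lowe2004')
    if entry is not None:
        print(entry.get('title'))

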
def parse_latex_comments_for_commmands():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> parse_latex_comments_for_commmands()
    """
    fname = ut.get_argval('--fname', type_=str, default='figdefexpt.tex')
    text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/' + fname))
    lines = text.split('\n')
    cmd_list = ['']
    in_comment = True
    for line in lines:
        if line.startswith('% ---'):
            # Keep separators
            toadd = line.replace('%', '#')
            if not (len(cmd_list) > 1 and cmd_list[-1].startswith('# ---')):
                cmd_list[-1] += toadd
            else:
                cmd_list.append(toadd)
            cmd_list.append('')
        if line.strip().startswith(r'\begin{comment}'):
            in_comment = True
            continue
        if in_comment:
            line = line.strip()
            if line == '' or line.startswith('#') or line.startswith('%'):
                in_comment = False
            else:
                cmd_list[-1] = cmd_list[-1] + line
                if not line.strip().endswith('\\'):
                    cmd_list[-1] = cmd_list[-1] + ' $@'
                    cmd_list.append('')
                    in_comment = False
                else:
                    cmd_list[-1] = cmd_list[-1] + '\n'
    cmd_list = [cmd.replace('--render', '').replace('--diskshow', '')
                for cmd in cmd_list]
    # Formatting
    cmd_list2 = []
    for cmd in cmd_list:
        #cmd = cmd.replace('python -m ibeis.dev', './dev.py')
        cmd = cmd.replace('python -m ibeis.dev -e', 'ibeis -e')
        cmd_list2.append(cmd)
    cmd_list = cmd_list2
    print('cmd_list = %s' % (ut.list_str(cmd_list),))
    from os.path import splitext
    script_fname = 'regen_' + splitext(fname)[0] + '.sh'
    fname, script, line_list = write_script_lines(cmd_list, script_fname)


def find_module_callers():
    """
    TODO: attempt to build a call graph between module functions to make it
    easy to see what can be removed and what cannot.
    """
    import utool as ut
    from os.path import normpath
    # HACK: the last assignment wins; edit to select the module to analyze
    mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
    mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
    mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
    module = ut.import_module_from_fpath(mod_fpath)
    user_profile = ut.ensure_user_profile()
    doctestables = list(ut.iter_module_doctestable(module, include_builtin=False))
    grepkw = {}
    grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
    grepkw['dpath_list'] = user_profile.project_dpaths
    grepkw['verbose'] = True

    usage_map = {}
    for funcname, func in doctestables:
        print('Searching for funcname = %r' % (funcname,))
        found_fpath_list, found_lines_list, found_lxs_list = ut.grep(
            [funcname], **grepkw)
        used_in = (found_fpath_list, found_lines_list, found_lxs_list)
        usage_map[funcname] = used_in

    # Keep only usages outside of the module itself
    external_usage_map = {}
    for funcname, used_in in usage_map.items():
        (found_fpath_list, found_lines_list, found_lxs_list) = used_in
        isexternal_flag = [normpath(fpath) != normpath(mod_fpath)
                           for fpath in found_fpath_list]
        ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                       ut.compress(found_lines_list, isexternal_flag),
                       ut.compress(found_lxs_list, isexternal_flag))
        external_usage_map[funcname] = ext_used_in

    print('Calling modules: \n' + ut.repr2(
        ut.unique_ordered(ut.flatten([used_in[0] for used_in in
                                      external_usage_map.values()])),
        nl=True))


def init_console2():
    assert ut.WIN32, 'win32 only script'
    url = 'http://downloads.sourceforge.net/project/console/console-devel/2.00/Console-2.00b148-Beta_32bit.zip'
    unzipped_fpath = ut.grab_zipped_url(url)  # FIXME: bugged
    unzipped_fpath2 = join(dirname(unzipped_fpath), 'Console2')
    win32_bin = ut.truepath('~/local/PATH')
    ut.copy(ut.ls(unzipped_fpath2), win32_bin)


def __init__(self, fname, ext='.cPkl'):
    relevant_params = relevance[fname]
    relevant_cfg = ut.dict_subset(config, relevant_params)
    cfgstr = ut.get_cfg_lbl(relevant_cfg)
    dbdir = ut.truepath('/raid/work/Oxford/')
    super(SMKCacher, self).__init__(fname, cfgstr, cache_dir=dbdir, ext=ext)


def test(gpath_list, canonical_weight_filepath=None, **kwargs):
    from wbia.detecttools.directory import Directory

    # Get correct weight if specified with shorthand
    archive_url = None
    ensemble_index = None
    if canonical_weight_filepath is not None and ':' in canonical_weight_filepath:
        assert canonical_weight_filepath.count(':') == 1
        canonical_weight_filepath, ensemble_index = canonical_weight_filepath.split(':')
        ensemble_index = int(ensemble_index)

    if canonical_weight_filepath in ARCHIVE_URL_DICT:
        archive_url = ARCHIVE_URL_DICT[canonical_weight_filepath]
        archive_path = ut.grab_file_url(archive_url, appname='wbia', check_hash=True)
    else:
        raise RuntimeError('canonical_weight_filepath %r not recognized'
                           % (canonical_weight_filepath,))
    assert os.path.exists(archive_path)
    archive_path = ut.truepath(archive_path)

    # NOTE: str.strip('.zip') strips characters, not the suffix; use splitext
    # to drop the extension instead
    ensemble_path = os.path.splitext(archive_path)[0]
    if not os.path.exists(ensemble_path):
        ut.unarchive_file(archive_path, output_dir=ensemble_path)
    assert os.path.exists(ensemble_path)

    direct = Directory(ensemble_path, include_file_extensions=['weights'],
                       recursive=True)
    weights_path_list = direct.files()
    weights_path_list = sorted(weights_path_list)
    assert len(weights_path_list) > 0

    if ensemble_index is not None:
        assert 0 <= ensemble_index < len(weights_path_list)
        weights_path_list = [weights_path_list[ensemble_index]]
        assert len(weights_path_list) > 0

    logger.info('Using weights in the ensemble: %s ' % (ut.repr3(weights_path_list),))
    result_list = test_ensemble(gpath_list, weights_path_list, **kwargs)
    for result in result_list:
        # Clamp coordinates to be non-negative
        x0 = max(result['x0'], 0.0)
        y0 = max(result['y0'], 0.0)
        x1 = max(result['x1'], 0.0)
        y1 = max(result['y1'], 0.0)
        yield (x0, y0, x1, y1)


def __init__(self):
    import utool as ut
    self.modname = None
    code_dpath = ut.truepath(ut.get_argval('--code-dir', default='~/code'))
    self.code_dpath = ut.unexpanduser(code_dpath)
    self.repo_fname = ut.get_argval(('--repo', '--repo-name'), type_=str)
    self.repo_dpath = join(code_dpath, self.repo_fname)
    self.modname = ut.get_argval('--modname', default=self.repo_fname)
    self.regenfmt = ('python -m utool SetupRepo.{cmd} '
                     '--modname={modname} --repo={repo_fname} --codedir={code_dpath}')
    ut.ensuredir(self.repo_dpath, verbose=True)


def get_fpath_args(arglist_=None, pat='*'):
    import utool
    import sys
    import os
    if arglist_ is None:
        arglist_ = sys.argv[1:]
    input_path_list = []
    for input_path in arglist_:
        input_path = utool.truepath(input_path)
        if os.path.isdir(input_path):
            input_path_list.extend(utool.glob(input_path, pat, recursive=False,
                                              with_dirs=False))
        else:
            input_path_list.append(input_path)
    return input_path_list


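# A minimal usage sketch; the paths below are hypothetical:
def _demo_get_fpath_args():
    fpath_list = get_fpath_args(['~/images', '~/single_image.png'], pat='*.jpg')
    print('\n'.join(fpath_list))

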
def build_sphinx_apidoc_cmdstr():
    print('')
    print('if this fails try: sudo pip install sphinx')
    print('')
    apidoc = 'sphinx-apidoc'
    if ut.WIN32:
        winprefix = 'C:/Python27/Scripts/'
        sphinx_apidoc_exe = winprefix + apidoc + '.exe'
    else:
        sphinx_apidoc_exe = apidoc
    apidoc_argfmt_list = [
        sphinx_apidoc_exe,
        '--force',
        '--full',
        '--maxdepth="{maxdepth}"',
        '--doc-author="{author}"',
        '--doc-version="{doc_version}"',
        '--doc-release="{doc_release}"',
        '--output-dir="_doc"',
        #'--separate',  # Put documentation for each module on its own page
        '--private',  # Include "_private" modules
        '{pkgdir}',
    ]
    outputdir = '_doc'
    author = ut.parse_author()
    packages = ut.find_packages(maxdepth=1)
    assert len(packages) != 0, 'directory must contain at least one package'
    assert len(packages) == 1, (
        'FIXME: dont know what to do with more than one root package: %r'
        % (packages,))
    pkgdir = packages[0]
    version = ut.parse_package_for_version(pkgdir)
    modpath = dirname(ut.truepath(pkgdir))
    apidoc_fmtdict = {
        'author': author,
        'maxdepth': '8',
        'pkgdir': pkgdir,
        'doc_version': version,
        'doc_release': version,
        'outputdir': outputdir,
    }
    ut.assert_exists('setup.py')
    ut.ensuredir('_doc')
    apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
    apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
    print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir,))
    if ut.VERBOSE:
        print(ut.dict_str(apidoc_fmtdict))
    return apidoc_cmdstr, modpath, outputdir


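# A minimal usage sketch (assumes the current directory is a package root
# containing setup.py and exactly one top-level package):
def _demo_build_sphinx_apidoc_cmdstr():
    apidoc_cmdstr, modpath, outputdir = build_sphinx_apidoc_cmdstr()
    print('would run: %s' % (apidoc_cmdstr,))
    print('modpath=%r, outputdir=%r' % (modpath, outputdir))

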
def __init__(drive, root_dpath=None, state_fpath=None):
    drive.root_dpath = ut.truepath(ut.ensure_unicode(root_dpath))
    print('Initializing drive %s' % (drive.root_dpath,))
    ut.assert_exists(drive.root_dpath)
    # Mapping from dpath strings to fpath indexes
    assert state_fpath is None, 'not yet supported for external analysis'
    drive.cache_fname = join(drive.root_dpath, 'ut_pathreg_cache.shelf')
    drive.fpath_bytes_list_ = None
    drive.dpath_to_fidx_ = None
    drive.fpath_hashX_list_ = None
    drive.hash_to_fidxs_ = None
    drive.cache = ut.ShelfCacher(drive.cache_fname)


def ibeis_user_profile():
    import utool as ut
    import sys
    from os.path import dirname
    addpath = True
    module_fpath = ut.truepath('~/local/init/REPOS1.py')
    if addpath:
        module_dpath = dirname(module_fpath)
        sys.path.append(module_dpath)
    REPOS1 = ut.import_module_from_fpath(module_fpath)
    self = UserProfile(name='ibeis')
    #self.project_dpaths = REPOS1.PROJECT_REPOS
    self.project_dpaths = REPOS1.IBEIS_REPOS
    self.project_dpaths += [ut.truepath('~/latex/crall-candidacy-2015/')]
    self.project_include_patterns = [
        '*.py', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.vim',
    ]
    self.project_exclude_dirs = [
        '_graveyard', '_broken', 'CompilerIdCXX', 'CompilerIdC', 'build',
        'old', '_old_qt_hs_matcher', 'htmlcov',
    ]
    self.project_exclude_patterns = [
        '_grave*', '_autogen_explicit_controller*',
    ]
    return self


def get_latex_figure_str2(fpath_list, cmdname, **kwargs):
    """ hack for candidacy """
    import utool as ut
    from os.path import relpath
    # Make relative paths
    if kwargs.pop('relpath', True):
        start = ut.truepath('~/latex/crall-candidacy-2015')
        fpath_list = [relpath(fpath, start) for fpath in fpath_list]
    cmdname = ut.latex_sanitize_command_name(cmdname)
    kwargs['caption_str'] = kwargs.get('caption_str', cmdname)
    figure_str = ut.get_latex_figure_str(fpath_list, **kwargs)
    latex_block = ut.latex_newcommand(cmdname, figure_str)
    return latex_block


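# A minimal usage sketch; the figure path below is hypothetical:
def _demo_latex_figure_block():
    fpaths = [ut.truepath('~/latex/crall-candidacy-2015/figures/fig1.png')]
    print(get_latex_figure_str2(fpaths, 'MyFigure', nCols=1))

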
def _devcheck_backups():
    import dtool as dt
    dbdir = ut.truepath('~/work/PZ_Master1/_ibsdb')
    # NOTE: the staging glob result is unused; kept for interactive inspection
    sorted(ut.glob(join(dbdir, '_ibeis_backups'), '*staging_back*.sqlite3'))
    fpaths = sorted(ut.glob(join(dbdir, '_ibeis_backups'), '*database_back*.sqlite3'))
    for fpath in fpaths:
        db = dt.SQLDatabaseController(fpath=fpath)
        print('fpath = %r' % (fpath,))
        num_edges = len(db.executeone('SELECT rowid from annotmatch'))
        print('num_edges = %r' % (num_edges,))
        num_names = len(db.executeone('SELECT DISTINCT name_rowid from annotations'))
        print('num_names = %r' % (num_names,))


def grab_selenium_chromedriver(redownload=False):
    r"""
    Automatically download selenium chrome driver if needed

    CommandLine:
        python -m utool.util_grabdata --test-grab_selenium_chromedriver:1

    Example:
        >>> # DISABLE_DOCTEST
        >>> ut.grab_selenium_chromedriver()
        >>> import selenium.webdriver
        >>> driver = selenium.webdriver.Chrome()
        >>> driver.get('http://www.google.com')
        >>> search_field = driver.find_element_by_name('q')
        >>> search_field.send_keys('puppies')
        >>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)

    Example1:
        >>> # DISABLE_DOCTEST
        >>> import selenium.webdriver
        >>> driver = selenium.webdriver.Firefox()
        >>> driver.get('http://www.google.com')
        >>> search_field = driver.find_element_by_name('q')
        >>> search_field.send_keys('puppies')
        >>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
    """
    import utool as ut
    import os
    import stat
    # TODO: use a better download dir (but it must be in the PATH or selenium
    # freaks out)
    chromedriver_dpath = ut.ensuredir(ut.truepath('~/bin'))
    chromedriver_fpath = join(chromedriver_dpath, 'chromedriver')
    if not ut.checkpath(chromedriver_fpath) or redownload:
        assert chromedriver_dpath in os.environ['PATH'].split(os.pathsep)
        # TODO: make this work for windows as well
        if ut.LINUX and ut.util_cplat.is64bit_python():
            import requests
            rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE')
            assert rsp.status_code == 200
            url = ('http://chromedriver.storage.googleapis.com/' +
                   rsp.text.strip() + '/chromedriver_linux64.zip')
            ut.grab_zipped_url(url, download_dir=chromedriver_dpath,
                               redownload=True)
        else:
            raise AssertionError('unsupported chrome driver getter script')
        if not ut.WIN32:
            st = os.stat(chromedriver_fpath)
            os.chmod(chromedriver_fpath, st.st_mode | stat.S_IEXEC)
    ut.assert_exists(chromedriver_fpath)
    os.environ['webdriver.chrome.driver'] = chromedriver_fpath
    return chromedriver_fpath


def make_cacher(name, cfgstr=None):
    if cfgstr is None:
        cfgstr = ut.hashstr27(qreq_.get_cfgstr())
    if False and ut.is_developer():
        return ut.Cacher(
            fname=name + '_' + qreq_.ibs.get_dbname(),
            cfgstr=cfgstr,
            cache_dir=ut.ensuredir(ut.truepath('~/Desktop/smkcache')))
    else:
        # Caching disabled: return a stub whose ensure() just calls the func
        wrp = ut.DynStruct()

        def ensure(func):
            return func()

        wrp.ensure = ensure
        return wrp


def _devcheck_backups():
    from wbia import dtool as dt
    dbdir = ut.truepath('~/work/PZ_Master1/_ibsdb')
    sorted(ut.glob(join(dbdir, '_wbia_backups'), '*staging_back*.sqlite3'))
    fpaths = sorted(ut.glob(join(dbdir, '_wbia_backups'), '*database_back*.sqlite3'))
    for fpath in fpaths:
        db_uri = 'sqlite:///{}'.format(realpath(fpath))
        db = dt.SQLDatabaseController(db_uri, 'PZ_Master1')
        logger.info('fpath = %r' % (fpath,))
        num_edges = len(db.executeone('SELECT rowid from annotmatch'))
        logger.info('num_edges = %r' % (num_edges,))
        num_names = len(db.executeone('SELECT DISTINCT name_rowid from annotations'))
        logger.info('num_names = %r' % (num_names,))


def _init_dirs(ibs, dbdir=None, dbname='testdb_1', workdir='~/ibeis_workdir',
               ensure=True):
    """
    Define ibs directories
    """
    PATH_NAMES = const.PATH_NAMES
    REL_PATHS = const.REL_PATHS
    if not ut.QUIET:
        print('[ibs._init_dirs] ibs.dbdir = %r' % dbdir)
    if dbdir is not None:
        workdir, dbname = split(dbdir)
    ibs.workdir = ut.truepath(workdir)
    ibs.dbname = dbname
    ibs.sqldb_fname = PATH_NAMES.sqldb
    ibs.sqlstaging_fname = PATH_NAMES.sqlstaging
    # Make sure you are not nesting databases
    assert PATH_NAMES._ibsdb != ut.dirsplit(ibs.workdir), \
        'cannot work in _ibsdb internals'
    assert PATH_NAMES._ibsdb != dbname, \
        'cannot create db in _ibsdb internals'
    ibs.dbdir = join(ibs.workdir, ibs.dbname)
    # All internal paths live in <dbdir>/_ibsdb
    # TODO: constantify these so non controller objects (like in score
    # normalization) have access to these
    ibs._ibsdb = join(ibs.dbdir, REL_PATHS._ibsdb)
    ibs.trashdir = join(ibs.dbdir, REL_PATHS.trashdir)
    ibs.cachedir = join(ibs.dbdir, REL_PATHS.cache)
    ibs.backupdir = join(ibs.dbdir, REL_PATHS.backups)
    ibs.logsdir = join(ibs.dbdir, REL_PATHS.logs)
    ibs.chipdir = join(ibs.dbdir, REL_PATHS.chips)
    ibs.imgdir = join(ibs.dbdir, REL_PATHS.images)
    ibs.uploadsdir = join(ibs.dbdir, REL_PATHS.uploads)
    # All computed dirs live in <dbdir>/_ibsdb/_ibeis_cache
    ibs.thumb_dpath = join(ibs.dbdir, REL_PATHS.thumbs)
    ibs.flanndir = join(ibs.dbdir, REL_PATHS.flann)
    ibs.qresdir = join(ibs.dbdir, REL_PATHS.qres)
    ibs.bigcachedir = join(ibs.dbdir, REL_PATHS.bigcache)
    ibs.distinctdir = join(ibs.dbdir, REL_PATHS.distinctdir)
    if ensure:
        ibs.ensure_directories()
    assert dbdir is not None, 'must specify database directory'


def current_gvim_edit(op='e', fpath=''):
    r"""
    CommandLine:
        python -m utool.util_ubuntu XCtrl.current_gvim_edit sp ~/.bashrc
    """
    import utool as ut
    fpath = ut.unexpanduser(ut.truepath(fpath))
    # print('fpath = %r' % (fpath,))
    ut.copy_text_to_clipboard(fpath)
    # print(ut.get_clipboard())
    doscript = [
        ('focus', 'gvim'),
        ('key', 'Escape'),
        ('type2', ';' + op + ' ' + fpath),
        # ('type2', ';' + op + ' '),
        # ('key', 'ctrl+v'),
        ('key', 'KP_Enter'),
    ]
    XCtrl.do(*doscript, verbose=0, sleeptime=.001)


def _init_dirs(ibs, dbdir=None, dbname='testdb_1', workdir='~/ibeis_workdir',
               ensure=True):
    """
    Define ibs directories
    """
    PATH_NAMES = const.PATH_NAMES
    REL_PATHS = const.REL_PATHS
    if not ut.QUIET:
        print('[ibs._init_dirs] ibs.dbdir = %r' % dbdir)
    if dbdir is not None:
        workdir, dbname = split(dbdir)
    ibs.workdir = ut.truepath(workdir)
    ibs.dbname = dbname
    ibs.sqldb_fname = PATH_NAMES.sqldb
    # Make sure you are not nesting databases
    assert PATH_NAMES._ibsdb != ut.dirsplit(ibs.workdir), \
        'cannot work in _ibsdb internals'
    assert PATH_NAMES._ibsdb != dbname, \
        'cannot create db in _ibsdb internals'
    ibs.dbdir = join(ibs.workdir, ibs.dbname)
    # All internal paths live in <dbdir>/_ibsdb
    # TODO: constantify these so non controller objects (like in score
    # normalization) have access to these
    ibs._ibsdb = join(ibs.dbdir, REL_PATHS._ibsdb)
    ibs.trashdir = join(ibs.dbdir, REL_PATHS.trashdir)
    ibs.cachedir = join(ibs.dbdir, REL_PATHS.cache)
    ibs.backupdir = join(ibs.dbdir, REL_PATHS.backups)
    ibs.chipdir = join(ibs.dbdir, REL_PATHS.chips)
    ibs.imgdir = join(ibs.dbdir, REL_PATHS.images)
    ibs.uploadsdir = join(ibs.dbdir, REL_PATHS.uploads)
    # All computed dirs live in <dbdir>/_ibsdb/_ibeis_cache
    ibs.thumb_dpath = join(ibs.dbdir, REL_PATHS.thumbs)
    ibs.flanndir = join(ibs.dbdir, REL_PATHS.flann)
    ibs.qresdir = join(ibs.dbdir, REL_PATHS.qres)
    ibs.bigcachedir = join(ibs.dbdir, REL_PATHS.bigcache)
    ibs.distinctdir = join(ibs.dbdir, REL_PATHS.distinctdir)
    if ensure:
        ibs.ensure_directories()
    assert dbdir is not None, 'must specify database directory'


def dump_vectors(qreq_):
    """
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.smk.smk_pipeline import *  # NOQA
        >>> import wbia
        >>> ibs, aid_list = wbia.testdata_aids(defaultdb='PZ_MTEST',
        >>>                                    a='default:mingt=2,pername=2')
        >>> qaids = aid_list[0:2]
        >>> daids = aid_list[:]
        >>> config = {'nAssign': 1, 'num_words': 8000, 'sv_on': True}
        >>> qreq_ = SMKRequest(ibs, qaids, daids, config)
        >>> qreq_.ensure_data()
    """
    inva = qreq_.dinva
    X = qreq_.dinva.get_annot(qreq_.daids[0])
    n_words = inva.wx_list[-1] + 1
    n_dims = X.agg_rvecs.shape[1]
    n_annots = len(qreq_.daids)
    vlads = np.zeros((n_annots, n_words, n_dims), dtype=np.float32)
    ids_ = list(zip(qreq_.dnids, qreq_.daids))
    for count, (nid, aid) in enumerate(ut.ProgIter(ids_, label='vlad')):
        # Scatter each annot's aggregated residual vectors into its VLAD slot
        X = qreq_.dinva.get_annot(aid)
        out = vlads[count]
        out[X.wx_list] = X.agg_rvecs
        # X.to_dense(out=out)
    # Flatten out
    vlads.shape = (n_annots, n_words * n_dims)
    ut.print_object_size(vlads)
    fname = 'vlad_%d_d%d_%s' % (n_annots, n_words * n_dims,
                                qreq_.ibs.get_dbname())
    fpath = ut.truepath('~/' + fname + '.mat')
    import scipy.io
    mdict = {
        'vlads': vlads,
        'nids': qreq_.dnids,
    }
    scipy.io.savemat(fpath, mdict)


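# A minimal sketch of reading the dumped file back; the filename below is
# hypothetical (it depends on the database and vocab size used above):
def _demo_load_vlads():
    import scipy.io
    mdict = scipy.io.loadmat(ut.truepath('~/vlad_100_d1024000_PZ_MTEST.mat'))
    vlads = mdict['vlads']
    nids = mdict['nids']
    print('vlads.shape = %r, num ids = %d' % (vlads.shape, nids.size))

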
def hyrule_vocab_test():
    from yael.yutils import load_ext
    from os.path import join
    import sklearn.cluster

    dbdir = ut.truepath('/raid/work/Oxford/')
    datadir = dbdir + '/smk_data_iccv_2013/data/'

    # Files storing descriptors/geometry for Oxford5k dataset
    test_sift_fname = join(datadir, 'oxford_sift.uint8')
    # test_nf_fname = join(datadir, 'oxford_nsift.uint32')
    all_vecs = load_ext(test_sift_fname, ndims=128, verbose=True).astype(np.float32)
    logger.info(ut.print_object_size(all_vecs))
    # nfeats_list = load_ext(test_nf_fname, verbose=True)

    with ut.embed_on_exception_context:
        rng = np.random.RandomState(13421421)
        # init_size = int(config['num_words'] * 8)
        num_words = int(2 ** 16)
        init_size = num_words * 4
        # converged after 26043 iterations
        minibatch_params = dict(
            n_clusters=num_words,
            init='k-means++',
            # init='random',
            init_size=init_size,
            n_init=1,
            max_iter=100,
            batch_size=1000,
            tol=0.0,
            max_no_improvement=10,
            reassignment_ratio=0.01,
        )
        clusterer = sklearn.cluster.MiniBatchKMeans(
            compute_labels=False, random_state=rng, verbose=1,
            **minibatch_params)
        clusterer.fit(all_vecs)
        words = clusterer.cluster_centers_
        logger.info(words.shape)


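# A small self-contained sketch of assigning descriptors to a learned
# vocabulary with a fitted MiniBatchKMeans (random data stands in for SIFT
# descriptors; the sizes are illustrative only):
def _demo_assign_words():
    import numpy as np
    import sklearn.cluster
    rng = np.random.RandomState(0)
    vecs = rng.rand(1000, 128).astype(np.float32)
    clusterer = sklearn.cluster.MiniBatchKMeans(
        n_clusters=64, compute_labels=False, random_state=rng, n_init=1)
    clusterer.fit(vecs)
    wx_list = clusterer.predict(vecs)  # word assignment per descriptor
    print(wx_list[:10])

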
def check_image_sizes(data_uri_order, all_kpts, offset_list):
    """
    Check if any keypoints go out of bounds wrt their associated images
    """
    import vtool as vt
    from os.path import join
    imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
    gpath_list = [join(imgdir, imgid + '.jpg') for imgid in data_uri_order]
    imgsize_list = [vt.open_image_size(gpath) for gpath in gpath_list]
    kpts_list = [all_kpts[l:r] for l, r in ut.itertwo(offset_list)]
    kpts_extent = [vt.get_kpts_image_extent(kpts, outer=False, only_xy=False)
                   for kpts in ut.ProgIter(kpts_list, 'kpts extent')]
    for i, (size, extent) in enumerate(zip(imgsize_list, kpts_extent)):
        w, h = size
        _, maxx, _, maxy = extent
        assert np.isnan(maxx) or maxx < w
        assert np.isnan(maxy) or maxy < h


def show_data_image(data_uri_order, i, offset_list, all_kpts, all_vecs):
    """
    i = 12
    """
    import vtool as vt
    from os.path import join
    imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
    gpath = join(imgdir, data_uri_order[i] + '.jpg')
    image = vt.imread(gpath)
    import plottool as pt
    pt.qt4ensure()
    # pt.imshow(image)
    l = offset_list[i]
    r = offset_list[i + 1]
    kpts = all_kpts[l:r]
    vecs = all_vecs[l:r]
    pt.interact_keypoints.ishow_keypoints(image, kpts, vecs, ori=False,
                                          ell_alpha=.4, color='distinct')


def send_public_key_to_server(username, server):
    """
    Can just use this instead
    ssh-copy-id id@server

    ssh-copy-id [email protected]
    ssh-copy-id [email protected]
    ssh-copy-id [email protected]

    ut.copy_text_to_clipboard(remote_cmdstr)

    chmod 700 ~git/.ssh
    chmod 600 ~git/.ssh/authorized_keys
    """
    public_key = ut.read_from(ut.truepath('~/.ssh/id_rsa.pub'))
    # NOTE: the original format string used {user} but never supplied it;
    # use {username} consistently
    fmtstr = 'ssh {username}@{server} "{remote_cmdstr}"'
    remote_cmdstr = 'echo {public_key} >> ~{username}/.ssh/authorized_keys'.format(
        public_key=public_key.replace('\\', '\\\\'), username=username)
    sshcmdstr = fmtstr.format(username=username, server=server,
                              remote_cmdstr=remote_cmdstr)
    ut.copy_text_to_clipboard(sshcmdstr)
    print('You need to run the command in your clipboard')


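# A minimal usage sketch; the username and hostname are hypothetical:
def _demo_send_public_key():
    send_public_key_to_server('jon', 'myserver.example.com')
    # Then paste the clipboard contents into a terminal and run it

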
def get_juction_dpath():
    r"""
    Returns:
        str: junction_dpath

    CommandLine:
        python -m ibeis_cnn --tf get_juction_dpath --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.dataset import *  # NOQA
        >>> junction_dpath = get_juction_dpath()
        >>> result = ('junction_dpath = %s' % (str(junction_dpath),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> ut.vd(junction_dpath)
    """
    junction_dpath = ut.ensure_app_resource_dir('ibeis_cnn', 'training_junction')
    # Hacks to keep junction clean
    home_dlink = ut.truepath('~/training_junction')
    if not exists(home_dlink):
        ut.symlink(junction_dpath, home_dlink)
    ut.remove_broken_links(junction_dpath)
    return junction_dpath


def load_oxford_2013():
    """
    Found this data in README of SMK publication
    https://hal.inria.fr/hal-00864684/document
    http://people.rennes.inria.fr/Herve.Jegou/publications.html
    with download script

    CommandLine:
        # Download oxford13 data
        cd ~/work/Oxford
        mkdir -p smk_data_iccv_2013
        cd smk_data_iccv_2013
        wget -nH --cut-dirs=4 -r -Pdata/ ftp://ftp.irisa.fr/local/texmex/corpus/iccv2013/

    This dataset has 5063 images whereas 07 has 5062
    This dataset seems to contain an extra junk image: ashmolean_000214

    # Remember that matlab is 1 indexed!
    # DONT FORGET TO CONVERT TO 0 INDEXING!
    """
    from yael.ynumpy import fvecs_read
    from yael.yutils import load_ext
    import scipy.io
    import vtool as vt
    from os.path import join

    dbdir = ut.truepath('/raid/work/Oxford/')
    datadir = dbdir + '/smk_data_iccv_2013/data/'

    # we are not retraining, so this is unused
    # # Training data descriptors for Paris6k dataset
    # train_sift_fname = join(datadir, 'paris_sift.uint8')  # NOQA
    # # File storing visual words of Paris6k descriptors used in our ICCV paper
    # train_vw_fname = join(datadir, 'clust_preprocessed/oxford_train_vw.int32')

    # Pre-learned quantizer used in ICCV paper (used if docluster=false)
    codebook_fname = join(datadir, 'clust_preprocessed/oxford_codebook.fvecs')

    # Files storing descriptors/geometry for Oxford5k dataset
    test_sift_fname = join(datadir, 'oxford_sift.uint8')
    test_geom_fname = join(datadir, 'oxford_geom_sift.float')
    test_nf_fname = join(datadir, 'oxford_nsift.uint32')

    # File storing visual words of Oxford5k descriptors used in our ICCV paper
    test_vw_fname = join(datadir, 'clust_preprocessed/oxford_vw.int32')

    # Ground-truth for Oxford dataset
    gnd_fname = join(datadir, 'gnd_oxford.mat')

    oxford_vecs = load_ext(test_sift_fname, ndims=128, verbose=True)
    oxford_nfeats = load_ext(test_nf_fname, verbose=True)
    oxford_words = fvecs_read(codebook_fname)
    # Convert word ids from matlab 1-indexing to 0-indexing
    oxford_wids = load_ext(test_vw_fname, verbose=True) - 1

    test_geom_invV_fname = test_geom_fname + '.invV.pkl'
    try:
        all_kpts = ut.load_data(test_geom_invV_fname)
        logger.info('loaded invV keypoints')
    except IOError:
        oxford_kptsZ = load_ext(test_geom_fname, ndims=5, verbose=True)
        logger.info('converting to invV keypoints')
        all_kpts = vt.convert_kptsZ_to_kpts(oxford_kptsZ)
        ut.save_data(test_geom_invV_fname, all_kpts)

    gnd_ox = scipy.io.loadmat(gnd_fname)
    imlist = [x[0][0] for x in gnd_ox['imlist']]
    qx_to_dx = gnd_ox['qidx'] - 1

    data_uri_order = imlist
    query_uri_order = ut.take(data_uri_order, qx_to_dx)

    offset_list = np.hstack(([0], oxford_nfeats.cumsum())).astype(np.int64)

    # query_gnd = gnd_ox['gnd'][0][0]
    # bboxes = query_gnd[0]
    # qx_to_ok_gtidxs1 = [x[0] for x in query_gnd[1][0]]
    # qx_to_junk_gtidxs2 = [x[0] for x in query_gnd[2][0]]
    # # ut.depth_profile(qx_to_gtidxs1)
    # # ut.depth_profile(qx_to_gtidxs2)

    assert sum(oxford_nfeats) == len(oxford_vecs)
    assert offset_list[-1] == len(oxford_vecs)
    assert len(oxford_wids) == len(oxford_vecs)
    assert oxford_wids.max() == len(oxford_words) - 1

    data = {
        'offset_list': offset_list,
        'all_kpts': all_kpts,
        'all_vecs': oxford_vecs,
        'words': oxford_words,
        'idx_to_wx': oxford_wids,
        'data_uri_order': data_uri_order,
        'query_uri_order': query_uri_order,
    }
    return data


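# A minimal sketch of consuming the returned dict (keys taken from the return
# statement above); slices out the keypoints/descriptors of one image:
def _demo_data_slices(data, i=0):
    l, r = data['offset_list'][i], data['offset_list'][i + 1]
    kpts = data['all_kpts'][l:r]
    vecs = data['all_vecs'][l:r]
    print('image %s has %d features' % (data['data_uri_order'][i], len(vecs)))
    return kpts, vecs

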
def load_oxford_2007():
    """
    Loads data from
    http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf

    >>> from wbia.algo.smk.script_smk import *  # NOQA
    """
    from os.path import join, basename, splitext
    import pandas as pd
    import vtool as vt

    dbdir = ut.truepath('/raid/work/Oxford/')
    data_fpath0 = join(dbdir, 'data_2007.pkl')

    if ut.checkpath(data_fpath0):
        data = ut.load_data(data_fpath0)
        return data
    else:
        word_dpath = join(dbdir, 'word_oxc1_hesaff_sift_16M_1M')
        _word_fpath_list = ut.ls(word_dpath)
        imgid_to_word_fpath = {
            splitext(basename(word_fpath))[0]: word_fpath
            for word_fpath in _word_fpath_list
        }
        readme_fpath = join(dbdir, 'README2.txt')
        imgid_order = ut.readfrom(readme_fpath).split('\n')[20:-1]
        data_uri_order = [x.replace('oxc1_', '') for x in imgid_order]

        imgid_to_df = {}
        for imgid in ut.ProgIter(imgid_order, label='reading kpts'):
            word_fpath = imgid_to_word_fpath[imgid]
            row_gen = (map(float, line.strip('\n').split(' '))
                       for line in ut.read_lines_from(word_fpath)[2:])
            rows = [(int(word_id), x, y, e11, e12, e22)
                    for (word_id, x, y, e11, e12, e22) in row_gen]
            df = pd.DataFrame(rows, columns=['word_id', 'x', 'y',
                                             'e11', 'e12', 'e22'])
            imgid_to_df[imgid] = df

        df_list = ut.take(imgid_to_df, imgid_order)

        nfeat_list = [len(df_) for df_ in df_list]
        offset_list = [0] + ut.cumsum(nfeat_list)
        shape = (offset_list[-1], 128)
        # shape = (16334970, 128)
        sift_fpath = join(dbdir, 'OxfordSIFTDescriptors',
                          'feat_oxc1_hesaff_sift.bin')
        with open(sift_fpath, 'rb') as file_:
            with ut.Timer('Reading SIFT binary file'):
                nbytes = np.prod(shape)
                # np.fromstring is deprecated; frombuffer is the replacement
                all_vecs = np.frombuffer(file_.read(nbytes), dtype=np.uint8)
            all_vecs = all_vecs.reshape(shape)

        kpts_list = [df_.loc[:, ('x', 'y', 'e11', 'e12', 'e22')].values
                     for df_ in df_list]
        wordid_list = [df_.loc[:, 'word_id'].values for df_ in df_list]
        kpts_Z = np.vstack(kpts_list)
        idx_to_wx = np.hstack(wordid_list)
        # assert len(np.unique(idx_to_wx)) == 1E6

        # Reqd standard query order
        query_files = sorted(ut.glob(dbdir + '/oxford_groundtruth', '*_query.txt'))
        query_uri_order = []
        for qpath in query_files:
            text = ut.readfrom(qpath, verbose=0)
            query_uri = text.split(' ')[0].replace('oxc1_', '')
            query_uri_order.append(query_uri)

        logger.info('converting to invV')
        all_kpts = vt.convert_kptsZ_to_kpts(kpts_Z)

        data = {
            'offset_list': offset_list,
            'all_kpts': all_kpts,
            'all_vecs': all_vecs,
            'idx_to_wx': idx_to_wx,
            'data_uri_order': data_uri_order,
            'query_uri_order': query_uri_order,
        }
        ut.save_data(data_fpath0, data)
    return data


# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import sys
(print, rrr, profile) = ut.inject2(__name__, '[ibs]')

sys.path.append(ut.truepath('~/local/build_scripts/flannscripts'))
from flann_defs import define_flann_bindings  # NOQA


@profile
def update_bindings():
    r"""
    Returns:
        dict: matchtups

    CommandLine:
        python ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings
        utprof.py ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings

    Example:
        >>> # DISABLE_DOCTEST
        >>> from autogen_bindings import *  # NOQA
        >>> import sys
        >>> import utool as ut
        >>> sys.path.append(ut.truepath('~/local/build_scripts/flannscripts'))
        >>> matchtups = update_bindings()
        >>> result = ('matchtups = %s' % (ut.repr2(matchtups),))
        >>> print(result)
        >>> ut.quit_if_noshow()
    """


def TEST_GUI_ALL(ibs, back, gpath_list):
    """
    Creates a new database
    Adds test images
    Creates dummy ANNOTATIONS
    Selects things
    """
    # DELETE OLD
    print('[TEST] DELETE_OLD_DATABASE')
    work_dir = sysres.get_workdir()
    new_dbname = 'testdb_guiall'
    new_dbdir = utool.truepath(utool.join(work_dir, new_dbname))
    ibs_dbdir = utool.truepath(ibs.dbdir)
    msg = 'must start in different dir new_dbdir=%r != ibs_dbdir=%r,' % (
        new_dbdir, ibs_dbdir)
    assert new_dbdir != ibs_dbdir, msg
    print('passed: ' + msg)
    utool.delete(new_dbdir, ignore_errors=False)
    #
    # CREATE NEW
    print('[TEST] CREATE_NEW_DATABASE')
    back.new_database(new_dbdir)
    ibs = back.ibs  # The backend has a new ibeis; do not use the old one
    # Dont refresh for speed
    _kwargs = {'refresh': False}
    #
    # IMPORT IMAGES
    print('[TEST] IMPORT_TEST_GPATHS')
    print('gpath_list = ' + utool.indentjoin(gpath_list))
    gid_list = back.import_images(gpath_list=gpath_list, **_kwargs)
    print('\n'.join(' * gid_list[%d] = %r' % (count, gid)
                    for count, gid in enumerate(gid_list)))
    assert len(gid_list) == len(gpath_list)
    #
    # ADD ANNOTATIONS
    print('[TEST] ADD_ANNOTATIONS')

    def add_annot(gid, bbox, theta=0.0):
        aid = back.add_annot(gid=gid, bbox=bbox, theta=theta, **_kwargs)
        return aid

    preadd_aids = ibs.get_valid_aids()   # this should be []
    assert len(preadd_aids) == 0, 'there are already aids in the database!'
    print('preadd_aids = %r' % preadd_aids)
    # NOTE: numpy has no np.tau; tau / 8 == pi / 4
    aid1 = add_annot(gid_list[0], (50, 50, 100, 100), (np.pi / 4))
    aid2 = add_annot(gid_list[1], (50, 50, 100, 100))
    aid3 = add_annot(gid_list[2], (50, 50, 64, 64))
    aid4 = add_annot(gid_list[2], (50, 50, 200, 200))
    aid5 = add_annot(gid_list[1], (0, 0, 400, 400))
    print('aid1 = %r' % aid1)
    print('aid2 = %r' % aid2)
    print('aid3 = %r' % aid3)
    print('aid4 = %r' % aid4)
    print('aid5 = %r' % aid5)
    #
    # SELECT ANNOTATIONS
    print('[TEST] SELECT ANNOTATION / Add Chips')
    # get_valid_aids seems to return aids in an arbitrary order, it's an SQL thing
    aid_list = sorted(ibs.get_valid_aids())
    print('\n'.join(' * aid_list[%d] = %r' % (count, aid)
                    for count, aid in enumerate(aid_list)))
    back.select_aid(aid_list[0], show_image=True, **_kwargs)
    try:
        bbox_list = ibs.get_annot_bboxes(aid_list)
        assert bbox_list[0] == (50, 50, 100, 100)
    except AssertionError as ex:
        utool.printex(ex, key_list=['bbox_list', 'aid_list'])
        raise
    back.reselect_annotation(bbox=[51, 52, 103, 104])
    assert ibs.get_annot_bboxes(aid_list[0]) == (51, 52, 103, 104)

    back.compute_encounters()
    unixtime_list = [100, 23, 24]
    ibs.set_image_unixtime(gid_list, unixtime_list)
    back.compute_encounters()

    # Change some ANNOTATIONs
    #add_annot(gid_list[2], None)     # user selection
    #add_annot(None, [42, 42, 8, 8])  # back selection
    # I'm not sure how I want to integrate that IPython stuff
    return locals()


def main():
    target = 'dev_combo'
    master = 'master'
    mixins = [
        # 'mbkm_fixup',
        # 'progiter',
        # 'multiclass_mcc',
        'missing_values_rf',
    ]
    ut.cprint('--- OPEN REPO ---', 'blue')
    # dpath = os.getcwd()
    dpath = ut.truepath('~/code/scikit-learn')
    repo = ut.Repo(dpath=dpath, url='[email protected]:Erotemic/scikit-learn.git')
    if not repo.is_cloned():
        repo.clone()
        # repo.issue('pip install -e .')

    # Make sure remotes are properly setup
    repo._ensure_remote_exists(
        'source', 'https://github.com/scikit-learn/scikit-learn.git')
    repo._ensure_remote_exists(
        'raghavrv', 'https://github.com/raghavrv/scikit-learn.git')

    # Master should point to the scikit-learn official repo
    if repo.get_branch_remote('master') != 'source':
        repo.set_branch_remote('master', 'source')

    update_all(repo, master, mixins)

    REBASE_VERSION = True
    if REBASE_VERSION:
        ut.cprint('--- REBASE BRANCHES ON MASTER ---', 'blue')
        rebase_mixins = []
        for branch in mixins:
            new_branch = make_dev_rebased_mixin(repo, master, branch)
            rebase_mixins.append(new_branch)
        ut.cprint('--- CHECKOUT DEV MASTER --- ', 'blue')
        reset_dev_branch(repo, master, target)
        ut.cprint('--- MERGE INTO DEV MASTER --- ', 'blue')
        for branch in rebase_mixins:
            repo.issue('git merge --no-edit -s recursive ' + branch)
            # repo.issue('git merge --no-edit -s recursive -Xours ' + branch)
    else:
        # Attempt to automerge taking whatever is in the mixin branches as de-facto
        ut.cprint('--- CHECKOUT DEV MASTER --- ', 'blue')
        reset_dev_branch(repo, master, target)
        ut.cprint('--- MERGE INTO DEV MASTER --- ', 'blue')
        for branch in mixins:
            repo.issue('git merge --no-edit -s recursive -Xtheirs ' + branch)
        # cleanup because we didn't rebase
        fpath = join(repo.dpath, 'sklearn/utils/validation.py')
        ut.sedfile(fpath, 'accept_sparse=None', 'accept_sparse=False', force=True)
        repo.issue('git commit -am "quick fix of known merge issue"')

    # Recompile the package
    if True:
        repo.issue('python setup.py clean')
        repo.issue('python setup.py build -j9')
        repo.issue('pip install -e .')


def autogen_sphinx_apidoc():
    r"""
    autogen_sphinx_docs.py

    Ignore:
        C:\Python27\Scripts\autogen_sphinx_docs.py
        autogen_sphinx_docs.py

        pip uninstall sphinx
        pip install sphinx
        pip install sphinxcontrib-napoleon
        cd C:\Python27\Scripts
        ls C:\Python27\Scripts
    """
    import utool as ut
    # TODO: assert the sphinx-apidoc exe is found
    # TODO: make find_exe work?
    print('')
    print('if this fails try: sudo pip install sphinx')
    print('')
    winprefix = 'C:/Python27/Scripts/'
    apidoc = 'sphinx-apidoc'
    sphinx_apidoc_exe = apidoc if not ut.WIN32 else winprefix + apidoc + '.exe'
    apidoc_argfmt_list = [
        sphinx_apidoc_exe,
        '--force',
        '--full',
        '--maxdepth="{maxdepth}"',
        '--doc-author="{author}"',
        '--doc-version="{doc_version}"',
        '--doc-release="{doc_release}"',
        '--output-dir="_doc"',
        #'--separate',  # Put documentation for each module on its own page
        '--private',  # Include "_private" modules
        '{pkgdir}',
    ]
    outputdir = '_doc'
    author = ut.parse_author()
    packages = ut.find_packages(maxdepth=1)
    assert len(packages) != 0, 'directory must contain at least one package'
    assert len(packages) == 1, (
        'FIXME: dont know what to do with more than one root package: %r'
        % (packages,))
    pkgdir = packages[0]
    version = ut.parse_package_for_version(pkgdir)
    modpath = dirname(ut.truepath(pkgdir))
    apidoc_fmtdict = {
        'author': author,
        'maxdepth': '8',
        'pkgdir': pkgdir,
        'doc_version': version,
        'doc_release': version,
        'outputdir': outputdir,
    }
    ut.assert_exists('setup.py')
    ut.ensuredir('_doc')
    apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
    apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
    # sphinx-apidoc outputs conf.py to <outputdir>; add custom commands
    print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir,))
    if ut.VERBOSE:
        print(ut.dict_str(apidoc_fmtdict))
    ut.cmd(apidoc_cmdstr, shell=True)
    #
    # Change dir to <outputdir>
    print('chdir ' + outputdir)
    os.chdir(outputdir)
    #
    # Make custom edits to conf.py
    ext_search_text = r'extensions = \[[^/]*\]'
    # TODO: http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
    exclude_modules = []  # ['ibeis.all_imports']
    ext_repl_text = ut.codeblock(
        '''
        MOCK_MODULES = {exclude_modules}
        if len(MOCK_MODULES) > 0:
            import mock
            for mod_name in MOCK_MODULES:
                sys.modules[mod_name] = mock.Mock()

        extensions = [
            'sphinx.ext.autodoc',
            'sphinx.ext.viewcode',
            # For LaTeX
            'sphinx.ext.pngmath',
            # For Google Sytle Docstrs
            # https://pypi.python.org/pypi/sphinxcontrib-napoleon
            'sphinxcontrib.napoleon',
            #'sphinx.ext.napoleon',
        ]
        '''
    ).format(exclude_modules=str(exclude_modules))
    theme_search = "html_theme = 'default'"
    theme_repl = ut.codeblock(
        '''
        import sphinx_rtd_theme
        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
        '''
    )
    head_text = ut.codeblock(
        '''
        from sphinx.ext.autodoc import between
        import sphinx_rtd_theme
        import sys
        import os

        # Dont parse IBEIS args
        os.environ['IBIES_PARSE_ARGS'] = 'OFF'
        os.environ['UTOOL_AUTOGEN_SPHINX_RUNNING'] = 'ON'

        sys.path.append('{modpath}')
        sys.path.insert(0, os.path.abspath('../'))

        autosummary_generate = True

        modindex_common_prefix = ['_']
        '''
    ).format(modpath=ut.truepath(modpath))
    tail_text = ut.codeblock(
        '''
        def setup(app):
            # Register a sphinx.ext.autodoc.between listener to ignore everything
            # between lines that contain the word IGNORE
            app.connect('autodoc-process-docstring',
                        between('^.*IGNORE.*$', exclude=True))
            return app
        '''
    )
    conf_fname = 'conf.py'
    conf_text = ut.read_from(conf_fname)
    conf_text = conf_text.replace('import sys', 'import sys  # NOQA')
    conf_text = conf_text.replace('import os', 'import os  # NOQA')
    conf_text = ut.regex_replace(theme_search, theme_repl, conf_text)
    conf_text = ut.regex_replace(ext_search_text, ext_repl_text, conf_text)
    conf_text = head_text + '\n' + conf_text + tail_text
    ut.write_to(conf_fname, conf_text)
    # Make the documentation
    ut.cmd('make', 'html', shell=True)


def make_application_icon(exe_fpath, dry=True, props={}):
    r"""
    CommandLine:
        python -m utool.util_ubuntu --exec-make_application_icon --exe=cockatrice --icon=/home/joncrall/code/Cockatrice/cockatrice/resources/cockatrice.png
        python -m utool.util_ubuntu --exec-make_application_icon --exe=/opt/zotero/zotero --icon=/opt/zotero/chrome/icons/default/main-window.ico
        python -m utool.util_ubuntu --exec-make_application_icon --exe "env WINEPREFIX="/home/joncrall/.wine" wine C:\\\\windows\\\\command\\\\start.exe /Unix /home/joncrall/.wine32-dotnet45/dosdevices/c:/users/Public/Desktop/Hearthstone.lnk" --path "/home/joncrall/.wine/dosdevices/c:/Program Files (x86)/Hearthstone"
        # Exec=env WINEPREFIX="/home/joncrall/.wine" wine /home/joncrall/.wine/drive_c/Program\ Files\ \(x86\)/Battle.net/Battle.net.exe
        python -m utool.util_ubuntu --exec-make_application_icon --exe=/home/joncrall/code/build-ArenaTracker-Desktop_Qt_5_6_1_GCC_64bit-Debug --icon=/opt/zotero/chrome/icons/default/main-window.ico

        update-desktop-database ~/.local/share/applications

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_ubuntu import *  # NOQA
        >>> import utool as ut
        >>> exe_fpath = ut.get_argval('--exe', default='cockatrice')
        >>> icon = ut.get_argval('--icon', default=None)
        >>> dry = not ut.get_argflag(('--write', '-w'))
        >>> props = {'terminal': False, 'icon': icon}
        >>> result = make_application_icon(exe_fpath, dry, props)
        >>> print(result)
    """
    import utool as ut
    exe_fname_noext = splitext(basename(exe_fpath))[0]
    app_name = exe_fname_noext.replace('_', '-')
    nice_name = ' '.join(
        [word[0].upper() + word[1:].lower()
         for word in app_name.replace('-', ' ').split(' ')]
    )
    lines = [
        '[Desktop Entry]',
        'Name={nice_name}',
        'Exec={exe_fpath}',
    ]
    if 'mime_name' in props:
        lines += ['MimeType=application/x-{mime_name}']
    if 'icon' in props:
        lines += ['Icon={icon}']
    if props.get('path'):
        lines += ['Path={path}']
    # if props.get('comment'):
    #     lines += ['Comment={comment}']
    lines += [
        'Terminal={terminal}',
        'Type=Application',
        'Categories=Utility;Application;',
        'Comment=Custom App',
    ]
    fmtdict = locals()
    fmtdict.update(props)
    prefix = ut.truepath('~/.local/share')
    app_codeblock = '\n'.join(lines).format(**fmtdict)
    app_dpath = join(prefix, 'applications')
    app_fpath = join(app_dpath, '{app_name}.desktop'.format(**locals()))
    print(app_codeblock)
    print('---')
    print(app_fpath)
    print('L___')
    if not dry:
        ut.writeto(app_fpath, app_codeblock, verbose=ut.NOT_QUIET, n=None)
        ut.cmd('update-desktop-database ~/.local/share/applications')


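# A minimal dry-run sketch; the executable and icon paths below are
# hypothetical:
def _demo_make_application_icon():
    props = {'terminal': False,
             'icon': '/usr/share/icons/hicolor/128x128/apps/myapp.png'}
    make_application_icon('/usr/local/bin/myapp', dry=True, props=props)
    # Prints the generated [Desktop Entry] block and the target .desktop
    # path without writing anything

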
def nx_agraph_layout(graph, orig_graph=None, inplace=False, verbose=None, **kwargs):
    r"""
    orig_graph = graph
    graph = layout_graph

    References:
        http://www.graphviz.org/content/attrs
        http://www.graphviz.org/doc/info/attrs.html
    """
    import networkx as nx
    import pygraphviz

    kwargs = kwargs.copy()
    prog = kwargs.pop('prog', 'dot')
    if prog != 'dot':
        kwargs['overlap'] = kwargs.get('overlap', 'false')
    kwargs['splines'] = kwargs.get('splines', 'spline')
    kwargs['notranslate'] = 'true'  # for neato postprocessing
    argparts = ['-G%s=%s' % (key, str(val)) for key, val in kwargs.items()]
    args = ' '.join(argparts)
    splines = kwargs['splines']
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose:
        print('args = %r' % (args,))

    # Convert to agraph format
    graph_ = graph.copy()
    ut.nx_ensure_agraph_color(graph_)

    # Reduce size to be in inches not pixels
    # FIXME: make robust to param settings
    # Hack to make the w/h of the node take the max instead of dot, which
    # takes the minimum
    shaped_nodes = [n for n, d in graph_.nodes(data=True) if 'width' in d]
    node_attrs = ut.dict_take(graph_.node, shaped_nodes)
    width_px = np.array(ut.take_column(node_attrs, 'width'))
    height_px = np.array(ut.take_column(node_attrs, 'height'))
    scale = np.array(ut.dict_take_column(node_attrs, 'scale', default=1.0))
    width_in = width_px / 72.0 * scale
    height_in = height_px / 72.0 * scale
    width_in_dict = dict(zip(shaped_nodes, width_in))
    height_in_dict = dict(zip(shaped_nodes, height_in))
    nx.set_node_attributes(graph_, 'width', width_in_dict)
    nx.set_node_attributes(graph_, 'height', height_in_dict)
    ut.nx_delete_node_attr(graph_, 'scale')

    # Check for any nodes with groupids
    node_to_groupid = nx.get_node_attributes(graph_, 'groupid')
    if node_to_groupid:
        groupid_to_nodes = ut.group_items(*zip(*node_to_groupid.items()))
    else:
        groupid_to_nodes = {}

    # Initialize agraph format
    ut.nx_delete_None_edge_attr(graph_)
    agraph = nx.nx_agraph.to_agraph(graph_)

    # Add subgraph labels
    # TODO: subgraph attrs
    group_attrs = graph.graph.get('groupattrs', {})
    for groupid, nodes in groupid_to_nodes.items():
        subgraph_attrs = group_attrs.get(groupid, {}).copy()
        cluster_flag = True
        # FIXME: make this more natural to specify
        if 'cluster' in subgraph_attrs:
            cluster_flag = subgraph_attrs['cluster']
            del subgraph_attrs['cluster']
        # subgraph_attrs = dict(rankdir='LR')
        # subgraph_attrs['rank'] = 'min'
        # subgraph_attrs['rank'] = 'source'
        if cluster_flag:
            # graphviz treats subgraphs labeled with cluster differently
            name = 'cluster_' + groupid
        else:
            name = groupid
        agraph.add_subgraph(nodes, name, **subgraph_attrs)

    for node in graph_.nodes():
        # force pinning of node points
        anode = pygraphviz.Node(agraph, node)
        if anode.attr['pin'] == 'true':
            if (anode.attr['pos'] is not None and
                    len(anode.attr['pos']) > 0 and
                    not anode.attr['pos'].endswith('!')):
                import re
                # Parse the position string, convert from points to inches,
                # and mark it as pinned with a trailing '!'
                ptstr_ = anode.attr['pos']
                ptstr = ptstr_.strip('[]').strip(' ').strip('()')
                ptstr_list = [x.rstrip(',') for x in re.split(r'\s+', ptstr)]
                pt_list = list(map(float, ptstr_list))
                pt_arr = np.array(pt_list) / 72.0
                new_ptstr_list = list(map(str, pt_arr))
                new_ptstr = ','.join(new_ptstr_list) + '!'
                anode.attr['pos'] = new_ptstr

    # Run layout
    if ut.VERBOSE or verbose > 0:
        print('BEFORE LAYOUT\n' + str(agraph))
    agraph.layout(prog=prog, args=args)
    agraph.draw(ut.truepath('~/test_graphviz_draw.png'))
    if ut.VERBOSE or verbose > 1:
        print('AFTER LAYOUT\n' + str(agraph))

    # TODO: just replace with a single dict of attributes
    node_layout_attrs = ut.ddict(dict)
    edge_layout_attrs = ut.ddict(dict)

    for node in graph_.nodes():
        anode = pygraphviz.Node(agraph, node)
        node_attrs = parse_anode_layout_attrs(anode)
        for key, val in node_attrs.items():
            node_layout_attrs[key][node] = val

    edges = list(ut.nx_edges(graph_, keys=True))
    for edge in edges:
        aedge = pygraphviz.Edge(agraph, *edge)
        edge_attrs = parse_aedge_layout_attrs(aedge)
        for key, val in edge_attrs.items():
            edge_layout_attrs[key][edge] = val

    if orig_graph is not None and kwargs.get('draw_implicit', True):
        # ADD IN IMPLICIT EDGES
        layout_edges = set(ut.nx_edges(graph_, keys=True))
        orig_edges = set(ut.nx_edges(orig_graph, keys=True))
        implicit_edges = list(orig_edges - layout_edges)
        # all_edges = list(set.union(orig_edges, layout_edges))
        needs_implicit = len(implicit_edges) > 0
        if needs_implicit:
            # Pin down positions
            for node in agraph.nodes():
                anode = pygraphviz.Node(agraph, node)
                anode.attr['pin'] = 'true'
                anode.attr['pos'] += '!'

            # Add new edges to route
            for iedge in implicit_edges:
                data = orig_graph.get_edge_data(*iedge)
                agraph.add_edge(*iedge, **data)

            if ut.VERBOSE or verbose:
                print('BEFORE IMPLICIT LAYOUT\n' + str(agraph))
            # Route the implicit edges (must use neato)
            control_node = pygraphviz.Node(agraph, node)
            node1_attr1 = parse_anode_layout_attrs(control_node)

            implicit_kw = kwargs.copy()
            implicit_kw['overlap'] = 'true'
            # del implicit_kw['overlap']  # can cause node positions to change
            argparts = ['-G%s=%s' % (key, str(val))
                        for key, val in implicit_kw.items()]
            args = ' '.join(argparts)

            agraph.layout(prog='neato', args='-n ' + args)
            agraph.draw(ut.truepath('~/implicit_test_graphviz_draw.png'))
            if ut.VERBOSE or verbose:
                print('AFTER IMPLICIT LAYOUT\n' + str(agraph))

            control_node = pygraphviz.Node(agraph, node)
            print('control_node = %r' % (control_node,))
            node1_attr2 = parse_anode_layout_attrs(control_node)
            print('node1_attr2 = %r' % (node1_attr2,))

            # graph positions shifted
            # This is not the right place to divide by 72
            translation = (node1_attr1['pos'] - node1_attr2['pos'])
            print('translation = %r' % (translation,))
            # for iedge in all_edges:
            for iedge in implicit_edges:
                aedge = pygraphviz.Edge(agraph, *iedge)
                iedge_attrs = parse_aedge_layout_attrs(aedge, translation)
                for key, val in iedge_attrs.items():
                    edge_layout_attrs[key][iedge] = val

    graph_layout_attrs = dict(
        splines=splines,
    )
    layout_info = {
        'graph': graph_layout_attrs,
        'edge': dict(edge_layout_attrs),
        'node': dict(node_layout_attrs),
    }
    if inplace:
        if orig_graph is not None:
            graph = orig_graph
        apply_graph_layout_attrs(graph, layout_info)
    return graph, layout_info


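# A minimal usage sketch on a toy graph (assumes graphviz and pygraphviz are
# installed); the node positions should land in layout_info['node']['pos']:
def _demo_nx_agraph_layout():
    import networkx as nx
    graph = nx.DiGraph()
    graph.add_edges_from([('a', 'b'), ('b', 'c'), ('a', 'c')])
    graph, layout_info = nx_agraph_layout(graph, inplace=True, prog='dot')
    print(layout_info['node']['pos'])

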
def make_individual_latex_figures(ibs, fpaths_list, flat_case_labels,
                                  cfgx2_shortlbl, case_figdir,
                                  analysis_fpath_list):
    # HACK MAKE LATEX CONVENIENCE STUFF
    #print('LATEX HACK')
    if len(fpaths_list) == 0:
        print('nothing to render')
        return
    RENDER = ut.get_argflag('--render')
    DUMP_FIGDEF = ut.get_argflag(('--figdump', '--dump-figdef', '--figdef'))

    if not (DUMP_FIGDEF or RENDER):  # HACK
        return

    latex_code_blocks = []
    latex_block_keys = []

    caption_prefix = ut.get_argval('--cappref', type_=str, default='')
    caption_suffix = ut.get_argval('--capsuf', type_=str, default='')
    cmdaug = ut.get_argval('--cmdaug', type_=str, default='custom')

    selected = None

    for case_idx, (fpaths, labels) in enumerate(zip(fpaths_list, flat_case_labels)):
        if labels is None:
            labels = [cmdaug]
        if len(fpaths) < 4:
            nCols = len(fpaths)
        else:
            nCols = 2

        _cmdname = (ibs.get_dbname() + ' Case ' + ' '.join(labels) + '_' +
                    str(case_idx))
        #print('_cmdname = %r' % (_cmdname,))
        cmdname = ut.latex_sanitize_command_name(_cmdname)
        label_str = cmdname
        if len(caption_prefix) == 0:
            caption_str = ut.escape_latex(
                'Casetags: ' + ut.list_str(labels, nl=False, strvals=True) +
                ', db=' + ibs.get_dbname() + '. ')
        else:
            caption_str = ''
        use_sublbls = len(cfgx2_shortlbl) > 1
        if use_sublbls:
            caption_str += ut.escape_latex(
                'Each figure shows a different configuration: ')
            sublbls = ['(' + chr(97 + count) + ') '
                       for count in range(len(cfgx2_shortlbl))]
        else:
            #caption_str += ut.escape_latex('This figure depicts correct and
            #incorrect matches from configuration: ')
            sublbls = [''] * len(cfgx2_shortlbl)

        def wrap_tt(text):
            return r'{\tt ' + text + '}'

        _shortlbls = cfgx2_shortlbl
        _shortlbls = list(map(ut.escape_latex, _shortlbls))
        # Adjust spacing for breaks
        #tex_small_space = r''
        tex_small_space = r'\hspace{0pt}'
        # Remove query specific config flags in individual results
        _shortlbls = [re.sub('\\bq[^,]*,?', '', shortlbl)
                      for shortlbl in _shortlbls]
        # Let config strings be broken over newlines
        _shortlbls = [re.sub('\\+', tex_small_space + '+' + tex_small_space, shortlbl)
                      for shortlbl in _shortlbls]
        _shortlbls = [re.sub(', *', ',' + tex_small_space, shortlbl)
                      for shortlbl in _shortlbls]
        _shortlbls = list(map(wrap_tt, _shortlbls))
        cfgx2_texshortlbl = ['\n    ' + lbl + shortlbl
                             for lbl, shortlbl in zip(sublbls, _shortlbls)]

        caption_str += ut.conj_phrase(cfgx2_texshortlbl, 'and') + '.\n    '
        caption_str = '\n    ' + caption_prefix + caption_str + caption_suffix
        caption_str = caption_str.rstrip()
        figure_str = ut.get_latex_figure_str(fpaths,
                                             nCols=nCols,
                                             label_str=label_str,
                                             caption_str=caption_str,
                                             use_sublbls=None,
                                             use_frame=True)
        latex_block = ut.latex_newcommand(cmdname, figure_str)
        latex_block = '\n%----------\n' + latex_block
        latex_code_blocks.append(latex_block)
        latex_block_keys.append(cmdname)

    # HACK
    remove_fpath = ut.truepath('~/latex/crall-candidacy-2015') + '/'
    latex_fpath = join(case_figdir, 'latex_cases.tex')

    if selected is not None:
        selected_keys = selected
    else:
        selected_keys = latex_block_keys

    selected_blocks = ut.dict_take(
        dict(zip(latex_block_keys, latex_code_blocks)), selected_keys)

    figdef_block = '\n'.join(selected_blocks)
    figcmd_block = '\n'.join(['\\' + key for key in latex_block_keys])

    selected_block = figdef_block + '\n\n' + figcmd_block

    # HACK: need full paths to render
    selected_block_renderable = selected_block
    selected_block = selected_block.replace(remove_fpath, '')
    if RENDER:
        ut.render_latex_text(selected_block_renderable)

    if DUMP_FIGDEF:
        ut.writeto(latex_fpath, selected_block)

    #if NOT DUMP AND NOT RENDER:
    #    print('STANDARD LATEX RESULTS')
    #    cmdname = ibs.get_dbname() + 'Results'
    #    latex_block = ut.get_latex_figure_str2(analysis_fpath_list, cmdname, nCols=1)
    #    ut.print_code(latex_block, 'latex')

    if DUMP_FIGDEF or RENDER:
        ut.print_code(selected_block, 'latex')
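# The key LaTeX trick above is wrapping each figure in a \newcommand so a
# chapter can pull in a case by name (\MyDBCaseZero). A hypothetical
# stand-in for ut.latex_newcommand illustrating the emitted structure (the
# real helper lives in utool and may differ in detail):
def _latex_newcommand_sketch(cmdname, figure_body):
    # e.g. \newcommand{\MyDBCaseZero}{\begin{figure}...\end{figure}}
    return '\\newcommand{\\%s}{%s}' % (cmdname, figure_body)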
def build_conf_replstr(modpath):
    # NOTE: `modpath` was a free (undefined) name in the original fragment;
    # it is made an explicit parameter here.
    #
    # Make custom edits to conf.py
    # FIXME:
    #ext_search_text = ut.unindent(
    #    r'''
    #    extensions = [
    #    [^\]]*
    #    ]
    #    ''')
    ext_search_text = r'extensions = \[[^/]*\]'
    # TODO: http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
    #'sphinx.ext.mathjax',
    exclude_modules = []  # ['ibeis.all_imports']
    ext_repl_text = ut.codeblock(
        '''
        MOCK_MODULES = {exclude_modules}
        if len(MOCK_MODULES) > 0:
            import mock
            for mod_name in MOCK_MODULES:
                sys.modules[mod_name] = mock.Mock()

        extensions = [
            'sphinx.ext.autodoc',
            'sphinx.ext.viewcode',
            # For LaTeX
            'sphinx.ext.pngmath',
            # For Google Style Docstrings
            # https://pypi.python.org/pypi/sphinxcontrib-napoleon
            'sphinxcontrib.napoleon',
            #'sphinx.ext.napoleon',
        ]
        '''
    ).format(exclude_modules=str(exclude_modules))
    #theme_search = 'html_theme = \'default\''
    theme_search = 'html_theme = \'[a-zA-Z_1-3]*\''
    theme_repl = ut.codeblock(
        '''
        import sphinx_rtd_theme
        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
        ''')
    head_text = ut.codeblock(
        '''
        from sphinx.ext.autodoc import between
        import sphinx_rtd_theme
        import sys
        import os

        # Dont parse IBEIS args
        os.environ['IBEIS_PARSE_ARGS'] = 'OFF'
        os.environ['UTOOL_AUTOGEN_SPHINX_RUNNING'] = 'ON'

        sys.path.append('{modpath}')
        sys.path.insert(0, os.path.abspath("../"))

        autosummary_generate = True

        modindex_common_prefix = ['_']
        '''
    ).format(modpath=ut.truepath(modpath))
    tail_text = ut.codeblock(
        '''
        def setup(app):
            # Register a sphinx.ext.autodoc.between listener to ignore everything
            # between lines that contain the word IGNORE
            app.connect('autodoc-process-docstring',
                        between('^.*IGNORE.*$', exclude=True))
            return app
        '''
    )
    return (ext_search_text, ext_repl_text, theme_search, theme_repl,
            head_text, tail_text)
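# How the returned pieces are meant to be applied to an autogenerated Sphinx
# conf.py, assuming they are plain re.sub patterns/replacements (conf_fpath
# and this applier function are hypothetical):
def _apply_conf_repl_sketch(conf_fpath, modpath):
    import re
    (ext_search, ext_repl, theme_search, theme_repl,
     head_text, tail_text) = build_conf_replstr(modpath)
    with open(conf_fpath) as file_:
        text = file_.read()
    text = re.sub(ext_search, ext_repl, text)      # swap the extensions list
    text = re.sub(theme_search, theme_repl, text)  # force the RTD theme
    # prepend the env setup / path hacks, append the autodoc IGNORE filter
    return head_text + '\n' + text + '\n' + tail_text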
def add_new_mimetype_association(ext, mime_name, exe_fpath=None, dry=True):
    """
    TODO: move to external manager and generalize

    Args:
        ext (str): extension to associate
        mime_name (str): the name of the mime_name to create (defaults to ext)
        exe_fpath (str): executable location if this is for one specific file

    References:
        https://wiki.archlinux.org/index.php/Default_applications#Custom_file_associations

    CommandLine:
        python -m utool.util_ubuntu --exec-add_new_mimetype_association
        # Add ability to open ipython notebooks via double click
        python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=ipynb+json --ext=.ipynb --exe-fpath=/usr/local/bin/ipynb
        python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=ipynb+json --ext=.ipynb --exe-fpath=jupyter-notebook --force

        python -m utool.util_ubuntu --exec-add_new_mimetype_association --mime-name=sqlite --ext=.sqlite --exe-fpath=sqlitebrowser

    Example:
        >>> # SCRIPT
        >>> from utool.util_ubuntu import *  # NOQA
        >>> import utool as ut
        >>> ext = ut.get_argval('--ext', type_=str, default=None)
        >>> mime_name = ut.get_argval('--mime_name', type_=str, default=None)
        >>> exe_fpath = ut.get_argval('--exe_fpath', type_=str, default=None)
        >>> dry = not ut.get_argflag('--force')
        >>> result = add_new_mimetype_association(ext, mime_name, exe_fpath, dry)
        >>> print(result)
    """
    import utool as ut
    terminal = True

    mime_codeblock = ut.codeblock(
        '''
        <?xml version="1.0" encoding="UTF-8"?>
        <mime-info xmlns="http://www.freedesktop.org/standards/shared-mime-info">
            <mime-type type="application/x-{mime_name}">
                <glob-deleteall/>
                <glob pattern="*{ext}"/>
            </mime-type>
        </mime-info>
        '''
    ).format(**locals())

    prefix = ut.truepath('~/.local/share')
    mime_dpath = join(prefix, 'mime/packages')
    mime_fpath = join(mime_dpath, 'application-x-{mime_name}.xml'.format(**locals()))

    print(mime_codeblock)
    print('---')
    print(mime_fpath)
    print('L___')

    if exe_fpath is not None:
        exe_fname_noext = splitext(basename(exe_fpath))[0]
        app_name = exe_fname_noext.replace('_', '-')
        nice_name = ' '.join(
            [word[0].upper() + word[1:].lower()
             for word in app_name.replace('-', ' ').split(' ')]
        )
        app_codeblock = ut.codeblock(
            '''
            [Desktop Entry]
            Name={nice_name}
            Exec={exe_fpath}
            MimeType=application/x-{mime_name}
            Terminal={terminal}
            Type=Application
            Categories=Utility;Application;
            Comment=Custom App
            '''
        ).format(**locals())
        app_dpath = join(prefix, 'applications')
        app_fpath = join(app_dpath, '{app_name}.desktop'.format(**locals()))

        print(app_codeblock)
        print('---')
        print(app_fpath)
        print('L___')

    # WRITE FILES
    if not dry:
        ut.ensuredir(mime_dpath)
        ut.writeto(mime_fpath, mime_codeblock, verbose=ut.NOT_QUIET, n=None)
        if exe_fpath is not None:
            # NOTE: the original unconditionally ensured app_dpath, which is
            # only defined when exe_fpath is given; guarded here.
            ut.ensuredir(app_dpath)
            ut.writeto(app_fpath, app_codeblock, verbose=ut.NOT_QUIET, n=None)

        # UPDATE BACKENDS
        #ut.cmd('update-mime-database /usr/share/mime')
        #~/.local/share/applications/mimeapps.list
        print(ut.codeblock(
            '''
            Run these commands:
            update-desktop-database ~/.local/share/applications
            update-mime-database ~/.local/share/mime
            '''
        ))
        if exe_fpath is not None:
            ut.cmd('update-desktop-database ~/.local/share/applications')
        ut.cmd('update-mime-database ~/.local/share/mime')
    else:
        print('dry_run')
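# Dry-run sketch mirroring the CommandLine examples in the docstring above:
# with dry=True the mime XML and .desktop blocks are only printed, nothing
# is written or registered. (The demo wrapper itself is hypothetical.)
def _demo_add_ipynb_association():
    add_new_mimetype_association(ext='.ipynb', mime_name='ipynb+json',
                                 exe_fpath='jupyter-notebook', dry=True)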
def postload_commands(ibs, back):
    """
    Postload commands deal with a specific ibeis database

    ibeis --db PZ_MTEST --occur "*All Images" --query 1
    ibeis --db PZ_MTEST --occur "*All Images" --query-intra
    """
    if ut.NOT_QUIET:
        print('\n[main_cmd] postload_commands')
    if params.args.view_database_directory:
        print('got arg --vdd')
        vdd(ibs)
    if params.args.set_default_dbdir:
        sysres.set_default_dbdir(ibs.get_dbdir())
    if params.args.update_query_cfg is not None:
        # Set query parameters from command line using the --cfg flag
        cfgdict = ut.parse_cfgstr_list(params.args.update_query_cfg)
        print('Custom cfgdict specified')
        print(ut.dict_str(cfgdict))
        ibs.update_query_cfg(**cfgdict)
        #print(ibs.cfg.query_cfg.get_cfgstr())
    if params.args.edit_notes:
        ut.editfile(ibs.get_dbnotes_fpath(ensure=True))
    if params.args.delete_cache:
        ibs.delete_cache()
    if params.args.delete_cache_complete:
        ibs.delete_cache(delete_chips=True, delete_imagesets=True)
    if params.args.delete_query_cache:
        ibs.delete_qres_cache()
    if params.args.set_all_species is not None:
        ibs._overwrite_all_annot_species_to(params.args.set_all_species)
    if params.args.dump_schema:
        ibs.db.print_schema()

    # DEPRECATE
    if params.args.set_notes is not None:
        ibs.set_dbnotes(params.args.set_notes)
    if params.args.set_aids_as_hard is not None:
        aid_list = params.args.set_aids_as_hard
        ibs.set_annot_is_hard(aid_list, [True] * len(aid_list))
    #/DEPRECATE

    if ut.get_argflag('--ipynb'):
        back.launch_ipy_notebook()

    select_imgsetid = ut.get_argval(
        ('--select-imgsetid', '--imgsetid', '--occur'), None)
    if select_imgsetid is not None:
        print('\n+ --- CMD SELECT EID=%r ---' % (select_imgsetid,))
        # Whoa: this doesn't work. weird.
        #back.select_imgsetid(select_imgsetid)
        # This might be the root of gui problems
        #back.front._change_imageset(select_imgsetid)
        back.front.select_imageset_tab(select_imgsetid)
        print('L ___ CMD SELECT EID=%r ___\n' % (select_imgsetid,))

    # Send commands to GUIBack
    if params.args.select_aid is not None:
        if back is not None:
            try:
                ibsfuncs.assert_valid_aids(ibs, (params.args.select_aid,))
            except AssertionError:
                print('Valid RIDs are: %r' % (ibs.get_valid_aids(),))
                raise
            back.select_aid(params.args.select_aid)
    if params.args.select_gid is not None:
        back.select_gid(params.args.select_gid)
    if params.args.select_nid is not None:
        back.select_nid(params.args.select_nid)

    select_name = ut.get_argval('--select-name')
    if select_name is not None:
        import ibeis.gui.guiheaders as gh
        back.ibswgt.select_table_indicies_from_text(
            gh.NAMES_TREE, select_name, allow_table_change=True)

    if ut.get_argflag(('--intra-occur-query', '--query-intra-occur',
                       '--query-intra')):
        back.special_query_funcs['intra_occurrence'](
            cfgdict={'use_k_padding': False})

    qaid_list = ut.get_argval(('--query-aid', '--query'), type_=list,
                              default=None)
    if qaid_list is not None:
        #qaid_list = params.args.query_aid
        # fix stride case
        if len(qaid_list) == 1 and isinstance(qaid_list[0], tuple):
            qaid_list = list(qaid_list[0])
        daids_mode = ut.get_argval('--daids-mode', type_=str,
                                   default=const.VS_EXEMPLARS_KEY)
        back.compute_queries(qaid_list=qaid_list, daids_mode=daids_mode,
                             ranks_lt=10)

    if ut.get_argflag('--inc-query'):
        back.incremental_query()

    if ut.get_argflag(('--dbinfo', '--display_dbinfo')):
        back.display_dbinfo()

    aidcmd = ut.get_argval('--aidcmd', default=None)
    aid = ut.get_argval('--aid', type_=int, default=1)
    if aidcmd:
        #aidcmd = 'Interact image'
        metadata = ibs.get_annot_lazy_dict(aid)
        annot_context_options = metadata['annot_context_options']
        aidcmd_dict = dict(annot_context_options)
        print('aidcmd_dict = %s' % (ut.repr3(aidcmd_dict),))
        command = aidcmd_dict[aidcmd]
        command()
        #import utool
        #utool.embed()
        #back.start_web_server_parallel()

    if ut.get_argflag('--start-web'):
        back.start_web_server_parallel()

    screengrab_fpath = ut.get_argval('--screengrab')
    if screengrab_fpath:
        from guitool.__PYQT__.QtGui import QPixmap
        from PyQt4.QtTest import QTest
        from PyQt4.QtCore import Qt
        fpath = ut.truepath(screengrab_fpath)
        import guitool
        #ut.embed()
        timer2 = guitool.__PYQT__.QtCore.QTimer()
        done = [1000]

        def delayed_screenshot_func():
            if done[0] == 500:
                #back.mainwin.menubar.triggered.emit(back.mainwin.menuFile)
                print('Mouseclick')
                QTest.mouseClick(back.mainwin.menuFile, Qt.LeftButton)
                # This works
                #QTest.mouseClick(back.front.import_button, Qt.LeftButton)
            if done[0] == 1:
                timer2.stop()
                print('screengrab to %r' % (fpath,))
                screenimg = QPixmap.grabWindow(back.mainwin.winId())
                screenimg.save(fpath, 'jpg')
                ut.startfile(fpath)
                print('lub dub2')
            done[0] -= 1
            return None

        CLICK_FILE_MENU = True
        if CLICK_FILE_MENU:
            #ut.embed()
            #QTest::keyClick(menu, Qt::Key_Down)
            pass

        timer2.delayed_screenshot_func = delayed_screenshot_func
        timer2.timeout.connect(timer2.delayed_screenshot_func)
        timer2.start(1)
        back.mainwin.timer2 = timer2
        guitool.activate_qwindow(back.mainwin)
        #QPixmap.grabWindow(back.mainwin.winId()).save(fpath, 'jpg')
        #ut.startfile(fpath)
        #ut.embed()

    if params.args.postload_exit:
        print('[main_cmd] postload exit')
        sys.exit(0)
def run_asmk_script():
    with ut.embed_on_exception_context:  # NOQA
        """
        >>> from wbia.algo.smk.script_smk import *
        """  # NOQA

        # ==============================================
        # PREPROCESSING CONFIGURATION
        # ==============================================
        config = {
            # 'data_year': 2013,
            'data_year': None,
            'dtype': 'float32',
            # 'root_sift': True,
            'root_sift': False,
            # 'centering': True,
            'centering': False,
            'num_words': 2 ** 16,
            # 'num_words': 1E6
            # 'num_words': 8000,
            'kmeans_impl': 'sklearn.mini',
            'extern_words': False,
            'extern_assign': False,
            'assign_algo': 'kdtree',
            'checks': 1024,
            'int_rvec': True,
            'only_xy': False,
        }
        # Define which params are relevant for which operations
        relevance = {}
        relevance['feats'] = ['dtype', 'root_sift', 'centering', 'data_year']
        relevance['words'] = relevance['feats'] + [
            'num_words',
            'extern_words',
            'kmeans_impl',
        ]
        relevance['assign'] = relevance['words'] + [
            'checks',
            'extern_assign',
            'assign_algo',
        ]
        # relevance['ydata'] = relevance['assign'] + ['int_rvec']
        # relevance['xdata'] = relevance['assign'] + ['only_xy', 'int_rvec']

        nAssign = 1

        class SMKCacher(ut.Cacher):
            def __init__(self, fname, ext='.cPkl'):
                relevant_params = relevance[fname]
                relevant_cfg = ut.dict_subset(config, relevant_params)
                cfgstr = ut.get_cfg_lbl(relevant_cfg)
                dbdir = ut.truepath('/raid/work/Oxford/')
                super(SMKCacher, self).__init__(
                    fname, cfgstr, cache_dir=dbdir, ext=ext)

        # ==============================================
        # LOAD DATASET, EXTRACT AND POSTPROCESS FEATURES
        # ==============================================
        if config['data_year'] == 2007:
            data = load_oxford_2007()
        elif config['data_year'] == 2013:
            data = load_oxford_2013()
        elif config['data_year'] is None:
            data = load_oxford_wbia()

        offset_list = data['offset_list']
        all_kpts = data['all_kpts']
        raw_vecs = data['all_vecs']
        query_uri_order = data['query_uri_order']
        data_uri_order = data['data_uri_order']
        # del data

        # ================
        # PRE-PROCESS
        # ================
        import vtool as vt

        # Alias names to avoid errors in interactive sessions
        proc_vecs = raw_vecs
        del raw_vecs

        feats_cacher = SMKCacher('feats', ext='.npy')
        all_vecs = feats_cacher.tryload()
        if all_vecs is None:
            if config['dtype'] == 'float32':
                logger.info('Converting vecs to float32')
                proc_vecs = proc_vecs.astype(np.float32)
            else:
                proc_vecs = proc_vecs
                raise NotImplementedError('other dtype')

            if config['root_sift']:
                with ut.Timer('Apply root sift'):
                    np.sqrt(proc_vecs, out=proc_vecs)
                    vt.normalize(proc_vecs, ord=2, axis=1, out=proc_vecs)

            if config['centering']:
                with ut.Timer('Apply centering'):
                    mean_vec = np.mean(proc_vecs, axis=0)
                    # Center and then re-normalize
                    np.subtract(proc_vecs, mean_vec[None, :], out=proc_vecs)
                    vt.normalize(proc_vecs, ord=2, axis=1, out=proc_vecs)

            if config['dtype'] == 'int8':
                # NOTE: left unfinished in the original (a bare `smk_funcs`
                # expression); int8 handling happens later via
                # cast_residual_integer
                pass

            all_vecs = proc_vecs
            feats_cacher.save(all_vecs)
        del proc_vecs

        # =====================================
        # BUILD VISUAL VOCABULARY
        # =====================================
        if config['extern_words']:
            words = data['words']
            assert config['num_words'] is None or len(words) == config['num_words']
        else:
            word_cacher = SMKCacher('words')
            words = word_cacher.tryload()
            if words is None:
                with ut.embed_on_exception_context:
                    if config['kmeans_impl'] == 'sklearn.mini':
                        import sklearn.cluster
                        rng = np.random.RandomState(13421421)
                        # init_size = int(config['num_words'] * 8)
                        init_size = int(config['num_words'] * 4)
                        # converged after 26043 iterations
                        clusterer = sklearn.cluster.MiniBatchKMeans(
                            config['num_words'],
                            init_size=init_size,
                            batch_size=1000,
                            compute_labels=False,
                            max_iter=20,
                            random_state=rng,
                            n_init=1,
                            verbose=1,
                        )
                        clusterer.fit(all_vecs)
                        words = clusterer.cluster_centers_
                    elif config['kmeans_impl'] == 'yael':
                        from yael import ynumpy
                        centroids, qerr, dis, assign, nassign = ynumpy.kmeans(
                            all_vecs,
                            config['num_words'],
                            init='kmeans++',
                            verbose=True,
                            output='all',
                        )
                        words = centroids
                word_cacher.save(words)

        # =====================================
        # ASSIGN EACH VECTOR TO ITS NEAREST WORD
        # =====================================
        if config['extern_assign']:
            assert config['extern_words'], 'need extern cluster to extern assign'
            idx_to_wxs = vt.atleast_nd(data['idx_to_wx'], 2)
            idx_to_maws = np.ones(idx_to_wxs.shape, dtype=np.float32)
            idx_to_wxs = np.ma.array(idx_to_wxs)
            idx_to_maws = np.ma.array(idx_to_maws)
        else:
            from wbia.algo.smk import vocab_indexer
            vocab = vocab_indexer.VisualVocab(words)
            dassign_cacher = SMKCacher('assign')
            assign_tup = dassign_cacher.tryload()
            if assign_tup is None:
                vocab.flann_params['algorithm'] = config['assign_algo']
                vocab.build()
                # Takes 12 minutes to assign jegous vecs to 2**16 vocab
                with ut.Timer('assign vocab neighbors'):
                    _idx_to_wx, _idx_to_wdist = vocab.nn_index(
                        all_vecs, nAssign, checks=config['checks'])
                if nAssign > 1:
                    idx_to_wxs, idx_to_maws = smk_funcs.weight_multi_assigns(
                        _idx_to_wx,
                        _idx_to_wdist,
                        massign_alpha=1.2,
                        massign_sigma=80.0,
                        massign_equal_weights=True,
                    )
                else:
                    idx_to_wxs = np.ma.masked_array(_idx_to_wx, fill_value=-1)
                    idx_to_maws = np.ma.ones(
                        idx_to_wxs.shape, fill_value=-1, dtype=np.float32)
                    idx_to_maws.mask = idx_to_wxs.mask
                assign_tup = (idx_to_wxs, idx_to_maws)
                dassign_cacher.save(assign_tup)
            idx_to_wxs, idx_to_maws = assign_tup

        # Breakup vectors, keypoints, and word assignments by annotation
        wx_lists = [idx_to_wxs[left:right]
                    for left, right in ut.itertwo(offset_list)]
        maw_lists = [idx_to_maws[left:right]
                     for left, right in ut.itertwo(offset_list)]
        vecs_list = [all_vecs[left:right]
                     for left, right in ut.itertwo(offset_list)]
        kpts_list = [all_kpts[left:right]
                     for left, right in ut.itertwo(offset_list)]

        # =======================
        # FIND QUERY SUBREGIONS
        # =======================
        ibs, query_annots, data_annots, qx_to_dx = load_ordered_annots(
            data_uri_order, query_uri_order)
        daids = data_annots.aids
        qaids = query_annots.aids

        query_super_kpts = ut.take(kpts_list, qx_to_dx)
        query_super_vecs = ut.take(vecs_list, qx_to_dx)
        query_super_wxs = ut.take(wx_lists, qx_to_dx)
        query_super_maws = ut.take(maw_lists, qx_to_dx)

        # Mark which keypoints are within the bbox of the query
        query_flags_list = []
        only_xy = config['only_xy']
        for kpts_, bbox in zip(query_super_kpts, query_annots.bboxes):
            flags = kpts_inside_bbox(kpts_, bbox, only_xy=only_xy)
            query_flags_list.append(flags)

        logger.info('Queries are crops of existing database images.')
        logger.info('Looking at average percents')
        percent_list = [flags_.sum() / flags_.shape[0]
                        for flags_ in query_flags_list]
        percent_stats = ut.get_stats(percent_list)
        logger.info('percent_stats = %s' % (ut.repr4(percent_stats),))

        import vtool as vt
        query_kpts = vt.zipcompress(query_super_kpts, query_flags_list, axis=0)
        query_vecs = vt.zipcompress(query_super_vecs, query_flags_list, axis=0)
        query_wxs = vt.zipcompress(query_super_wxs, query_flags_list, axis=0)
        query_maws = vt.zipcompress(query_super_maws, query_flags_list, axis=0)

        # =======================
        # CONSTRUCT QUERY / DATABASE REPR
        # =======================
        # int_rvec = not config['dtype'].startswith('float')
        int_rvec = config['int_rvec']

        X_list = []
        _prog = ut.ProgPartial(length=len(qaids), label='new X', bs=True,
                               adjust=True)
        for aid, fx_to_wxs, fx_to_maws in _prog(
                zip(qaids, query_wxs, query_maws)):
            X = new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)
            X_list.append(X)

        # ydata_cacher = SMKCacher('ydata')
        # Y_list = ydata_cacher.tryload()
        # if Y_list is None:
        Y_list = []
        _prog = ut.ProgPartial(length=len(daids), label='new Y', bs=True,
                               adjust=True)
        for aid, fx_to_wxs, fx_to_maws in _prog(zip(daids, wx_lists, maw_lists)):
            Y = new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)
            Y_list.append(Y)
        # ydata_cacher.save(Y_list)

        # ======================
        # Add in some groundtruth
        logger.info('Add in some groundtruth')
        for Y, nid in zip(Y_list, ibs.get_annot_nids(daids)):
            Y.nid = nid
        for X, nid in zip(X_list, ibs.get_annot_nids(qaids)):
            X.nid = nid
        for Y, qual in zip(Y_list, ibs.get_annot_quality_texts(daids)):
            Y.qual = qual

        # ======================
        # Add in other properties
        for Y, vecs, kpts in zip(Y_list, vecs_list, kpts_list):
            Y.vecs = vecs
            Y.kpts = kpts

        imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
        for Y, imgid in zip(Y_list, data_uri_order):
            gpath = ut.unixjoin(imgdir, imgid + '.jpg')
            Y.gpath = gpath

        for X, vecs, kpts in zip(X_list, query_vecs, query_kpts):
            X.kpts = kpts
            X.vecs = vecs

        # ======================
        logger.info('Building inverted list')
        daids = [Y.aid for Y in Y_list]
        # wx_list = sorted(ut.list_union(*[Y.wx_list for Y in Y_list]))
        wx_list = sorted(set.union(*[Y.wx_set for Y in Y_list]))
        assert daids == data_annots.aids
        assert len(wx_list) <= config['num_words']

        wx_to_aids = smk_funcs.invert_lists(
            daids, [Y.wx_list for Y in Y_list], all_wxs=wx_list)

        # Compute IDF weights
        logger.info('Compute IDF weights')
        ndocs_total = len(daids)
        # Use only the unique number of words
        ndocs_per_word = np.array([len(set(wx_to_aids[wx])) for wx in wx_list])
        logger.info('ndocs_perword stats: ' +
                    ut.repr4(ut.get_stats(ndocs_per_word)))
        idf_per_word = smk_funcs.inv_doc_freq(ndocs_total, ndocs_per_word)
        wx_to_weight = dict(zip(wx_list, idf_per_word))
        logger.info('idf stats: ' +
                    ut.repr4(ut.get_stats(wx_to_weight.values())))

        # Filter junk
        Y_list_ = [Y for Y in Y_list if Y.qual != 'junk']

        # =======================
        # CHOOSE QUERY KERNEL
        # =======================
        params = {
            'asmk': dict(alpha=3.0, thresh=0.0),
            'bow': dict(),
            'bow2': dict(),
        }
        # method = 'bow'
        method = 'bow2'
        method = 'asmk'
        smk = SMK(wx_to_weight, method=method, **params[method])

        # Specific info for the type of query
        if method == 'asmk':
            # Make residual vectors
            if True:
                # The stacked way is 50x faster
                # TODO: extend for multi-assignment and record fxs
                flat_query_vecs = np.vstack(query_vecs)
                flat_query_wxs = np.vstack(query_wxs)
                flat_query_offsets = np.array(
                    [0] + ut.cumsum(ut.lmap(len, query_wxs)))

                flat_wxs_assign = flat_query_wxs
                flat_offsets = flat_query_offsets
                flat_vecs = flat_query_vecs
                tup = smk_funcs.compute_stacked_agg_rvecs(
                    words, flat_wxs_assign, flat_vecs, flat_offsets)
                all_agg_vecs, all_error_flags, agg_offset_list = tup
                if int_rvec:
                    all_agg_vecs = smk_funcs.cast_residual_integer(all_agg_vecs)
                agg_rvecs_list = [
                    all_agg_vecs[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]
                agg_flags_list = [
                    all_error_flags[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]

                for X, agg_rvecs, agg_flags in zip(X_list, agg_rvecs_list,
                                                   agg_flags_list):
                    X.agg_rvecs = agg_rvecs
                    X.agg_flags = agg_flags[:, None]

                flat_wxs_assign = idx_to_wxs
                flat_offsets = offset_list
                flat_vecs = all_vecs
                tup = smk_funcs.compute_stacked_agg_rvecs(
                    words, flat_wxs_assign, flat_vecs, flat_offsets)
                all_agg_vecs, all_error_flags, agg_offset_list = tup
                if int_rvec:
                    all_agg_vecs = smk_funcs.cast_residual_integer(all_agg_vecs)

                agg_rvecs_list = [
                    all_agg_vecs[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]
                agg_flags_list = [
                    all_error_flags[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]

                for Y, agg_rvecs, agg_flags in zip(Y_list, agg_rvecs_list,
                                                   agg_flags_list):
                    Y.agg_rvecs = agg_rvecs
                    Y.agg_flags = agg_flags[:, None]
            else:
                # This non-stacked way is about 500x slower
                _prog = ut.ProgPartial(label='agg Y rvecs', bs=True, adjust=True)
                for Y in _prog(Y_list_):
                    make_agg_vecs(Y, words, Y.vecs)

                _prog = ut.ProgPartial(label='agg X rvecs', bs=True, adjust=True)
                for X in _prog(X_list):
                    make_agg_vecs(X, words, X.vecs)
        elif method == 'bow2':
            # Hack for orig tf-idf bow vector
            nwords = len(words)
            for X in ut.ProgIter(X_list, label='make bow vector'):
                ensure_tf(X)
                bow_vector(X, wx_to_weight, nwords)

            for Y in ut.ProgIter(Y_list_, label='make bow vector'):
                ensure_tf(Y)
                bow_vector(Y, wx_to_weight, nwords)

        if method != 'bow2':
            for X in ut.ProgIter(X_list, 'compute X gamma'):
                X.gamma = smk.gamma(X)
            for Y in ut.ProgIter(Y_list_, 'compute Y gamma'):
                Y.gamma = smk.gamma(Y)

        # Execute matches (could go faster by enumerating candidates)
        scores_list = []
        for X in ut.ProgIter(X_list, label='query %s' % (smk,)):
            scores = [smk.kernel(X, Y) for Y in Y_list_]
            scores = np.array(scores)
            scores = np.nan_to_num(scores)
            scores_list.append(scores)

        import sklearn.metrics
        avep_list = []
        _iter = list(zip(scores_list, X_list))
        _iter = ut.ProgIter(_iter, label='evaluate %s' % (smk,))
        for scores, X in _iter:
            truth = [X.nid == Y.nid for Y in Y_list_]
            avep = sklearn.metrics.average_precision_score(truth, scores)
            avep_list.append(avep)
        avep_list = np.array(avep_list)
        mAP = np.mean(avep_list)
        logger.info('mAP = %r' % (mAP,))
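# The evaluation at the end of run_asmk_script is per-query average precision
# followed by a mean over queries. A tiny self-contained illustration of that
# reduction (same sklearn call as above; the data here is made up):
def _demo_map_eval():
    import numpy as np
    import sklearn.metrics
    scores_list = [np.array([0.9, 0.2, 0.7]), np.array([0.1, 0.8, 0.3])]
    truth_list = [[1, 0, 1], [0, 1, 0]]  # 1 iff database annot matches query nid
    avep_list = [sklearn.metrics.average_precision_score(truth, scores)
                 for truth, scores in zip(truth_list, scores_list)]
    mAP = np.mean(avep_list)
    print('mAP = %r' % (mAP,))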
if __name__ == '__main__':
    CODE_DIR = get_codedir()
    rman = ut.RepoManager(repo_urls=[
        'https://github.com/Erotemic/utool.git',
        'https://github.com/Erotemic/guitool.git',
        'https://github.com/Erotemic/plottool.git',
        'https://github.com/Erotemic/vtool.git',
        'https://github.com/bluemellophone/detecttools.git',
        'https://github.com/Erotemic/hesaff.git',
        'https://github.com/bluemellophone/pyrf.git',
        'https://github.com/Erotemic/ibeis.git',
        'https://github.com/aweinstock314/cyth.git',
        #'https://github.com/hjweide/pygist',
    ], code_dir=CODE_DIR)
    # (IBEIS_REPO_URLS, IBEIS_REPO_DIRS) = ut.repo_list(, forcessh=False)
    # ut.set_project_repos(IBEIS_REPO_URLS, IBEIS_REPO_DIRS)
    dpath_list = rman.repo_dirs  # IBEIS_REPO_DIRS
    fname = ut.truepath(sys.argv[1])
    #if len(sys.argv) >= 3:
    #    grep_dpath = ut.truepath(sys.argv[2])
    print('Classfuncs of %r' % fname)
    funcname_list = ut.list_class_funcnames(fname)
    # NOTE: the original overwrites the class funcnames with the global ones
    funcname_list = ut.list_global_funcnames(fname)
    print(ut.indentjoin(funcname_list, '\n * '))
    show_function_usage(fname, funcname_list, dpath_list)
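# A rough pure-stdlib stand-in for ut.list_global_funcnames used above,
# assuming its contract is "names of top-level defs in the file" (the real
# utool implementation may differ):
def _list_global_funcnames_sketch(fpath):
    import re
    with open(fpath) as file_:
        text = file_.read()
    return re.findall(r'^def\s+([A-Za-z_][A-Za-z0-9_]*)', text,
                      flags=re.MULTILINE)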
def dump_nx_ondisk(graph, fpath):
    agraph = make_agraph(graph)
    agraph.layout(prog='dot')
    agraph.draw(ut.truepath(fpath))
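# Example rendering of a small graph to disk. Assumes make_agraph (defined
# elsewhere in this module) converts the networkx graph to a pygraphviz
# AGraph, much like nx.nx_agraph.to_agraph does:
def _demo_dump_nx_ondisk():
    import networkx as nx
    graph = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
    dump_nx_ondisk(graph, '~/demo_graph.png')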
    # (tail of a truncated function; mod_fpath, funcname_regex, and funcname
    # are defined in the missing header)
    lines, lineno = ut.grepfile(mod_fpath, funcname_regex)
    if len(lines) > 0:
        print(mod_fpath + ' failed ' + funcname)
        print(lines)


def ensure_utool_compatible(mod_fpath):
    ut_inject_line1 = r'print, print_, printDBG, rrr, profile ='
    ut_inject_line2 = r'\(print, print_, printDBG, rrr, profile\) ='
    ut_inject_line3 = r'inject\(__name__,'
    ut_inject_lines = (ut_inject_line1, ut_inject_line2, ut_inject_line3)
    #ut.inject(__name'
    lines, lineno = ut.grepfile(mod_fpath, ut_inject_lines)
    if len(lines) == 0:
        print(mod_fpath + ' does not have utool')


#def ensure_compatible_modfpath_list(mod_fpath_list):


if __name__ == '__main__':
    package_dir = ut.truepath('~/code/ibeis/ibeis')
    if 'module_fpath_list' not in vars():
        module_fpath_list = ut.glob_python_modules(package_dir)
    for mod_fpath in module_fpath_list:
        #ensure_compatible_modfpath(mod_fpath)
        #ensure_six_moves_compatible(mod_fpath)
        #ensure_utool_compatible(mod_fpath)
        ensure_no_invalid_commands(mod_fpath)
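# The compatibility checks lean on ut.grepfile; a minimal stand-in under the
# assumed contract "return matching lines and their 1-based line numbers for
# one pattern or a sequence of patterns":
def _grepfile_sketch(fpath, patterns):
    import re
    if isinstance(patterns, str):
        patterns = [patterns]
    lines, linenos = [], []
    with open(fpath) as file_:
        for lx, line in enumerate(file_, start=1):
            if any(re.search(pat, line) for pat in patterns):
                lines.append(line.rstrip('\n'))
                linenos.append(lx)
    return lines, linenos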
def get_rawdir():
    """ Returns the standard raw data directory """
    workdir = get_workdir()
    rawdir = ut.truepath(join(workdir, '../raw'))
    return rawdir
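# ut.truepath is roughly a composition of stdlib path normalizations; a
# sketch of the assumed behavior:
def _truepath_sketch(path):
    from os.path import expanduser, normpath, realpath
    # expand ~, resolve symlinks, then collapse '..' and redundant separators
    return normpath(realpath(expanduser(path)))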
def setup_repo():
    r"""
    Creates default structure for a new repo

    CommandLine:
        python -m utool setup_repo --repo=dtool --codedir=~/code
        python -m utool setup_repo --repo=ibeis-flukematch-module --codedir=~/code --modname=ibeis_flukematch
        python -m utool setup_repo --repo=mtgmonte --codedir=~/code --modname=mtgmonte
        python -m utool setup_repo --repo=pydarknet --codedir=~/code --modname=pydarknet
        python -m utool setup_repo --repo=sandbox_utools --codedir=~/code --modname=sandbox_utools
        python -m utool setup_repo --repo=ubelt --codedir=~/code --modname=ubelt -w

        python -m utool setup_repo

    Python:
        ipython
        import utool as ut
        ut.rrrr(0); ut.setup_repo()

    Example:
        >>> # SCRIPT
        >>> from utool.util_project import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    print('\n [setup_repo]!')
    # import os
    from functools import partial
    import utool as ut

    code_dpath = ut.truepath(ut.get_argval('--code-dir', default='~/code'))
    _code_dpath = ut.unexpanduser(code_dpath)
    repo_fname = (ut.get_argval(('--repo', '--repo-name'), type_=str))
    repo_dpath = join(code_dpath, repo_fname)
    modname = ut.get_argval('--modname', default=repo_fname)
    ut.ensuredir(repo_dpath, verbose=True)
    _regencmd = ('python -m utool --tf setup_repo --repo={repo_fname} '
                 '--codedir={_code_dpath} --modname={modname}')
    flake8_noqacmd = 'flake8' + ':noqa'
    regencmd = _regencmd.format(**locals())
    with ut.ChdirContext(repo_dpath):
        # os.chdir(repo_fname)
        locals_ = locals()
        force = True

        _ensure_text = partial(ensure_text, repo_dpath='.', force=None,
                               locals_=locals_)

        _ensure_text(
            fname='todo.md',
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                # {modname} TODO File

                * Add TODOS!
                # ENDBLOCK
                ''')
        )

        _ensure_text(
            fname='README.md',
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                # {modname} README FILE
                # ENDBLOCK
                ''')
        )

        _ensure_text(
            fname='setup.py',
            chmod='+x',
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                #!/usr/bin/env python
                """
                Initially Generated By:
                    {regencmd} --force-{fname}
                """
                from __future__ import absolute_import, division, print_function, unicode_literals
                from setuptools import setup
                try:
                    from utool import util_setup
                except ImportError:
                    print('ERROR: setup requires utool')
                    raise

                INSTALL_REQUIRES = [
                    #'cython >= 0.21.1',
                    #'numpy >= 1.9.0',
                    #'scipy >= 0.16.0',
                ]

                CLUTTER_PATTERNS = [
                    # Patterns removed by python setup.py clean
                ]

                if __name__ == '__main__':
                    kwargs = util_setup.setuptools_setup(
                        setup_fpath=__file__,
                        name='{modname}',
                        packages=util_setup.find_packages(),
                        version=util_setup.parse_package_for_version('{modname}'),
                        license=util_setup.read_license('LICENSE'),
                        long_description=util_setup.parse_readme('README.md'),
                        ext_modules=util_setup.find_ext_modules(),
                        cmdclass=util_setup.get_cmdclass(),
                        #description='description of module',
                        #url='https://github.com/<username>/{repo_fname}.git',
                        #author='<author>',
                        #author_email='<author_email>',
                        keywords='',
                        install_requires=INSTALL_REQUIRES,
                        clutter_patterns=CLUTTER_PATTERNS,
                        #package_data={{'build': ut.get_dynamic_lib_globstrs()}},
                        #build_command=lambda: ut.std_build_command(dirname(__file__)),
                        classifiers=[],
                    )
                    setup(**kwargs)
                # ENDBLOCK
                '''
            )
        )

        _ensure_text(
            fname='.gitignore',
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                *.py[cod]

                # C extensions
                *.so

                # Packages
                *.egg
                *.egg-info
                dist
                build
                eggs
                parts
                bin
                var
                sdist
                develop-eggs
                .installed.cfg
                lib
                lib64
                __pycache__

                # Installer logs
                pip-log.txt

                # Print Logs
                logs

                # Unit test / coverage reports
                .coverage
                .tox
                nosetests.xml

                # Translations
                *.mo

                # Mr Developer
                .mr.developer.cfg
                .project
                .pydevproject
                .DS_Store
                *.dump.txt
                *.sqlite3

                # profiler
                *.lprof
                *.prof

                *.flann
                *.npz

                # utool output
                _timeings.txt
                failed.txt
                *.orig
                _doc
                timeings.txt
                failed_doctests.txt
                # ENDBLOCK
                '''
            )
        )

        _ensure_text(
            fname=join(repo_dpath, modname, '__init__.py'),
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                # -*- coding: utf-8 -*-
                # {flake8_noqacmd}
                """
                Initially Generated By:
                    {regencmd}
                """
                from __future__ import absolute_import, division, print_function, unicode_literals
                import sys
                __version__ = '0.0.0'

                IMPORT_TUPLES = [
                    # ('<modname>', None),
                ]
                __DYNAMIC__ = '--nodyn' not in sys.argv

                """
                python -c "import {modname}" --dump-{modname}-init
                python -c "import {modname}" --update-{modname}-init
                """

                DOELSE = False
                if __DYNAMIC__:
                    # Dynamically import listed util libraries and their members.
                    from utool._internal import util_importer
                    ignore_endswith = []
                    import_execstr = util_importer.dynamic_import(
                        __name__, IMPORT_TUPLES, ignore_endswith=ignore_endswith)
                    exec(import_execstr)
                    DOELSE = False
                else:
                    DOELSE = True
                if DOELSE:
                    # <AUTOGEN_INIT>
                    pass
                    # </AUTOGEN_INIT>
                # ENDBLOCK
                '''
            )
        )

        _ensure_text(
            fname=join(repo_dpath, modname, '__main__.py'),
            chmod='+x',
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                #!/usr/bin/env python
                # -*- coding: utf-8 -*-
                """
                Initially Generated By:
                    {regencmd}
                """
                from __future__ import absolute_import, division, print_function, unicode_literals


                def {modname}_main():
                    ignore_prefix = []
                    ignore_suffix = []
                    import utool as ut
                    ut.main_function_tester('{modname}', ignore_prefix, ignore_suffix)

                if __name__ == '__main__':
                    """
                    Usage:
                        python -m {modname} <funcname>
                    """
                    print('Running {modname} main')
                    {modname}_main()
                # ENDBLOCK
                '''
            )
        )

        _ensure_text(
            fname='run_tests.py',
            chmod='+x',
            text=ut.codeblock(
                r'''
                # STARTBLOCK
                #!/usr/bin/env python
                """
                Initially Generated By:
                    {regencmd} --force-{fname}
                """
                from __future__ import absolute_import, division, print_function
                import sys
                import utool as ut


                def run_tests():
                    # Build module list and run tests
                    import sys
                    ut.change_term_title('RUN {modname} TESTS')
                    exclude_doctests_fnames = set([
                    ])
                    exclude_dirs = [
                        '_broken', 'old', 'tests', 'timeits',
                        '_scripts', '_timeits', '_doc', 'notebook',
                    ]
                    dpath_list = ['{modname}']
                    doctest_modname_list = ut.find_doctestable_modnames(
                        dpath_list, exclude_doctests_fnames, exclude_dirs)

                    coverage = ut.get_argflag(('--coverage', '--cov',))
                    if coverage:
                        import coverage
                        cov = coverage.Coverage(source=doctest_modname_list)
                        cov.start()
                        print('Starting coverage')

                        exclude_lines = [
                            'pragma: no cover',
                            'def __repr__',
                            'if self.debug:',
                            'if settings.DEBUG',
                            'raise AssertionError',
                            'raise NotImplementedError',
                            'if 0:',
                            'if ut.VERBOSE',
                            'if _debug:',
                            'if __name__ == .__main__.:',
                            'print(.*)',
                        ]
                        for line in exclude_lines:
                            cov.exclude(line)

                    for modname in doctest_modname_list:
                        exec('import ' + modname, globals())
                    module_list = [sys.modules[name] for name in doctest_modname_list]

                    nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list)

                    if coverage:
                        print('Stopping coverage')
                        cov.stop()
                        print('Saving coverage')
                        cov.save()
                        print('Generating coverage html report')
                        cov.html_report()

                    if nPass != nTotal:
                        return 1
                    else:
                        return 0

                if __name__ == '__main__':
                    import multiprocessing
                    multiprocessing.freeze_support()
                    retcode = run_tests()
                    sys.exit(retcode)
                # ENDBLOCK
                '''
            )
        )

        ut.ensuredir(join(repo_dpath, modname), verbose=True)
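# setup_repo delegates file creation to an ensure_text helper defined
# elsewhere in util_project. A hedged sketch of its assumed contract: format
# the template with the captured locals, strip the STARTBLOCK/ENDBLOCK
# markers, and write the result to repo_dpath/fname when the file is missing
# (or when regeneration is forced):
def _ensure_text_sketch(fname, text, repo_dpath='.', force=False,
                        locals_={}, chmod=None):
    import os
    fpath = os.path.join(repo_dpath, fname)
    body = text.format(fname=fname, **locals_)
    body = '\n'.join(line for line in body.splitlines()
                     if 'STARTBLOCK' not in line and 'ENDBLOCK' not in line)
    if force or not os.path.exists(fpath):
        with open(fpath, 'w') as file_:
            file_.write(body)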
import os
import utool as ut
from os.path import basename, join, split  # NOQA

app_dpath = ut.truepath('~/fsclean_indexes')
ut.ensurepath(app_dpath)
root = '/media/joncrall/media'


class Path(ut.NiceRepr):
    def __init__(self, r, n):
        # NOTE: no constructor existed in the original fragment; `r` (root
        # directory) and `n` (name relative to the root) are assumed from
        # how the properties below use them.
        self.r = r
        self.n = n

    @property
    @ut.memoize
    def abspath(self):
        return join(self.r, self.n)

    def __nice__(self):
        return (self.r, self.n)

    def __eq__(self, other):
        return self.abspath == other.abspath

    def __hash__(self):
        return hash(self.abspath)

    @property
    @ut.memoize
    def depth(self):
        return self.abspath.count(os.path.sep)
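# Usage sketch for the Path record above (relies on the assumed __init__;
# the relative name is hypothetical):
def _demo_path():
    p = Path(root, 'photos/img001.jpg')
    print(p.abspath)  # /media/joncrall/media/photos/img001.jpg
    print(p.depth)    # number of path separators in the absolute path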