Example 1
def preload_commands(dbdir, **kwargs):
    """ Preload commands work with command line arguments and global caches """
    #print('[main_cmd] preload_commands')
    if params.args.dump_argv:
        print(ut.dict_str(vars(params.args), sorted_=False))
    if params.args.dump_global_cache:
        ut.global_cache_dump()  # debug command, dumps to stdout
    if params.args.set_workdir is not None:
        sysres.set_workdir(params.args.set_workdir)
    if params.args.get_workdir:
        print(' Current work dir = %s' % sysres.get_workdir())
    if params.args.logdir is not None:
        sysres.set_logdir(params.args.logdir)
    if params.args.get_logdir:
        print(' Current log dir = %s' % (sysres.get_logdir(),))
    if params.args.view_logdir:
        ut.view_directory(sysres.get_logdir())
    if ut.get_argflag('--vwd'):
        vwd()
    if ut.get_argflag('--vdq'):
        print('got arg --vdq')
        vdq(dbdir)
    if kwargs.get('delete_ibsdir', False):
        ibsfuncs.delete_ibeis_database(dbdir)
    if params.args.convert:
        preload_convert_hsdb(dbdir)
    if params.args.preload_exit:
        print('[main_cmd] preload exit')
        sys.exit(1)
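The dispatch above depends on the project's params and sysres helpers. As a rough, self-contained analogue of the same flag-by-flag pattern using only the standard library (the flag names below are placeholders for illustration, not the real IBEIS options):

import argparse
import sys

def preload_commands_sketch(argv=None):
    # Parse a few illustrative flags, then act on each one in turn,
    # mirroring the flag-by-flag dispatch in preload_commands above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump-argv', action='store_true')
    parser.add_argument('--set-workdir', default=None)
    parser.add_argument('--preload-exit', action='store_true')
    args = parser.parse_args(argv)
    if args.dump_argv:
        print(vars(args))
    if args.set_workdir is not None:
        print('would set work dir to %s' % args.set_workdir)
    if args.preload_exit:
        print('[sketch] preload exit')
        sys.exit(0)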
Example 2
def add_to_win32_PATH(script_fpath, *add_path_list):
    r"""
    Writes a registry script to update the PATH variable into the sync registry

    CommandLine:
        python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"

    Example:
        >>> # SCRIPT
        >>> from utool.util_win32 import *  # NOQA
        >>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
        >>> new_path = ut.get_argval('--newpath', str, default=None)
        >>> result = add_to_win32_PATH(script_fpath, new_path)
        >>> print(result)
    """
    import utool as ut
    write_dir = dirname(script_fpath)
    key = r'[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
    rtype = 'REG_EXPAND_SZ'
    # Read current PATH values
    win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
    new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
    #new_path_list = unique_ordered(win_pathlist, rob_pathlist)
    print('\n'.join(new_path_list))
    pathtxt = pathsep.join(new_path_list)
    varval_list = [('Path', pathtxt)]
    regfile_str = make_regfile_str(key, varval_list, rtype)
    ut.view_directory(write_dir)
    print(regfile_str)
    ut.writeto(script_fpath, regfile_str, mode='wb')
    print('Please have an admin run the script. You may need to restart')
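make_regfile_str is referenced above but not included in this snippet. Below is a minimal sketch of what such a helper could produce, assuming the standard regedit export format (an illustration, not the utool implementation): REG_EXPAND_SZ values are exported as hex(2), i.e. comma-separated UTF-16LE bytes of the NUL-terminated string.

def make_regfile_str_sketch(key, varval_list, rtype):
    # Build a .reg file body assigning REG_EXPAND_SZ values under the given key.
    assert rtype == 'REG_EXPAND_SZ', 'sketch only covers REG_EXPAND_SZ'
    lines = ['Windows Registry Editor Version 5.00', '', key]
    for var, val in varval_list:
        data = (val + '\x00').encode('utf-16-le')
        hexstr = ','.join('%02x' % byte for byte in data)
        lines.append('"%s"=hex(2):%s' % (var, hexstr))
    return '\r\n'.join(lines) + '\r\n'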
Example 3
def preload_commands(dbdir, **kwargs):
    """ Preload commands work with command line arguments and global caches """
    #print('[main_cmd] preload_commands')
    if params.args.dump_argv:
        print(ut.dict_str(vars(params.args), sorted_=False))
    if params.args.dump_global_cache:
        ut.global_cache_dump()  # debug command, dumps to stdout
    if params.args.set_workdir is not None:
        sysres.set_workdir(params.args.set_workdir)
    if params.args.get_workdir:
        print(' Current work dir = %s' % sysres.get_workdir())
    if params.args.logdir is not None:
        sysres.set_logdir(params.args.logdir)
    if params.args.get_logdir:
        print(' Current log dir = %s' % (sysres.get_logdir(), ))
    if params.args.view_logdir:
        ut.view_directory(sysres.get_logdir())
    if ut.get_argflag('--vwd'):
        vwd()
    if ut.get_argflag('--vdq'):
        print('got arg --vdq')
        vdq(dbdir)
    if kwargs.get('delete_ibsdir', False):
        ibsfuncs.delete_ibeis_database(dbdir)
    if params.args.convert:
        preload_convert_hsdb(dbdir)
    if params.args.preload_exit:
        print('[main_cmd] preload exit')
        sys.exit(0)
Example 4
def vizualize_vocabulary(ibs, invindex):
    """
    Cleaned-up version of dump_word_patches. Makes IDF scatter plots and dumps
    the patches that contributed to each word.

    CommandLine:
        python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary
        python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary --vf

    Example:
        >>> from ibeis.algo.hots.smk.smk_plots import *  # NOQA
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> from ibeis.algo.hots.smk import smk_repr
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
        >>> tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
        >>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
        >>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
        >>> vizualize_vocabulary(ibs, invindex)
    """
    invindex.idx2_wxs = np.array(invindex.idx2_wxs)

    print('[smk_plots] Vizualizing vocabulary')

    # DUMPING PART --- dumps patches to disk
    figdir = ibs.get_fig_dir()
    ut.ensuredir(figdir)
    if ut.get_argflag('--vf'):
        ut.view_directory(figdir)

    # Compute Word Statistics
    metrics = compute_word_metrics(invindex)
    wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats = metrics
    #(wx2_pdist, wx2_wdist, wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats) = metrics

    #wx2_prad = {wx: pdist_stats['max'] for wx, pdist_stats in six.iteritems(wx2_pdist_stats) if 'max' in pdist_stats}
    #wx2_wrad = {wx: wdist_stats['max'] for wx, wdist_stats in six.iteritems(wx2_wdist_stats) if 'max' in wdist_stats}

    wx2_prad = {wx: stats['max'] for wx, stats in wx2_pdist_stats.items() if 'max' in stats}
    wx2_wrad = {wx: stats['max'] for wx, stats in wx2_wdist_stats.items() if 'max' in stats}
    #wx2_prad = get_metric(metrics, 'wx2_pdist_stats', 'max')
    #wx2_wrad = get_metric(metrics, 'wx2_wdist_stats', 'max')

    wx_sample1 = select_by_metric(wx2_nMembers)
    wx_sample2 = select_by_metric(wx2_prad)
    wx_sample3 = select_by_metric(wx2_wrad)

    wx_sample = wx_sample1 + wx_sample2 + wx_sample3
    overlap123 = len(wx_sample) - len(set(wx_sample))
    print('overlap123 = %r' % overlap123)
    wx_sample  = set(wx_sample)
    print('len(wx_sample) = %r' % len(wx_sample))

    #make_scatterplots(ibs, figdir, invindex, metrics)

    vocabdir = join(figdir, 'vocab_patches2')
    wx2_dpath = get_word_dpaths(vocabdir, wx_sample, metrics)

    make_wordfigures(ibs, metrics, invindex, figdir, wx_sample, wx2_dpath)
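A small stdlib-only illustration of the overlap bookkeeping above: concatenating the three samples and comparing against the deduplicated set counts how many selections were repeats.

wx_sample1, wx_sample2, wx_sample3 = [1, 2, 3], [2, 4], [3, 5]
wx_sample = wx_sample1 + wx_sample2 + wx_sample3   # [1, 2, 3, 2, 4, 3, 5]
overlap123 = len(wx_sample) - len(set(wx_sample))  # 2 repeated selections (2 and 3)
wx_sample = set(wx_sample)                         # {1, 2, 3, 4, 5}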
Example 5
def view_file_in_directory(fpaths):
    import utool as ut
    fpaths = ut.ensure_iterable(fpaths)
    fnames = [basename(f) for f in fpaths]
    dpaths = [dirname(f) for f in fpaths]
    dpath_to_fnames = ut.group_items(fnames, dpaths)
    for dpath, fnames in dpath_to_fnames.items():
        ut.view_directory(dpath, fnames[0], verbose=False)
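Roughly what the grouping step above does, written with the standard library only (assuming ut.group_items pairs each value with its corresponding key):

from collections import defaultdict
from os.path import basename, dirname

def group_by_directory(fpaths):
    # Map each parent directory to the file names it contains.
    dpath_to_fnames = defaultdict(list)
    for fpath in fpaths:
        dpath_to_fnames[dirname(fpath)].append(basename(fpath))
    return dpath_to_fnames

# group_by_directory(['/tmp/a.txt', '/tmp/b.txt', '/var/log/c.log'])
# -> {'/tmp': ['a.txt', 'b.txt'], '/var/log': ['c.log']}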
Example 6
def preload_commands(dbdir, **kwargs):
    """ Preload commands work with command line arguments and global caches """
    # logger.info('[main_cmd] preload_commands')
    params.parse_args()
    if params.args.dump_argv:
        logger.info(ut.repr2(vars(params.args), sorted_=False))
    if params.args.dump_global_cache:
        ut.global_cache_dump()  # debug command, dumps to stdout
    if params.args.set_workdir is not None:
        sysres.set_workdir(params.args.set_workdir)
    if params.args.get_workdir:
        logger.info(' Current work dir = %s' % sysres.get_workdir())
    # if params.args.logdir is not None:
    #     sysres.set_logdir(params.args.logdir)
    if params.args.get_logdir:
        logger.info(' Current local  log dir = %s' %
                    (sysres.get_logdir_local(), ))
        logger.info(' Current global log dir = %s' %
                    (sysres.get_logdir_global(), ))
    if params.args.view_logdir:
        ut.view_directory(sysres.get_logdir_local())
        ut.view_directory(sysres.get_logdir_global())
    if params.args.view_logdir_local:
        ut.view_directory(sysres.get_logdir_local())
    if params.args.view_logdir_global:
        ut.view_directory(sysres.get_logdir_global())
    if ut.get_argflag('--vwd'):
        vwd()
    if ut.get_argflag('--vdq'):
        logger.info('got arg --vdq')
        vdq(dbdir)
    if kwargs.get('delete_ibsdir', False):
        ibsfuncs.delete_wbia_database(dbdir)
    if params.args.preload_exit:
        logger.info('[main_cmd] preload exit')
        sys.exit(0)
Example 7
def view_database_dir(back):
    """ Help -> View Directory Slots"""
    print('[back] view_database_dir')
    utool.view_directory(back.ibs.get_dbdir())
Example 8
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
script to open directory in current window manager
"""
import utool as ut

if __name__ == '__main__':
    import sys
    if len(sys.argv) == 2:
        path = sys.argv[1]
    else:
        path = None
    ut.assertpath(path)
    if ut.checkpath(path, verbose=True):
        ut.view_directory(path)
    #F:\\data\\work\\PZ_MTEST\\_ibsdb\\
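For reference, a sketch of how this script is meant to be invoked (the file name below is a placeholder; only the positional directory argument is real):

# From a shell:
#   python view_dir.py /path/to/some/directory
# Without an argument, path stays None, so ut.assertpath is expected to raise
# before ut.view_directory is ever reached.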
Example 9
def view_directory(dataset):
    ut.view_directory(dataset.dataset_dpath)
Example 10
def vdd(ibs=None, qaid_list=None):
    utool.view_directory(ibs.get_dbdir())
Example 11
def dev_dumpdb(back):
    """ Help -> Developer Mode"""
    print('[back] dev_dumpdb')
    back.ibs.db.dump()
    utool.view_directory(back.ibs._ibsdb)
    back.ibs.db.dump_tables_to_csv()
Example 12
def view_model_dir(ibs):
    logger.info('[other.detectcore] view_model_dir')
    modeldir = ibs.get_detect_modeldir()
    ut.view_directory(modeldir)
Example 13
def view_app_files_dir(back):
    print('[back] view_app_files_dir')
    utool.view_directory(utool.get_app_resource_dir('ibeis'))
Example 14
def netrun():
    r"""
    CommandLine:
        # --- UTILITY
        python -m ibeis_cnn --tf get_juction_dpath --show

        # --- DATASET BUILDING ---
        # Build Dataset Aliases
        python -m ibeis_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl --ensuredata
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show --datatype=siam-patch

        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --ensuredata --show --datatype=siam-part

        # Parts based datasets
        python -m ibeis_cnn --tf netrun --db PZ_MTEST --acfg ctrl --datatype=siam-part --ensuredata --show

        # Patch based dataset (big one)
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --ensuredata --show --vtd
        python -m ibeis_cnn --tf netrun --ds pzm4 --weights=new --arch=siaml2_128 --train --monitor
        python -m ibeis_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --veryverbose --no-flask

        # --- TRAINING ---
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --weights=new --arch=siaml2_128 --train --monitor

        python -m ibeis_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m ibeis_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m ibeis_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor --DEBUG_AUGMENTATION
        python -m ibeis_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor

        python -m ibeis_cnn --tf netrun --ds flankhack --weights=new --arch=siaml2_partmatch --train --monitor --learning_rate=.00001
        python -m ibeis_cnn --tf netrun --ds flankhack --weights=new --arch=siam_deepfaceish --train --monitor --learning_rate=.00001

        # Different ways to train mnist
        python -m ibeis_cnn --tf netrun --db mnist --weights=new --arch=mnist_siaml2 --train --monitor --datatype=siam-patch
        python -m ibeis_cnn --tf netrun --db mnist --weights=new --arch=mnist-category --train --monitor --datatype=category

        # --- INITIALIZED-TRAINING ---
        python -m ibeis_cnn --tf netrun --ds pzmtest --arch=siaml2_128 --weights=gz-gray:current --train --monitor

        # --- TESTING ---
        python -m ibeis_cnn --tf netrun --db liberty --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db PZ_Master0 --weights=combo:current --arch=siaml2_128 --testall

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.netrun import *  # NOQA
        >>> netrun()
        >>> ut.show_if_requested()
    """
    ut.colorprint('[netrun] NET RUN', 'red')

    requests, hyperparams, tags = parse_args()
    ds_tag         = tags['ds_tag']
    datatype       = tags['datatype']
    extern_ds_tag  = tags['extern_ds_tag']
    arch_tag       = tags['arch_tag']
    checkpoint_tag = tags['checkpoint_tag']

    # ----------------------------
    # Choose the main dataset
    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.grab_dataset(ds_tag, datatype)
    if extern_ds_tag is not None:
        extern_dpath = ingest_data.get_extern_training_dpath(extern_ds_tag)
    else:
        extern_dpath = None

    print('dataset.training_dpath = %r' % (dataset.training_dpath,))

    print('Dataset Alias Key: %r' % (dataset.alias_key,))
    print('Current Dataset Tag: %r' % (
        ut.invert_dict(DS_TAG_ALIAS2).get(dataset.alias_key, None),))

    if requests['ensuredata']:
        # Print alias key that maps to this particular dataset
        if ut.show_was_requested():
            interact_ = dataset.interact()  # NOQA
            return
        print('...exiting')
        sys.exit(1)

    # ----------------------------
    # Choose model architecture
    # TODO: data will need to return info about number of labels in viewpoint models
    # Specify model architecture
    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    if arch_tag == 'siam2stream':
        model = models.SiameseCenterSurroundModel(
            data_shape=dataset.data_shape,
            training_dpath=dataset.training_dpath, **hyperparams)
    elif arch_tag.startswith('siam'):
        model = models.SiameseL2(
            data_shape=dataset.data_shape,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath, **hyperparams)
    elif arch_tag == 'mnist-category':
        model = models.MNISTModel(
            data_shape=dataset.data_shape,
            output_dims=dataset.output_dims,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath, **hyperparams)
        pass
    else:
        raise ValueError('Unknown arch_tag=%r' % (arch_tag,))

    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.init_arch()

    # ----------------------------
    # Choose weight initialization
    ut.colorprint('[netrun] Setting weights', 'yellow')
    if checkpoint_tag == 'new':
        ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
        model.reinit_weights()
    else:
        checkpoint_tag = model.resolve_fuzzy_checkpoint_pattern(
            checkpoint_tag, extern_dpath)
        ut.colorprint('[netrun] * Resolving weights checkpoint_tag=%r' %
                      (checkpoint_tag,), 'lightgray')
        if extern_dpath is not None:
            model.load_extern_weights(dpath=extern_dpath,
                                      checkpoint_tag=checkpoint_tag)
        elif model.has_saved_state(checkpoint_tag=checkpoint_tag):
            model.load_model_state(checkpoint_tag=checkpoint_tag)
        else:
            model_state_fpath = model.get_model_state_fpath(
                checkpoint_tag=checkpoint_tag)
            print('model_state_fpath = %r' % (model_state_fpath,))
            ut.checkpath(model_state_fpath, verbose=True)
            print('Known checkpoints are: ' + ut.repr3(model.list_saved_checkpoints()))
            raise ValueError(('Unresolved weight init: '
                              'checkpoint_tag=%r, extern_ds_tag=%r') % (
                                  checkpoint_tag, extern_ds_tag,))

    #print('Model State:')
    #print(model.get_state_str())
    # ----------------------------
    if not model.is_train_state_initialized():
        ut.colorprint('[netrun] Need to initialize training state', 'yellow')
        X_train, y_train = dataset.subset('train')
        model.ensure_data_params(X_train, y_train)

    # Run Actions
    if requests['train']:
        ut.colorprint('[netrun] Training Requested', 'yellow')
        # parse training arguments
        config = ut.argparse_dict(dict(
            era_size=15,
            max_epochs=1200,
            rate_decay=.8,
        ))
        model.monitor_config.update(**config)
        X_train, y_train = dataset.subset('train')
        X_valid, y_valid = dataset.subset('valid')
        model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    elif requests['test']:
        #assert model.best_results['epoch'] is not None
        ut.colorprint('[netrun] Test Requested', 'yellow')
        if requests['testall']:
            ut.colorprint('[netrun]  * Testing on all data', 'lightgray')
            X_test, y_test = dataset.subset('all')
            flat_metadata = dataset.subset_metadata('all')
        else:
            ut.colorprint('[netrun]  * Testing on test subset', 'lightgray')
            X_test, y_test = dataset.subset('test')
            flat_metadata = dataset.subset_metadata('test')
        data, labels = X_test, y_test
        dataname = dataset.alias_key
        experiments.test_siamese_performance(model, data, labels,
                                             flat_metadata, dataname)
    else:
        if not ut.get_argflag('--cmd'):
            raise ValueError('nothing here. need to train or test')

    if requests['publish']:
        ut.colorprint('[netrun] Publish Requested', 'yellow')
        publish_dpath = ut.truepath('~/Dropbox/IBEIS')
        published_model_state = ut.unixjoin(
            publish_dpath, model.arch_tag + '_model_state.pkl')
        ut.copy(model.get_model_state_fpath(), published_model_state)
        ut.view_directory(publish_dpath)
        print('You need to get the dropbox link and '
              'register it into the appropriate file')
        # pip install dropbox
        # https://www.dropbox.com/developers/core/start/python
        # import dropbox  # need oauth
        #client.share('/myfile.txt', short_url=False)
        # https://cthulhu.dyn.wildme.io/public/models/siaml2_128_model_state.pkl

    if ut.get_argflag('--cmd'):
        ut.embed()
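parse_args() is not shown in this snippet; judging only from how its return values are used in netrun above, it yields roughly the following shapes (an inference for illustration, not the actual ibeis_cnn structures):

# Keys inferred from usage above; the sample values are illustrative only.
requests = {
    'ensuredata': False,  # build/inspect the dataset, then exit
    'train': True,        # fit the model on the 'train' subset
    'test': False,        # evaluate on the 'test' subset
    'testall': False,     # evaluate on all data instead of the test subset
    'publish': False,     # copy the trained model state to a publish directory
}
tags = {
    'ds_tag': 'pzmtest',        # dataset alias (e.g. from --ds)
    'datatype': 'siam-patch',   # e.g. from --datatype
    'extern_ds_tag': None,      # external dataset used for pretrained weights
    'arch_tag': 'siaml2_128',   # e.g. from --arch
    'checkpoint_tag': 'new',    # e.g. from --weights
}
hyperparams = {}                # forwarded to the model constructor as **hyperparams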
Example 15
def draw_results(ibs, testres):
    r"""
    Draws results from an experiment harness run.
    Rows store different qaids (query annotation ids)
    Cols store different configurations (algorithm parameters)

    Args:
        testres (TestResult):

    CommandLine:
        python dev.py -t custom:rrvsone_on=True,constrained_coeff=0 custom --qaid 12 --db PZ_MTEST --show --va
        python dev.py -t custom:rrvsone_on=True,constrained_coeff=.3 custom --qaid 12 --db PZ_MTEST --show --va --noqcache
        python dev.py -t custom:rrvsone_on=True custom --qaid 4 --db PZ_MTEST --show --va --noqcache
        python dev.py -t custom:rrvsone_on=True,grid_scale_factor=1 custom --qaid 12 --db PZ_MTEST --show --va --noqcache
        python dev.py -t custom:rrvsone_on=True,grid_scale_factor=1,grid_steps=1 custom --qaid 12 --db PZ_MTEST --show --va --noqcache

    CommandLine:
        python dev.py -t best --db seals2 --allgt --vz --fig-dname query_analysis_easy --show
        python dev.py -t best --db seals2 --allgt --vh --fig-dname query_analysis_hard --show

        python dev.py -t pyrscale --db PZ_MTEST --allgt --vn --fig-dname query_analysis_interesting --show
        python dev.py -t pyrscale --db testdb3 --allgt --vn --fig-dname query_analysis_interesting --vf
        python dev.py -t pyrscale --db testdb3 --allgt --vn --fig-dname query_analysis_interesting --vf --quality

        python -m ibeis.expt.experiment_drawing --test-draw_results --show --vn
        python -m ibeis.expt.experiment_drawing --test-draw_results --show --vn --db PZ_MTEST
        python -m ibeis.expt.old_storage --test-draw_results --show --db PZ_MTEST --gv

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.old_storage import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST')
        >>> result = draw_results(ibs, testres)
        >>> # verify results
        >>> print(result)
    """
    print(' --- DRAW RESULTS ---')

    # It is very inefficient to turn off caching when view_all is true

    figdir = ibs.get_fig_dir()
    ut.ensuredir(figdir)

    if ut.get_argflag(('--view-fig-directory', '--vf')):
        ut.view_directory(figdir)

    figdir_suffix = ut.get_argval('--fig-dname', type_=str, default=None)
    from os.path import join
    if figdir_suffix is not None:
        figdir = join(figdir, figdir_suffix)
        ut.ensuredir(figdir)
    #gx2_gt_timedelta
    #    cfgres_info['qx2_gf_timedelta'] = qx2_gf_timedelta

    metadata_fpath = join(figdir, 'result_metadata.shelf')
    metadata = ResultMetadata(metadata_fpath)
    #metadata.rrr()
    metadata.connect()
    metadata.sync_test_results(testres)
    #cfgstr = qreq_.get_cfgstr()
    #cfg_metadata = ensure_item(metadata, cfgstr, {})
    #avuuids = ibs.get_annot_visual_uuids(qaids)
    #avuuid2_ax = ensure_item(cfg_metadata, 'avuuid2_ax', {})
    #cfg_columns = ensure_item(cfg_metadata, 'columns', {})
    #import guitool

    # ut.argv_flag_dec(draw_rank_cdf)(ibs, testres)

    # VIZ_INDIVIDUAL_RESULTS = True
    # if VIZ_INDIVIDUAL_RESULTS:
    #     draw_match_cases(ibs, testres, metadata=metadata)

    metadata.write()
    if ut.get_argflag(('--guiview', '--gv')):
        import guitool
        guitool.ensure_qapp()
        #wgt = make_test_result_custom_api(ibs, testres)
        wgt = make_metadata_custom_api(metadata)
        wgt.show()
        wgt.raise_()
        guitool.qtapp_loop(wgt, frequency=100)
    metadata.close()

    if ut.NOT_QUIET:
        print('[DRAW_RESULT] EXIT EXPERIMENT HARNESS')
Example 16
def view_global_cache_dir(appname='default'):
    import utool
    dir_ = utool.get_global_cache_dir(appname=appname)
    utool.view_directory(dir_)
Example 17
def vizualize_vocabulary(ibs, invindex):
    """
    Cleaned-up version of dump_word_patches. Makes IDF scatter plots and dumps
    the patches that contributed to each word.

    CommandLine:
        python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary
        python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary --vf

    Example:
        >>> from ibeis.algo.hots.smk.smk_plots import *  # NOQA
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> from ibeis.algo.hots.smk import smk_repr
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
        >>> tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
        >>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
        >>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
        >>> vizualize_vocabulary(ibs, invindex)
    """
    invindex.idx2_wxs = np.array(invindex.idx2_wxs)

    print('[smk_plots] Vizualizing vocabulary')

    # DUMPING PART --- dumps patches to disk
    figdir = ibs.get_fig_dir()
    ut.ensuredir(figdir)
    if ut.get_argflag('--vf'):
        ut.view_directory(figdir)

    # Compute Word Statistics
    metrics = compute_word_metrics(invindex)
    wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats = metrics
    #(wx2_pdist, wx2_wdist, wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats) = metrics

    #wx2_prad = {wx: pdist_stats['max'] for wx, pdist_stats in six.iteritems(wx2_pdist_stats) if 'max' in pdist_stats}
    #wx2_wrad = {wx: wdist_stats['max'] for wx, wdist_stats in six.iteritems(wx2_wdist_stats) if 'max' in wdist_stats}

    wx2_prad = {
        wx: stats['max']
        for wx, stats in wx2_pdist_stats.items() if 'max' in stats
    }
    wx2_wrad = {
        wx: stats['max']
        for wx, stats in wx2_wdist_stats.items() if 'max' in stats
    }
    #wx2_prad = get_metric(metrics, 'wx2_pdist_stats', 'max')
    #wx2_wrad = get_metric(metrics, 'wx2_wdist_stats', 'max')

    wx_sample1 = select_by_metric(wx2_nMembers)
    wx_sample2 = select_by_metric(wx2_prad)
    wx_sample3 = select_by_metric(wx2_wrad)

    wx_sample = wx_sample1 + wx_sample2 + wx_sample3
    overlap123 = len(wx_sample) - len(set(wx_sample))
    print('overlap123 = %r' % overlap123)
    wx_sample = set(wx_sample)
    print('len(wx_sample) = %r' % len(wx_sample))

    #make_scatterplots(ibs, figdir, invindex, metrics)

    vocabdir = join(figdir, 'vocab_patches2')
    wx2_dpath = get_word_dpaths(vocabdir, wx_sample, metrics)

    make_wordfigures(ibs, metrics, invindex, figdir, wx_sample, wx2_dpath)