Example #1
0
def testdata_showchip():
    """Build test inputs for showing annotation chips.

    Opens the default test database and parses drawing options from the
    commandline.

    Returns:
        tuple: (ibs, aid_list, kwargs, config2_) — the controller, the
        annotation ids to show, drawing keyword args, and query params.
    """
    import ibeis
    ibs = ibeis.opendb(defaultdb='PZ_MTEST')
    # --aids/--aid on the commandline overrides the default first four aids
    aid_list = ut.get_argval(('--aids', '--aid'), type_=list, default=None)
    if aid_list is None:
        aid_list = ibs.get_valid_aids()[0:4]
    # Drawing options, each overridable from the commandline
    kwargs = {
        'weight_label': ut.get_argval('--weight_label', type_=str,
                                      default='fg_weights'),
        'annote': not ut.get_argflag('--no-annote'),
        'ori': ut.get_argflag('--ori'),
        'notitle': ut.get_argflag('--notitle'),
        'pts': ut.get_argflag('--drawpts'),
        'ell': ut.get_argflag('--drawell'),
        'ell_alpha': ut.get_argval('--ellalpha', default=.4),
        'ell_linewidth': ut.get_argval('--ell_linewidth', default=2),
    }
    ut.print_dict(kwargs)
    # Feature-weight configuration, also overridable via the commandline
    default_config = dict(
        ibeis.algo.Config.FeatureWeightConfig().parse_items())
    cfgdict = ut.argparse_dict(default_config)
    print('[viz_chip.testdata] cfgdict = %r' % (cfgdict, ))
    config2_ = ibs.new_query_params(cfgdict=cfgdict)
    print('query_cfgstr = ' + config2_.query_cfgstr)
    print('feat_cfgstr = ' + config2_.feat_cfgstr)
    print('[viz_chip.testdata] aid_list = %r' % (aid_list, ))
    return ibs, aid_list, kwargs, config2_
Example #2
0
def testdata_showchip():
    """Build test inputs for showing annotation chips (wbia variant).

    Opens the default test database and parses drawing options from the
    commandline.

    Returns:
        tuple: (ibs, aid_list, kwargs, config2_) — controller, annotation
        ids, drawing keyword args, and the parsed config dict. Note that
        unlike the ibeis variant, ``config2_`` here is the raw cfgdict,
        not a query-params object.
    """
    import wbia

    ibs = wbia.opendb(defaultdb='PZ_MTEST')
    # --aids/--aid on the commandline overrides the default first four aids
    aid_list = ut.get_argval(('--aids', '--aid'), type_=list, default=None)
    if aid_list is None:
        aid_list = ibs.get_valid_aids()[0:4]
    weight_label = ut.get_argval('--weight_label',
                                 type_=str,
                                 default='fg_weights')
    annote = not ut.get_argflag('--no-annote')
    kwargs = dict(ori=ut.get_argflag('--ori'),
                  weight_label=weight_label,
                  annote=annote)
    kwargs['notitle'] = ut.get_argflag('--notitle')
    kwargs['pts'] = ut.get_argflag('--drawpts')
    # NOTE(review): `True or ...` makes the flag dead code, so ellipses are
    # always drawn — looks like a debugging override; confirm it is intended.
    kwargs['ell'] = True or ut.get_argflag('--drawell')
    kwargs['ell_alpha'] = ut.get_argval('--ellalpha', default=0.4)
    kwargs['ell_linewidth'] = ut.get_argval('--ell_linewidth', default=2)
    kwargs['draw_lbls'] = ut.get_argval('--draw_lbls', default=True)
    logger.info('kwargs = ' + ut.repr4(kwargs, nl=True))
    # Feature-weight configuration, also overridable via the commandline
    default_config = dict(wbia.algo.Config.FeatureWeightConfig().parse_items())
    cfgdict = ut.argparse_dict(default_config)
    logger.info('[viz_chip.testdata] cfgdict = %r' % (cfgdict, ))
    config2_ = cfgdict
    logger.info('[viz_chip.testdata] aid_list = %r' % (aid_list, ))
    return ibs, aid_list, kwargs, config2_
Example #3
0
def detect_feats_main():
    """Detect hesaff keypoints on a demo image and optionally display them.

    Commandline: ``--fname`` selects the test image; ``--show`` opens an
    interactive keypoint viewer.
    """
    import pyhesaff
    from pyhesaff._pyhesaff import grab_test_imgpath
    from pyhesaff._pyhesaff import argparse_hesaff_params
    import cv2
    import ubelt as ub

    img_fpath = grab_test_imgpath(ub.argval('--fname', default='astro.png'))
    kwargs = argparse_hesaff_params()
    print('kwargs = %r' % (kwargs, ))

    kpts, vecs = pyhesaff.detect_feats(img_fpath, **kwargs)

    if ub.argflag('--show'):
        import utool as ut
        import plottool as pt
        # Show keypoints over the original image
        img_bgr = cv2.imread(img_fpath)
        default_showkw = {
            'ori': False,
            'ell': True,
            'ell_linewidth': 2,
            'ell_alpha': .4,
            'ell_color': 'distinct',
        }
        print('default_showkw = %r' % (default_showkw, ))
        # Allow drawing defaults to be overridden from the commandline
        showkw = ut.argparse_dict(default_showkw)
        pt.interact_keypoints.ishow_keypoints(img_bgr, kpts, vecs, **showkw)
        pt.show_if_requested()
Example #4
0
def argparse_hesaff_params():
    """Parse hesaff detector parameters from the commandline.

    Starts from ``get_hesaff_default_params()`` and lets each value be
    overridden via commandline flags (short aliases supported). Falls
    back to the unmodified defaults if utool is unavailable or argument
    parsing raises.

    Returns:
        dict: hesaff detector parameters
    """
    # BUG FIX: this used to assign alias_dict twice, so the second literal
    # replaced the first and the 'affine_invariance' -> 'ai' alias was
    # silently lost. Both aliases now live in a single dict.
    alias_dict = {
        'affine_invariance': 'ai',
        'rotation_invariance': 'ri',
    }
    default_dict_ = get_hesaff_default_params()
    try:
        import utool as ut
        hesskw = ut.argparse_dict(default_dict_, alias_dict=alias_dict)
    except Exception as ex:
        # Best-effort: without utool just return the defaults unchanged
        print('ex = {!r}'.format(ex))
        return default_dict_
    return hesskw
def testdata_matching_affine_inliers():
    """Generate keypoint matches plus their affine-consistent inlier set.

    Loads two test images, computes ratio-test matches, then estimates the
    best affine inliers. Thresholds and filenames are commandline tunable.
    """
    import vtool.tests.dummy as dummy
    import vtool as vt
    # Spatial verification thresholds
    scale_thresh = 2.0
    xy_thresh = ut.get_argval('--xy-thresh', type_=float, default=.01)
    dlen_sqrd2 = 447271.015
    ori_thresh = 1.57
    xy_thresh_sqrd = dlen_sqrd2 * xy_thresh
    # Feature extraction params and the two images to match
    featkw = ut.argparse_dict(vt.get_extract_features_default_params())
    fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png')
    fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png')
    matchtup = dummy.testdata_ratio_matches(fname1, fname2, **featkw)
    kpts1, kpts2, fm, fs, rchip1, rchip2 = matchtup
    # Estimate the affine-consistent inliers among the matches
    aff_inliers, aff_errors, Aff = get_best_affine_inliers_(
        kpts1, kpts2, fm, fs, xy_thresh_sqrd, scale_thresh, ori_thresh)
    return kpts1, kpts2, fm, aff_inliers, rchip1, rchip2, xy_thresh_sqrd
def testdata_matching_affine_inliers():
    """Build test data for affine inlier estimation.

    NOTE(review): this is a duplicate definition that shadows the earlier
    copy at import time — confirm whether one of them can be removed.
    """
    import vtool.tests.dummy as dummy
    import vtool as vt
    # Thresholds for spatial verification
    scale_thresh, ori_thresh = 2.0, 1.57
    xy_thresh = ut.get_argval('--xy-thresh', type_=float, default=.01)
    dlen_sqrd2 = 447271.015
    xy_thresh_sqrd = dlen_sqrd2 * xy_thresh
    # Extract + match features between the two commandline-selected images
    featkw = ut.argparse_dict(vt.get_extract_features_default_params())
    fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png')
    fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png')
    ratio_matches = dummy.testdata_ratio_matches(fname1, fname2, **featkw)
    (kpts1, kpts2, fm, fs, rchip1, rchip2) = ratio_matches
    # Keep only the matches consistent with the best affine hypothesis
    aff_inliers, aff_errors, Aff = get_best_affine_inliers_(
        kpts1, kpts2, fm, fs, xy_thresh_sqrd, scale_thresh, ori_thresh)
    return kpts1, kpts2, fm, aff_inliers, rchip1, rchip2, xy_thresh_sqrd
Example #7
0
def testdata_showchip():
    """Load a test database and parse chip-drawing options from argv.

    Returns:
        tuple: (ibs, aid_list, kwargs, config2_) — controller, annotation
        ids, drawing keyword args, and derived query parameters.
    """
    import ibeis

    ibs = ibeis.opendb(defaultdb='PZ_MTEST')
    # Annotation ids: commandline override, else the first four valid aids
    aid_list = ut.get_argval(('--aids', '--aid'), type_=list, default=None)
    if aid_list is None:
        aid_list = ibs.get_valid_aids()[0:4]
    weight_label = ut.get_argval('--weight_label', type_=str, default='fg_weights')
    annote = not ut.get_argflag('--no-annote')
    kwargs = dict(ori=ut.get_argflag('--ori'), weight_label=weight_label, annote=annote)
    # Boolean drawing toggles, each driven by a commandline flag
    for key, flag in [('notitle', '--notitle'),
                      ('pts', '--drawpts'),
                      ('ell', '--drawell')]:
        kwargs[key] = ut.get_argflag(flag)
    kwargs['ell_alpha'] = ut.get_argval('--ellalpha', default=.4)
    kwargs['ell_linewidth'] = ut.get_argval('--ell_linewidth', default=2)
    ut.print_dict(kwargs)
    # Feature-weight config, overridable from the commandline
    default_config = dict(ibeis.algo.Config.FeatureWeightConfig().parse_items())
    cfgdict = ut.argparse_dict(default_config)
    print('[viz_chip.testdata] cfgdict = %r' % (cfgdict,))
    config2_ = ibs.new_query_params(cfgdict=cfgdict)
    print('query_cfgstr = ' + config2_.query_cfgstr)
    print('feat_cfgstr = ' + config2_.feat_cfgstr)
    print('[viz_chip.testdata] aid_list = %r' % (aid_list,))
    return ibs, aid_list, kwargs, config2_
Example #8
0
def print_acfg_list(acfg_list,
                    expanded_aids_list=None,
                    ibs=None,
                    combined=False,
                    **kwargs):
    r"""
    Print a summary of each annotation configuration in ``acfg_list``.

    Settings shared by every config are printed once; the varied settings
    are printed per config. When expanded aid lists are given, duplicate
    query/database aid sets are detected by hash and reported as
    duplicates instead of being re-printed.

    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  ibeis controller object(default = None)
        combined (bool): (default = False)
        **kwargs: forwarded to ``ibs.get_annotconfig_stats``

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    # Split settings into those shared by all configs and those that vary
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    #print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    # Maps (qaids-hash, daids-hash) -> list of config indexes already printed
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        # Only forward stat kwargs explicitly specified on the commandline
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(
            ut.argparse_dict(dict(zip(kwkeys, [None] * len(kwkeys))),
                             only_specified=True))

    hashid_list = []
    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] + ' d_cfgname=' +
                 acfg['dcfg']['_cfgname'])

        ut.colorprint(
            '+--- acfg %d / %d -- %s ---- ' %
            (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' +
              ut.dict_str(varied_compressed_dict_list[acfgx], strvals=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            # Hash the aid arrays so identical expansions print only once
            key = (ut.hashstr_arr27(qaids,
                                    'qaids'), ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                # NOTE(review): the key is only recorded when ibs is given,
                # so duplicate detection is inactive without a controller —
                # confirm that is intended.
                if ibs is not None:
                    seen_[key].append(acfgx)
                    stats_, locals_ = ibs.get_annotconfig_stats(
                        qaids,
                        daids,
                        verbose=False,
                        combined=combined,
                        **annotstats_kw)
                    hashids = (stats_['qaid_stats']['qhashid'],
                               stats_['daid_stats']['dhashid'])
                    hashid_list.append(hashids)
                    stats_str2 = ut.dict_str(stats_,
                                             strvals=True,
                                             newlines=True,
                                             explicit=False,
                                             nobraces=False)
                    print('annot_config_stats = ' + stats_str2)
            else:
                # Same aid sets as an earlier config; just point back at it
                dupindex = seen_[key]
                print('DUPLICATE of index %r' % (dupindex, ))
                dupdict = varied_compressed_dict_list[dupindex[0]]
                print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
    print('hashid summary = ' + ut.list_str(hashid_list, nl=1))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
Example #9
0
def get_ibeis_part_siam_dataset(**kwargs):
    """
    PARTS based network data

    Builds (or loads from cache) a siamese training DataSet of annotation
    part-match pairs from an IBEIS database.

    Args:
        **kwargs: overrides for the commandline-derived data keywords
            (e.g. colorspace, acfg_name, db)

    Returns:
        DataSet: training dataset with the 'ibs' property attached

    CommandLine:
        python -m ibeis_cnn.ingest_data --test-get_ibeis_part_siam_dataset --show
        python -m ibeis_cnn.ingest_data --test-get_ibeis_part_siam_dataset --show --db PZ_Master1 --acfg_name timectrl
        python -m ibeis_cnn.ingest_data --test-get_ibeis_part_siam_dataset --show --db PZ_MTEST --acfg_name unctrl --dryrun

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.ingest_data import *  # NOQA
        >>> from ibeis_cnn import draw_results
        >>> import ibeis
        >>> kwargs = {}  # ut.argparse_dict({'max_examples': None, 'num_top': 3})
        >>> dataset = get_ibeis_part_siam_dataset(**kwargs)
        >>> ut.quit_if_noshow()
        >>> dataset.interact(ibs=dataset.getprop('ibs'))
        >>> ut.show_if_requested()
    """
    import ibeis
    # Data keywords: commandline defaults first, then explicit kwargs win
    datakw = ut.argparse_dict(
        {
            'colorspace': 'gray',
            'acfg_name': 'ctrl',
            #'db': None,
            'db': 'PZ_MTEST',
        },
        alias_dict={'acfg_name': ['acfg']},
        verbose=True)

    datakw.update(kwargs)
    print('\n\n[get_ibeis_part_siam_dataset] START')

    # The alias key identifies this exact configuration in the dataset cache
    alias_key = ut.dict_str(datakw, nl=False, explicit=True)

    dbname = datakw.pop('db')

    try:
        if NOCACHE_DATASET:
            raise Exception('forced cache off')
        # Try and short circut cached loading
        dataset = DataSet.from_alias_key(alias_key)
        # Lazily open the database only if the caller asks for 'ibs'
        dataset.setprop('ibs', lambda: ibeis.opendb(db=dbname))
        return dataset
    except Exception as ex:
        # Cache miss (or forced off): fall through and rebuild the dataset
        ut.printex(ex,
                   'alias definitions have changed. alias_key=%r' %
                   (alias_key, ),
                   iswarning=True)

    with ut.Indenter('[LOAD IBEIS DB]'):
        ibs = ibeis.opendb(db=dbname)

    # Nets dir is the root dir for all training on this data
    training_dpath = ibs.get_neuralnet_dir()
    ut.ensuredir(training_dpath)

    with ut.Indenter('[BuildDS]'):
        # Get training data pairs
        colorspace = datakw.pop('colorspace')
        (aid_pairs, label_list,
         flat_metadata) = ingest_ibeis.get_aidpairs_partmatch(ibs, **datakw)
        # Extract and cache the data, labels, and metadata
        if ut.get_argflag('--dryrun'):
            print('exiting due to dry run')
            import sys
            sys.exit(0)
        tup = ingest_ibeis.cached_part_match_training_data_fpaths(
            ibs, aid_pairs, label_list, flat_metadata, colorspace=colorspace)
        data_fpath, labels_fpath, metadata_fpath, training_dpath, data_shape = tup
        print('\n[get_ibeis_part_siam_dataset] FINISH\n\n')

    # hack for caching num_labels
    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=metadata_fpath,
        training_dpath=training_dpath,
        data_shape=data_shape,
        data_per_label=2,
        output_dims=1,
        num_labels=num_labels,
    )
    dataset.setprop('ibs', ibs)
    return dataset
Example #10
0
def get_ibeis_patch_siam_dataset(**kwargs):
    """
    Builds (or loads from cache) a siamese training DataSet of keypoint
    patch-match pairs from an IBEIS database.

    Args:
        **kwargs: overrides for the commandline-derived data keywords
            (e.g. max_examples, num_top, colorspace, acfg_name)

    Returns:
        DataSet: training dataset with the 'ibs' property attached

    CommandLine:
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show --db PZ_Master1 --acfg_name default
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show --db PZ_Master1 --acfg_name timectrl
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show --db PZ_MTEST --acfg_name unctrl --dryrun

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.ingest_data import *  # NOQA
        >>> from ibeis_cnn import draw_results
        >>> import ibeis
        >>> kwargs = {}  # ut.argparse_dict({'max_examples': None, 'num_top': 3})
        >>> dataset = get_ibeis_patch_siam_dataset(**kwargs)
        >>> ut.quit_if_noshow()
        >>> dataset.interact()
        >>> ut.show_if_requested()
    """
    # Data keywords: commandline defaults first, then explicit kwargs win
    datakw = ut.argparse_dict(
        {
            #'db': 'PZ_MTEST',
            'max_examples': None,
            #'num_top': 3,
            'num_top': None,
            'min_featweight': .8 if not ut.WIN32 else None,
            'controlled': True,
            'colorspace': 'gray',
            'acfg_name': None,
        },
        alias_dict={'acfg_name': ['acfg', 'a']},
        verbose=True)

    datakw.update(kwargs)

    #ut.get_func_kwargs(ingest_ibeis.get_aidpairs_and_matches)

    # Drop unset/overridden keys so they don't reach the ingest call
    if datakw['acfg_name'] is not None:
        del datakw['controlled']
    if datakw['max_examples'] is None:
        del datakw['max_examples']
    if datakw['num_top'] is None:
        del datakw['num_top']

    with ut.Indenter('[LOAD IBEIS DB]'):
        import ibeis
        dbname = ut.get_argval('--db', default='PZ_MTEST')
        ibs = ibeis.opendb(dbname=dbname, defaultdb='PZ_MTEST')

    # Nets dir is the root dir for all training on this data
    training_dpath = ibs.get_neuralnet_dir()
    ut.ensuredir(training_dpath)
    print('\n\n[get_ibeis_patch_siam_dataset] START')
    #log_dir = join(training_dpath, 'logs')
    #ut.start_logging(log_dir=log_dir)

    # The alias key identifies this exact configuration in the dataset cache
    alias_key = ibs.get_dbname() + ';' + ut.dict_str(
        datakw, nl=False, explicit=True)
    try:
        if NOCACHE_DATASET:
            raise Exception('forced cache off')
        # Try and short circut cached loading
        dataset = DataSet.from_alias_key(alias_key)
        # Lazily reopen the database only if the caller asks for 'ibs'
        dataset.setprop('ibs', lambda: ibeis.opendb(db=dbname))
        return dataset
    except Exception as ex:
        # Cache miss (or forced off): fall through and rebuild the dataset
        ut.printex(ex,
                   'alias definitions have changed. alias_key=%r' %
                   (alias_key, ),
                   iswarning=True)

    with ut.Indenter('[BuildDS]'):
        # Get training data pairs
        colorspace = datakw.pop('colorspace')
        patchmatch_tup = ingest_ibeis.get_aidpairs_and_matches(ibs, **datakw)
        aid1_list, aid2_list, kpts1_m_list, kpts2_m_list, fm_list, metadata_lists = patchmatch_tup
        # Extract and cache the data
        # TODO: metadata
        if ut.get_argflag('--dryrun'):
            print('exiting due to dry run')
            import sys
            sys.exit(0)
        tup = ingest_ibeis.cached_patchmetric_training_data_fpaths(
            ibs,
            aid1_list,
            aid2_list,
            kpts1_m_list,
            kpts2_m_list,
            fm_list,
            metadata_lists,
            colorspace=colorspace)
        data_fpath, labels_fpath, metadata_fpath, training_dpath, data_shape = tup
        print('\n[get_ibeis_patch_siam_dataset] FINISH\n\n')

    # hack for caching num_labels
    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=metadata_fpath,
        training_dpath=training_dpath,
        data_shape=data_shape,
        data_per_label=2,
        output_dims=1,
        num_labels=num_labels,
    )
    dataset.setprop('ibs', ibs)
    return dataset
Example #11
0
def train_aoi(output_path, data_fpath, labels_fpath):
    r"""
    Train the AoI model on a cached numpy dataset.

    Args:
        output_path: root directory for training output
        data_fpath: path to the cached data array
        labels_fpath: path to the cached labels array

    Returns:
        str: filepath of the saved model state

    CommandLine:
        python -m ibeis_cnn.train --test-train_aoi

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.train import *  # NOQA
        >>> result = train_aoi()
        >>> print(result)
    """
    # Training hyperparameters, each overridable from the commandline
    era_size = 256
    batch_size = 16
    max_epochs = era_size * 16
    hyperparams = ut.argparse_dict({
        'era_size': era_size,
        'learning_rate': .01,
        'rate_schedule': 0.75,
        'momentum': .9,
        'weight_decay': 0.0001,
        'augment_on': True,
        'augment_on_validate': True,
        'whiten_on': False,
        'max_epochs': max_epochs,
        'stopping_patience': max_epochs,
        'class_weight': None,
    })

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2('aoi', data_fpath, labels_fpath,
                                             output_path)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')
    print('dataset.training_dpath = %r' % (dataset.training_dpath, ))

    # NOTE(review): input width is data width + 4 — presumably 4 extra
    # feature columns appended to each sample; confirm against the AoI
    # data format.
    input_shape = (
        batch_size,
        dataset.data_shape[0] + 4,
    )
    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    model = AoIModel(input_shape=input_shape,
                     training_dpath=dataset.training_dpath,
                     **hyperparams)

    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.output_dims = 1
    model.input_shape = (
        None,
        dataset.data_shape[0] + 4,
    )
    model.batch_size = batch_size
    model.init_arch()

    # Resume from a previously saved state when one exists
    ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
    if model.has_saved_state():
        model.load_model_state()

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # parse training arguments
    config = ut.argparse_dict(
        dict(
            era_size=era_size,
            max_epochs=max_epochs,
            show_confusion=False,
        ))
    model.monitor_config.update(**config)

    print('\n[netrun] Model Info')
    model.print_layer_info()

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    model_path = model.save_model_state()
    return model_path
Example #12
0
def demo_refresh():
    r"""
    Demo the refresh criteria: simulate oracle reviews on demo data and
    plot the estimated probability that positive edges remain against the
    true remaining fraction.

    CommandLine:
        python -m ibeis.algo.graph.refresh demo_refresh \
                --num_pccs=40 --size=2 --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.graph.refresh import *  # NOQA
        >>> demo_refresh()
        >>> ut.show_if_requested()
    """
    from ibeis.algo.graph import demo
    # Demo-data and refresh-criteria parameters, commandline overridable
    demokw = ut.argparse_dict({'num_pccs': 50, 'size': 4})
    refreshkw = ut.argparse_funckw(RefreshCriteria)
    # make an inference object
    infr = demo.demodata_infr(size_std=0, **demokw)
    edges = list(infr.dummy_verif.find_candidate_edges(K=100))
    scores = np.array(infr.dummy_verif.predict_edges(edges))
    # Review edges in order of descending verifier score
    sortx = scores.argsort()[::-1]
    edges = ut.take(edges, sortx)
    scores = scores[sortx]
    ys = infr.match_state_df(edges)[POSTV].values
    # y_remainsum[i] = count of positive edges at positions >= i
    y_remainsum = ys[::-1].cumsum()[::-1]
    # Do oracle reviews and wait to converge
    refresh = RefreshCriteria(**refreshkw)
    xdata = []
    pprob_any = []
    rfrac_any = []
    for count, (edge, y) in enumerate(zip(edges, ys)):
        refresh.add(y, user_id='user:oracle')
        # Track the true remaining fraction vs the criteria's estimate
        rfrac_any.append(y_remainsum[count] / y_remainsum[0])
        pprob_any.append(refresh.prob_any_remain())
        xdata.append(count + 1)
        if refresh.check():
            break
    xdata = xdata
    ydatas = ut.odict([
        ('Est. probability any remain', pprob_any),
        ('Fraction remaining', rfrac_any),
    ])

    ut.quit_if_noshow()
    import plottool_ibeis as pt
    pt.qtensure()
    from ibeis.scripts.thesis import TMP_RC
    import matplotlib as mpl
    mpl.rcParams.update(TMP_RC)
    pt.multi_plot(
        xdata, ydatas, xlabel='# manual reviews', rcParams=TMP_RC, marker='',
        ylim=(0, 1), use_legend=False,
    )
    # Relabel keys for display and annotate the plot with the settings
    demokw = ut.map_keys({'num_pccs': '#PCC', 'size': 'PCC size'},
                         demokw)
    thresh = refreshkw.pop('thresh')
    refreshkw['span'] = refreshkw.pop('window')
    pt.relative_text((.02, .58 + .0), ut.get_cfg_lbl(demokw, sep=' ')[1:],
                     valign='bottom')
    pt.relative_text((.02, .68 + .0), ut.get_cfg_lbl(refreshkw, sep=' ')[1:],
                     valign='bottom')
    legend = pt.gca().legend()
    legend.get_frame().set_alpha(1.0)
    # Horizontal line marking the stopping threshold
    pt.plt.plot([xdata[0], xdata[-1]], [thresh, thresh], 'g--', label='thresh')
Example #13
0
def netrun():
    r"""
    Main driver: build/load a dataset, construct a model architecture,
    initialize weights, then train, test, and/or publish depending on the
    commandline requests parsed by ``parse_args``.

    CommandLine:
        # --- UTILITY
        python -m ibeis_cnn --tf get_juction_dpath --show

        # --- DATASET BUILDING ---
        # Build Dataset Aliases
        python -m ibeis_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl --ensuredata
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show --datatype=siam-patch

        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --ensuredata --show --datatype=siam-part

        # Parts based datasets
        python -m ibeis_cnn --tf netrun --db PZ_MTEST --acfg ctrl --datatype=siam-part --ensuredata --show

        % Patch based dataset (big one)
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --ensuredata --show --vtd
        python -m ibeis_cnn --tf netrun --ds pzm4 --weights=new --arch=siaml2_128 --train --monitor
        python -m ibeis_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --veryverbose --no-flask

        # --- TRAINING ---
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --weights=new --arch=siaml2_128 --train --monitor

        python -m ibeis_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m ibeis_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m ibeis_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor --DEBUG_AUGMENTATION
        python -m ibeis_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor

        python -m ibeis_cnn --tf netrun --ds flankhack --weights=new --arch=siaml2_partmatch --train --monitor --learning_rate=.00001
        python -m ibeis_cnn --tf netrun --ds flankhack --weights=new --arch=siam_deepfaceish --train --monitor --learning_rate=.00001

        # Different ways to train mnist
        python -m ibeis_cnn --tf netrun --db mnist --weights=new --arch=mnist_siaml2 --train --monitor --datatype=siam-patch
        python -m ibeis_cnn --tf netrun --db mnist --weights=new --arch=mnist-category --train --monitor --datatype=category

        # --- INITIALIZED-TRAINING ---
        python -m ibeis_cnn --tf netrun --ds pzmtest --arch=siaml2_128 --weights=gz-gray:current --train --monitor

        # --- TESTING ---
        python -m ibeis_cnn --tf netrun --db liberty --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db PZ_Master0 --weights=combo:current --arch=siaml2_128 --testall

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.netrun import *  # NOQA
        >>> netrun()
        >>> ut.show_if_requested()
    """
    ut.colorprint('[netrun] NET RUN', 'red')

    # Unpack the commandline-derived requests, hyperparams, and tags
    requests, hyperparams, tags = parse_args()
    ds_tag         = tags['ds_tag']
    datatype       = tags['datatype']
    extern_ds_tag  = tags['extern_ds_tag']
    arch_tag       = tags['arch_tag']
    checkpoint_tag = tags['checkpoint_tag']

    # ----------------------------
    # Choose the main dataset
    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.grab_dataset(ds_tag, datatype)
    # External dataset dir for loading pretrained weights from another run
    if extern_ds_tag is not None:
        extern_dpath = ingest_data.get_extern_training_dpath(extern_ds_tag)
    else:
        extern_dpath = None

    print('dataset.training_dpath = %r' % (dataset.training_dpath,))

    print('Dataset Alias Key: %r' % (dataset.alias_key,))
    print('Current Dataset Tag: %r' % (
        ut.invert_dict(DS_TAG_ALIAS2).get(dataset.alias_key, None),))

    if requests['ensuredata']:
        # Print alias key that maps to this particular dataset
        if ut.show_was_requested():
            interact_ = dataset.interact()  # NOQA
            return
        print('...exiting')
        sys.exit(1)

    # ----------------------------
    # Choose model architecture
    # TODO: data will need to return info about number of labels in viewpoint models
    # Specify model archichitecture
    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    if arch_tag == 'siam2stream':
        model = models.SiameseCenterSurroundModel(
            data_shape=dataset.data_shape,
            training_dpath=dataset.training_dpath, **hyperparams)
    elif arch_tag.startswith('siam'):
        model = models.SiameseL2(
            data_shape=dataset.data_shape,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath, **hyperparams)
    elif arch_tag == 'mnist-category':
        model = models.MNISTModel(
            data_shape=dataset.data_shape,
            output_dims=dataset.output_dims,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath, **hyperparams)
        pass
    else:
        raise ValueError('Unknown arch_tag=%r' % (arch_tag,))

    ut.colorprint('[netrun] Initialize archchitecture', 'yellow')
    model.init_arch()

    # ----------------------------
    # Choose weight initialization
    ut.colorprint('[netrun] Setting weights', 'yellow')
    if checkpoint_tag == 'new':
        ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
        model.reinit_weights()
    else:
        # Load weights from a (possibly fuzzy-matched) saved checkpoint,
        # preferring an external training dir when one was specified
        checkpoint_tag = model.resolve_fuzzy_checkpoint_pattern(
            checkpoint_tag, extern_dpath)
        ut.colorprint('[netrun] * Resolving weights checkpoint_tag=%r' %
                      (checkpoint_tag,), 'lightgray')
        if extern_dpath is not None:
            model.load_extern_weights(dpath=extern_dpath,
                                      checkpoint_tag=checkpoint_tag)
        elif model.has_saved_state(checkpoint_tag=checkpoint_tag):
            model.load_model_state(checkpoint_tag=checkpoint_tag)
        else:
            model_state_fpath = model.get_model_state_fpath(
                checkpoint_tag=checkpoint_tag)
            print('model_state_fpath = %r' % (model_state_fpath,))
            ut.checkpath(model_state_fpath, verbose=True)
            print('Known checkpoints are: ' + ut.repr3(model.list_saved_checkpoints()))
            raise ValueError(('Unresolved weight init: '
                              'checkpoint_tag=%r, extern_ds_tag=%r') % (
                                  checkpoint_tag, extern_ds_tag,))

    #print('Model State:')
    #print(model.get_state_str())
    # ----------------------------
    if not model.is_train_state_initialized():
        ut.colorprint('[netrun] Need to initialize training state', 'yellow')
        X_train, y_train = dataset.subset('train')
        model.ensure_data_params(X_train, y_train)

    # Run Actions
    if requests['train']:
        ut.colorprint('[netrun] Training Requested', 'yellow')
        # parse training arguments
        config = ut.argparse_dict(dict(
            era_size=15,
            max_epochs=1200,
            rate_decay=.8,
        ))
        model.monitor_config.update(**config)
        X_train, y_train = dataset.subset('train')
        X_valid, y_valid = dataset.subset('valid')
        model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    elif requests['test']:
        #assert model.best_results['epoch'] is not None
        ut.colorprint('[netrun] Test Requested', 'yellow')
        if requests['testall']:
            ut.colorprint('[netrun]  * Testing on all data', 'lightgray')
            X_test, y_test = dataset.subset('all')
            flat_metadata = dataset.subset_metadata('all')
        else:
            ut.colorprint('[netrun]  * Testing on test subset', 'lightgray')
            X_test, y_test = dataset.subset('test')
            flat_metadata = dataset.subset_metadata('test')
        data, labels = X_test, y_test
        dataname = dataset.alias_key
        experiments.test_siamese_performance(model, data, labels,
                                             flat_metadata, dataname)
    else:
        if not ut.get_argflag('--cmd'):
            raise ValueError('nothing here. need to train or test')

    if requests['publish']:
        ut.colorprint('[netrun] Publish Requested', 'yellow')
        publish_dpath = ut.truepath('~/Dropbox/IBEIS')
        published_model_state = ut.unixjoin(
            publish_dpath, model.arch_tag + '_model_state.pkl')
        ut.copy(model.get_model_state_fpath(), published_model_state)
        ut.view_directory(publish_dpath)
        print('You need to get the dropbox link and '
              'register it into the appropriate file')
        # pip install dropbox
        # https://www.dropbox.com/developers/core/start/python
        # import dropbox  # need oauth
        #client.share('/myfile.txt', short_url=False)
        # https://cthulhu.dyn.wildme.io/public/models/siaml2_128_model_state.pkl

    # Drop into an interactive shell when --cmd is given
    if ut.get_argflag('--cmd'):
        ut.embed()
Example #14
0
def parse_args():
    """
    Parse the netrun command line into run requests, hyperparameters, and
    resolved dataset/architecture tags.

    Returns:
        tuple: (requests, hyperparams, tags) where ``requests`` maps action
            names to bools, ``hyperparams`` holds training settings, and
            ``tags`` holds the alias-resolved dataset/arch identifiers.
    """
    default_ds = None
    default_arch = 'siaml2_128'
    default_weights_tag = None
    # Hard-coded test values; dead branch kept for manual IPython debugging.
    if False:
        default_ds = 'liberty'
        default_weights_tag = 'current'
        assert ut.inIPython()

    # Pull the primary tags off the command line
    ds_tag = ut.get_argval(('--dataset', '--ds'), type_=str, default=default_ds)
    datatype = ut.get_argval(('--datatype', '--dt'), type_=str,
                             default='siam-patch')
    arch_tag = ut.get_argval(('--arch', '-a'), default=default_arch)
    weights_tag = ut.get_argval(('--weights', '+w'), type_=str,
                                default=default_weights_tag)

    # Training hyperparameters, each overridable on the command line
    hyperparams = ut.argparse_dict(
        {
            'batch_size': 256,
            'learning_rate': .1,
            'momentum': .9,
            'weight_decay': 0.0001,
        },
        alias_dict={
            'weight_decay': ['decay'],
            'learning_rate': ['learn_rate'],
        },
    )

    # Which actions were requested on the command line
    requests = ut.argparse_dict({
        'train': False,
        'test': False,
        'testall': False,
        'publish': False,
        'ensuredata': False,
    })
    # --testall implies --test
    requests['test'] = requests['test'] or requests['testall']

    # A weights tag of the form "<extern_ds>:<checkpoint>" names a checkpoint
    # trained on an external dataset; otherwise the whole tag is the checkpoint.
    if weights_tag is not None and ':' in weights_tag:
        extern_ds_tag, checkpoint_tag = weights_tag.split(':')
    else:
        extern_ds_tag = None
        checkpoint_tag = weights_tag

    # Resolve shorthand aliases into canonical tag names
    ds_tag = DS_TAG_ALIAS2.get(ds_tag, ds_tag)
    extern_ds_tag = DS_TAG_ALIAS2.get(extern_ds_tag, extern_ds_tag)
    checkpoint_tag = CHECKPOINT_TAG_ALIAS.get(checkpoint_tag, checkpoint_tag)

    tags = {
        'ds_tag': ds_tag,
        'extern_ds_tag': extern_ds_tag,
        'checkpoint_tag': checkpoint_tag,
        'arch_tag': arch_tag,
        'datatype': datatype,
    }
    ut.colorprint('[netrun] * ds_tag=%r' % (ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * arch_tag=%r' % (arch_tag,), 'lightgray')
    ut.colorprint('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,), 'lightgray')
    return requests, hyperparams, tags
Exemple #15
0
def argparse_hesaff_params():
    """
    Parse hessian-affine detector parameters from the command line.

    Returns:
        dict: hesskw - the hesaff defaults, overridden by any command-line
            specified values.

    BUGFIX: the original assigned ``alias_dict`` twice, so the second literal
    overwrote the first and the 'ai' alias for affine_invariance was silently
    dropped. Both aliases are now registered in a single dict.
    """
    alias_dict = {
        'affine_invariance': 'ai',
        'rotation_invariance': 'ri',
    }
    hesskw = ut.argparse_dict(get_hesaff_default_params(),
                              alias_dict=alias_dict)
    return hesskw
Exemple #16
0
def train_classifier(output_path, data_fpath, labels_fpath):
    r"""
    Train a classifier CNN from numpy data and labels stored on disk.

    Args:
        output_path (str): directory where the dataset / training state lives
        data_fpath (str): path to the numpy data file
        labels_fpath (str): path to the numpy labels file

    Returns:
        str: model_path - filepath of the saved model state

    CommandLine:
        python -m ibeis_cnn.train --test-train_classifier

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.train import *  # NOQA
        >>> result = train_classifier()
        >>> print(result)
    """
    era_size = 16
    max_epochs = 256
    # Hyperparameters, each overridable from the command line
    hyperparams = ut.argparse_dict(
        {
            'era_size'      : era_size,
            'batch_size'    : 128,
            'learning_rate' : .01,
            'rate_schedule' : 0.75,
            'momentum'      : .9,
            'weight_decay'  : 0.0001,
            'augment_on'    : True,
            'whiten_on'     : True,
            'max_epochs'    : max_epochs,
        }
    )

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2('classifier', data_fpath, labels_fpath, output_path)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')
    print('dataset.training_dpath = %r' % (dataset.training_dpath,))

    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    model = ClassifierModel(
        data_shape=dataset.data_shape,
        training_dpath=dataset.training_dpath,
        **hyperparams)

    ut.colorprint('[netrun] Init encoder and convert labels', 'yellow')
    if hasattr(model, 'init_encoder'):
        model.init_encoder(y_train)

    # BUGFIX: message typo 'archchitecture' -> 'architecture'
    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.init_arch()

    # BUGFIX: the original printed '* Initializing new weights' unconditionally
    # even when a previously saved model state was being loaded instead.
    if model.has_saved_state():
        ut.colorprint('[netrun] * Loading saved model state', 'lightgray')
        model.load_model_state()
    else:
        ut.colorprint('[netrun] * Initializing new weights', 'lightgray')

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # Parse monitoring/training arguments from the command line
    config = ut.argparse_dict(dict(
        monitor=True,
        monitor_updates=True,
        show_confusion=True,
        era_size=era_size,
        max_epochs=max_epochs,
    ))
    model.monitor_config.update(**config)

    # Map raw labels to the integer class indices the network expects.
    if getattr(model, 'encoder', None) is not None:
        class_list = list(model.encoder.classes_)
        # dict lookup is O(1) per label vs list.index's O(n) scan
        class_to_idx = {class_: idx for idx, class_ in enumerate(class_list)}
        y_train = np.array([class_to_idx[_] for _ in y_train])
        y_valid = np.array([class_to_idx[_] for _ in y_valid])

    print('\n[netrun] Model Info')
    model.print_layer_info()

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    model_path = model.save_model_state()
    return model_path
Exemple #17
0
def argparse_hesaff_params():
    """
    Parse hessian-affine detector parameters from the command line.

    Returns:
        dict: hesskw - the hesaff defaults, overridden by any command-line
            specified values.

    BUGFIX: the original assigned ``alias_dict`` twice, so the second literal
    overwrote the first and the 'ai' alias for affine_invariance was silently
    dropped. Both aliases are now registered in a single dict.
    """
    alias_dict = {
        'affine_invariance': 'ai',
        'rotation_invariance': 'ri',
    }
    default_dict_ = get_hesaff_default_params()
    hesskw = ut.argparse_dict(default_dict_, alias_dict=alias_dict)
    return hesskw
Exemple #18
0
def print_acfg_list(acfg_list, expanded_aids_list=None, ibs=None,
                    combined=False, **kwargs):
    r"""
    Print a compressed summary of each annotation config, flagging duplicates.

    Args:
        acfg_list (list): annotation configs to report on
        expanded_aids_list (list): per-config (qaids, daids) pairs
            (default = None)
        ibs (IBEISController): ibeis controller object (default = None)
        combined (bool): passed through to get_annotconfig_stats
            (default = False)

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    # Factor the configs into a shared (non-varied) part and per-config parts.
    nonvaried_compressed_dict, varied_compressed_dict_list = (
        compress_acfg_list_for_printing(acfg_list))

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    # Maps (qaids-hash, daids-hash) -> indices already reported with stats
    seen_ = ut.ddict(list)

    # Default kwkeys for annot stats; only built (and only used) when ibs given
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        specified = ut.argparse_dict(
            dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True)
        annotstats_kw.update(specified)

    for acfgx, acfg in enumerate(acfg_list):
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] +
                 ' d_cfgname=' + acfg['dcfg']['_cfgname'])
        ut.colorprint('+--- acfg %d / %d -- %s ---- ' %
                      (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' + ut.dict_str(varied_compressed_dict_list[acfgx],
                                      strvals=True))

        if expanded_aids_list is None:
            continue
        qaids, daids = expanded_aids_list[acfgx]
        key = (ut.hashstr_arr27(qaids, 'qaids'),
               ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            # Same aid sets as an earlier config; point back to it
            dupindex = seen_[key]
            print('DUPLICATE of index %r' % (dupindex,))
            dupdict = varied_compressed_dict_list[dupindex[0]]
            print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
        elif ibs is not None:
            # NOTE: seen_ is only populated when ibs is given, so duplicate
            # detection is effectively disabled when ibs is None.
            seen_[key].append(acfgx)
            annotconfig_stats_strs, _ = ibs.get_annotconfig_stats(
                qaids, daids, verbose=True, combined=combined,
                **annotstats_kw)
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
def print_acfg_list(
    acfg_list,
    expanded_aids_list=None,
    ibs=None,
    combined=False,
    only_summary=False,
    **kwargs
):
    r"""
    Log a compressed summary of each annotation config, flagging duplicates
    and collecting per-config aid hashids into a final summary line.

    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  wbia controller object(default = None)
        combined (bool): (default = False)
        only_summary (bool): when True, suppress per-config detail output;
            only the hashid summary (and non-varied header) is logged
            (default = False)

    CommandLine:
        python -m wbia.expt.annotation_configs --exec-print_acfg_list

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.expt.annotation_configs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb('testdb1')
        >>> a = ['default']
        >>> acfg_list, expanded_aids_list = wbia.expt.experiment_helpers.get_annotcfg_list(
        >>>     ibs, acfg_name_list=a, verbose=0)
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
    """
    # Factor the configs into a shared (non-varied) part and per-config parts.
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    # logger.info('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    logger.info('non-varied aidcfg = ' + ut.repr2(nonvaried_compressed_dict))
    # Maps (qaids-hash, daids-hash) -> list of acfg indices already reported
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info; any matching **kwargs or command-line
    # specified values are forwarded to get_annotconfig_stats below.
    # Only built when ibs is given — and only used under the same guard.
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(
            ut.argparse_dict(dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True)
        )

    # Collect one (qhashid, dhashid) pair per unique expanded aid set
    hashid_list = []
    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = (
            'q_cfgname='
            + acfg['qcfg']['_cfgname']
            + ' d_cfgname='
            + acfg['dcfg']['_cfgname']
        )

        if not only_summary:
            ut.colorprint(
                '+--- acfg %d / %d -- %s ---- ' % (acfgx + 1, len(acfg_list), title),
                'gray',
            )
            logger.info('acfg = ' + ut.repr2(varied_compressed_dict_list[acfgx], si=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            key = (ut.hashstr_arr27(qaids, 'qaids'), ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    # NOTE(review): seen_ is only populated when ibs is given,
                    # so duplicate detection is disabled when ibs is None —
                    # presumably intentional since the stats require ibs.
                    seen_[key].append(acfgx)
                    stats_ = ibs.get_annotconfig_stats(
                        qaids, daids, verbose=False, combined=combined, **annotstats_kw
                    )
                    hashids = (
                        stats_['qaid_stats']['qhashid'],
                        stats_['daid_stats']['dhashid'],
                    )
                    hashid_list.append(hashids)
                    stats_str2 = ut.repr2(
                        stats_, si=True, nl=True, explicit=False, nobraces=False
                    )
                    if not only_summary:
                        logger.info('annot_config_stats = ' + stats_str2)
            else:
                # Same aid sets as an earlier config; point back to it
                dupindex = seen_[key]
                dupdict = varied_compressed_dict_list[dupindex[0]]
                if not only_summary:
                    logger.info('DUPLICATE of index %r' % (dupindex,))
                    logger.info('DUP OF acfg = ' + ut.repr2(dupdict, si=True))
    logger.info('hashid summary = ' + ut.repr2(hashid_list, nl=1))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')