Code example #1
File: _devcmds_ibeis.py Project: warunanc/ibeis
def ensure_mtest():
    """
    CommandLine:
        python dev.py -t mtest
    """
    import ibeis
    ibeis.ensure_pz_mtest()
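For orientation, here is a minimal usage sketch of the pattern the examples below share (assuming utool is imported as ut, as it is in those examples): ensure PZ_MTEST exists in the work directory, then open a controller on it.
from os.path import join
import utool as ut
import ibeis

# Fetch/build the PZ_MTEST test database if it is not in the workdir yet
workdir = ibeis.sysres.get_workdir()
if not ut.checkpath(join(workdir, 'PZ_MTEST'), verbose=True):
    ibeis.ensure_pz_mtest()
# Open an IBEIS controller on the ensured database
ibs = ibeis.opendb('PZ_MTEST')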
Code example #2
File: reset_testdbs.py Project: heroinlin/ibeis
def reset_testdbs(**kwargs):
    # Step 0) Parse Args
    default_args = {'reset_' + key: False
                    for key in six.iterkeys(TEST_DBNAMES_MAP)}
    default_args['reset_all'] = False
    default_args.update(kwargs)
    argdict = ut.parse_dict_from_argv(default_args)
    if not any(list(six.itervalues(argdict))):
        # Default behavior is to reset the small dbs
        argdict['reset_testdb0'] = True
        argdict['reset_testdb1'] = True
        argdict['reset_testdb_guiall'] = True

    # Step 1) Delete DBs to be Reset
    for key, dbname in six.iteritems(TEST_DBNAMES_MAP):
        if argdict.get('reset_' + key, False) or argdict['reset_all']:
            delete_dbdir(dbname)

    # Step 2) Ensure DBs that don't exist
    ensure_smaller_testingdbs()
    workdir = ibeis.sysres.get_workdir()
    if not ut.checkpath(join(workdir, 'PZ_MTEST'), verbose=True):
        ibeis.ensure_pz_mtest()
    if not ut.checkpath(join(workdir, 'NAUT_test'), verbose=True):
        ibeis.ensure_nauts()
    if not ut.checkpath(join(workdir, 'testdb2'), verbose=True):
        ibeis.init.sysres.ensure_testdb2()

    # Step 3) testdb1 becomes the main database
    workdir = ibeis.sysres.get_workdir()
    TESTDB1 = join(workdir, 'testdb1')
    sysres.set_default_dbdir(TESTDB1)
Code example #3
File: smk_debug.py Project: Erotemic/ibeis
def testdata_ibeis(**kwargs):
    """
    DEPRECATE
    Step 1

    Builds an ibs controller for testing

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> kwargs = {}
    """
    print(' === Test Data IBEIS ===')
    print('kwargs = ' + ut.dict_str(kwargs))
    print('[smk_debug] testdata_ibeis')
    db = kwargs.get('db', ut.get_argval('--db', str, 'PZ_MTEST'))
    #with ut.Indenter('ENSURE'):
    if db == 'PZ_MTEST':
        ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb(db=db)
    ibs._default_config()
    aggregate = kwargs.get('aggregate', ut.get_argflag(('--agg', '--aggregate')))
    nWords = kwargs.get('nWords', ut.get_argval(('--nWords', '--nCentroids'), int, default=8E3))
    nAssign = kwargs.get('nAssign', ut.get_argval(('--nAssign', '--K'), int, default=10))
    # Configs
    ibs.cfg.query_cfg.pipeline_root = 'smk'
    ibs.cfg.query_cfg.smk_cfg.aggregate = aggregate
    ibs.cfg.query_cfg.smk_cfg.smk_alpha = 3
    ibs.cfg.query_cfg.smk_cfg.smk_thresh = 0
    ibs.cfg.query_cfg.smk_cfg.vocabtrain_cfg.nWords = nWords
    ibs.cfg.query_cfg.smk_cfg.vocabassign_cfg.nAssign = nAssign
    if ut.VERYVERBOSE:
        ibs.cfg.query_cfg.smk_cfg.printme3()
    return ibs
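A hedged invocation sketch for testdata_ibeis (the keyword values are illustrative; each one overrides the matching --db, --nWords, or --nAssign command-line default read inside the function):
# Hypothetical call mirroring the Example docstring above
ibs = testdata_ibeis(db='PZ_MTEST', nWords=8000, nAssign=10, aggregate=False)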
Code example #4
File: _devcmds_ibeis.py Project: heroinlin/ibeis
def ensure_mtest():
    """
    CommandLine:
        python dev.py -t mtest
    """
    import ibeis
    ibeis.ensure_pz_mtest()
Code example #5
File: run_tests.py Project: whaozl/ibeis
def ensure_testing_data():
    from ibeis.tests import reset_testdbs
    print('Making sure test data exists')
    import ibeis
    from os.path import join
    ut.change_term_title('ENSURE IBEIS TESTDATA')
    reset_testdbs.reset_testdbs()
    workdir = ibeis.get_workdir()
    if not ut.checkpath(join(workdir, 'PZ_MTEST')):
        ibeis.ensure_pz_mtest()
    if not ut.checkpath(join(workdir, 'NAUT_test')):
        ibeis.ensure_nauts()
    if not ut.checkpath(join(workdir, 'wd_peter2')):
        ibeis.ensure_wilddogs()
Code example #6
File: smk1.py Project: Erotemic/ibeis
def testdata():
    ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb('PZ_MTEST')
    # Pandas Annotation Dataframe
    annots_df = make_annot_df(ibs)
    valid_aids = annots_df.index
    # Training set
    taids = valid_aids[:]
    # Database set
    daids  = valid_aids[1:]
    # Search set
    #qaids = valid_aids[0::2]
    qaids = valid_aids[0:1]
    default = 2E4  # previously tried defaults: 1000, 5, 95000
    nWords = utool.get_argval(('--nWords', '--nCentroids'), int, default=default)
    return ibs, annots_df, taids, daids, qaids, nWords
Code example #7
File: smk1.py Project: whaozl/ibeis
def testdata():
    ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb('PZ_MTEST')
    # Pandas Annotation Dataframe
    annots_df = make_annot_df(ibs)
    valid_aids = annots_df.index
    # Training set
    taids = valid_aids[:]
    # Database set
    daids = valid_aids[1:]
    # Search set
    #qaids = valid_aids[0::2]
    qaids = valid_aids[0:1]
    default = 2E4  # previously tried defaults: 1000, 5, 95000
    nWords = utool.get_argval(('--nWords', '--nCentroids'),
                              int,
                              default=default)
    return ibs, annots_df, taids, daids, qaids, nWords
Code example #8
File: reset_testdbs.py Project: warunanc/ibeis
def reset_testdbs(**kwargs):
    # Step 0) Parse Args
    import ibeis
    ibeis.ENABLE_WILDBOOK_SIGNAL = False
    default_args = {
        'reset_' + key: False
        for key in six.iterkeys(TEST_DBNAMES_MAP)
    }
    default_args['reset_all'] = False
    default_args.update(kwargs)
    argdict = ut.parse_dict_from_argv(default_args)
    if not any(list(six.itervalues(argdict))):
        # Default behavior is to reset the small dbs
        argdict['reset_testdb0'] = True
        argdict['reset_testdb1'] = True
        argdict['reset_testdb_guiall'] = True

    # Step 1) Delete DBs to be Reset
    for key, dbname in six.iteritems(TEST_DBNAMES_MAP):
        if argdict.get('reset_' + key, False) or argdict['reset_all']:
            delete_dbdir(dbname)

    # Step 2) Ensure DBs that don't exist
    ensure_smaller_testingdbs()
    workdir = ibeis.sysres.get_workdir()
    if not ut.checkpath(join(workdir, 'PZ_MTEST'), verbose=True):
        ibeis.ensure_pz_mtest()
    if not ut.checkpath(join(workdir, 'NAUT_test'), verbose=True):
        ibeis.ensure_nauts()
    if not ut.checkpath(join(workdir, 'wd_peter2'), verbose=True):
        ibeis.ensure_wilddogs()
    if not ut.checkpath(join(workdir, 'testdb2'), verbose=True):
        ibeis.init.sysres.ensure_testdb2()

    # Step 3) testdb1 becomes the main database
    workdir = ibeis.sysres.get_workdir()
    TESTDB1 = join(workdir, 'testdb1')
    sysres.set_default_dbdir(TESTDB1)
Code example #9
File: sysres.py Project: heroinlin/ibeis
def ensure_pz_mtest_mergesplit_test():
    r"""
    Make a test database for MERGE and SPLIT cases

    CommandLine:
        python -m ibeis.init.sysres --test-ensure_pz_mtest_mergesplit_test

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest_mergesplit_test()
    """
    import ibeis
    ibeis.ensure_pz_mtest()
    workdir = ibeis.sysres.get_workdir()
    mtest_dbpath = join(workdir, 'PZ_MTEST')

    source_dbdir = mtest_dbpath
    dest_dbdir = join(workdir, 'PZ_MERGESPLIT_MTEST')

    if ut.get_argflag('--reset'):
        ut.delete(dest_dbdir)
    if ut.checkpath(dest_dbdir):
        return

    copy_ibeisdb(source_dbdir, dest_dbdir)

    ibs = ibeis.opendb('PZ_MERGESPLIT_MTEST')
    assert len(ibs.get_valid_aids()) == 119
    assert len(ibs.get_valid_nids()) == 41

    aid_list = ibs.get_valid_aids()
    aids_list, nid_list = ibs.group_annots_by_name(aid_list)
    num_aids = list(map(len, aids_list))

    # num cases wanted
    num_merge = 3
    num_split = 1
    num_combo = 1

    # num inputs needed
    num_merge_names = num_merge
    num_split_names = num_split * 2
    num_combo_names = num_combo * 3

    total_names = num_merge_names + num_split_names + num_combo_names

    modify_aids = ut.take(aids_list, ut.list_argsort(num_aids, reverse=True)[0:total_names])

    merge_nids1 = ibs.make_next_nids(num_merge, location_text='XMERGE')
    merge_nids2 = ibs.make_next_nids(num_merge, location_text='XMERGE')
    split_nid  = ibs.make_next_nids(num_split, location_text='XSPLIT')[0]
    combo_nids = ibs.make_next_nids(num_combo * 2, location_text='XCOMBO')

    # the first 3 become merge cases
    #left = 0
    #right = left + num_merge
    for aids, nid1, nid2 in zip(modify_aids[0:3], merge_nids1, merge_nids2):
        # Split each name's annots across two new names so they form a merge case
        aids_even = aids[::2]
        aids_odd = aids[1::2]
        ibs.set_annot_name_rowids(aids_even, [nid1] * len(aids_even))
        ibs.set_annot_name_rowids(aids_odd, [nid2] * len(aids_odd))

    # the next 2 names are pooled under a single nid, creating one split case
    #left = right
    #right = left + num_split_names
    for aids in modify_aids[3:5]:
        ibs.set_annot_name_rowids(aids, [split_nid] * len(aids))

    #left = right
    #right = left + num_combo_names
    # The final 3 are a combination case
    for aids in modify_aids[5:8]:
        aids_even = aids[::2]
        aids_odd = aids[1::2]
        ibs.set_annot_name_rowids(aids_even, [combo_nids[0]] * len(aids_even))
        ibs.set_annot_name_rowids(aids_odd, [combo_nids[1]] * len(aids_odd))

    final_result = ibs.unflat_map(ibs.get_annot_nids, modify_aids)
    print('final_result = %s' % (ut.list_str(final_result),))
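As the CommandLine block above indicates, this builder is normally run from the shell; the equivalent Python call, following the Example docstring (a sketch, not part of the original listing):
# Builds PZ_MERGESPLIT_MTEST alongside PZ_MTEST (pass --reset on argv to rebuild)
from ibeis.init.sysres import ensure_pz_mtest_mergesplit_test
ensure_pz_mtest_mergesplit_test()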
Code example #10
File: sysres.py Project: heroinlin/ibeis
def ensure_pz_mtest_batchworkflow_test():
    r"""
    CommandLine:
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test --reset

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest_batchworkflow_test()
    """
    import ibeis
    ibeis.ensure_pz_mtest()
    workdir = ibeis.sysres.get_workdir()
    mtest_dbpath = join(workdir, 'PZ_MTEST')

    source_dbdir = mtest_dbpath
    dest_dbdir = join(workdir, 'PZ_BATCH_WORKFLOW_MTEST')

    if ut.get_argflag('--reset'):
        ut.delete(dest_dbdir)

    if ut.checkpath(dest_dbdir):
        return
    else:
        copy_ibeisdb(source_dbdir, dest_dbdir)

    ibs = ibeis.opendb('PZ_BATCH_WORKFLOW_MTEST')
    assert len(ibs.get_valid_aids()) == 119
    assert len(ibs.get_valid_nids()) == 41

    ibs.delete_all_encounters()

    aid_list = ibs.get_valid_aids()

    unixtime_list = ibs.get_annot_image_unixtimes(aid_list)
    untimed_aids = ut.compress(aid_list, [t == -1 for t in unixtime_list])

    ibs.get_annot_groundtruth(untimed_aids, aid_list)

    aids_list, nid_list = ibs.group_annots_by_name(aid_list)

    hourdiffs_list = ibs.get_name_hourdiffs(nid_list)

    encounter_aids_list = [[] for _ in range(4)]

    encounter_idx = 0

    for hourdiffs, aids in zip(hourdiffs_list, aids_list):
        #import scipy.spatial.distance as spdist
        if len(aids) == 1:
            encounter_aids_list[encounter_idx].extend(aids)
            encounter_idx = (encounter_idx + 1) % len(encounter_aids_list)
        else:
            for chunk in list(ut.ichunks(aids, 2)):
                encounter_aids_list[encounter_idx].extend(chunk)
                encounter_idx = (encounter_idx + 1) % len(encounter_aids_list)

            #import vtool as vt
            #import networkx as netx
            #nodes = list(range(len(aids)))
            #edges_pairs = vt.pdist_argsort(hourdiffs)
            #edge_weights = -hourdiffs[hourdiffs.argsort()]
            #netx_graph = make_netx_graph(edges_pairs, nodes, edge_weights)
            #cut_edges = netx.minimum_edge_cut(netx_graph)
            #netx_graph.remove_edges_from(cut_edges)
            #components = list(netx.connected_components(netx_graph))
            #components = ut.sortedby(components, list(map(len, components)), reverse=True)
            #print(components)
            #encounter_aids_list[0].extend(components[0])
            #for compoment in components:

            # TODO do max-nway cut
        #day_diffs = spdist.squareform(hourdiffs) / 24.0
        #print(ut.numpy_str(day_diffs, precision=2, suppress_small=True))
        #import itertools
        #compare_idxs = [(r, c) for r, c in itertools.product(range(len(aids)), range(len(aids))) if (c > r)]
        #print(len(aids))
    #def make_netx_graph(edges_pairs, nodes=None, edge_weights=None):
    #    import networkx as netx
    #    node_lbls = [('id_', 'int')]

    #    edge_lbls = [('weight', 'float')]
    #    edges = [(pair[0], pair[1], weight) for pair, weight in zip(edges_pairs, edge_weights)]

    #    print('make_netx_graph')
    #    # Make a graph between the chips
    #    netx_nodes = [(ntup[0], {key[0]: val for (key, val) in zip(node_lbls, ntup[1:])})
    #                  for ntup in iter(zip(nodes))]

    #    netx_edges = [(etup[0], etup[1], {key[0]: val for (key, val) in zip(edge_lbls, etup[2:])})
    #                  for etup in iter(edges)]
    #    netx_graph = netx.Graph()
    #    netx_graph.add_nodes_from(netx_nodes)
    #    netx_graph.add_edges_from(netx_edges)
    #    return netx_graph

    # Group into encounters based on old names
    gids_list = ibs.unflat_map(ibs.get_annot_image_rowids, encounter_aids_list)
    eid_list = ibs.new_encounters_from_images(gids_list)  # NOQA

    # Remove all names
    ibs.delete_annot_nids(aid_list)
Code example #11
def ensure_pz_mtest_batchworkflow_test():
    r"""
    CommandLine:
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test --reset

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest_batchworkflow_test()
    """
    import ibeis
    ibeis.ensure_pz_mtest()
    workdir = ibeis.sysres.get_workdir()
    mtest_dbpath = join(workdir, 'PZ_MTEST')

    source_dbdir = mtest_dbpath
    dest_dbdir = join(workdir, 'PZ_BATCH_WORKFLOW_MTEST')

    if ut.get_argflag('--reset'):
        ut.delete(dest_dbdir)

    if ut.checkpath(dest_dbdir):
        return
    else:
        copy_ibeisdb(source_dbdir, dest_dbdir)

    ibs = ibeis.opendb('PZ_BATCH_WORKFLOW_MTEST')
    assert len(ibs.get_valid_aids()) == 119
    assert len(ibs.get_valid_nids()) == 41

    ibs.delete_all_imagesets()

    aid_list = ibs.get_valid_aids()

    unixtime_list = ibs.get_annot_image_unixtimes(aid_list)
    untimed_aids = ut.compress(aid_list, [t == -1 for t in unixtime_list])

    ibs.get_annot_groundtruth(untimed_aids, aid_list)

    aids_list, nid_list = ibs.group_annots_by_name(aid_list)

    hourdiffs_list = ibs.get_name_hourdiffs(nid_list)

    imageset_aids_list = [[] for _ in range(4)]

    imageset_idx = 0

    for hourdiffs, aids in zip(hourdiffs_list, aids_list):
        #import scipy.spatial.distance as spdist
        if len(aids) == 1:
            imageset_aids_list[imageset_idx].extend(aids)
            imageset_idx = (imageset_idx + 1) % len(imageset_aids_list)
        else:
            for chunk in list(ut.ichunks(aids, 2)):
                imageset_aids_list[imageset_idx].extend(chunk)
                imageset_idx = (imageset_idx + 1) % len(imageset_aids_list)

            #import vtool as vt
            #import networkx as netx
            #nodes = list(range(len(aids)))
            #edges_pairs = vt.pdist_argsort(hourdiffs)
            #edge_weights = -hourdiffs[hourdiffs.argsort()]
            #netx_graph = make_netx_graph(edges_pairs, nodes, edge_weights)
            #cut_edges = netx.minimum_edge_cut(netx_graph)
            #netx_graph.remove_edges_from(cut_edges)
            #components = list(netx.connected_components(netx_graph))
            #components = ut.sortedby(components, list(map(len, components)), reverse=True)
            #print(components)
            #imageset_aids_list[0].extend(components[0])
            #for compoment in components:

            # TODO do max-nway cut
        #day_diffs = spdist.squareform(hourdiffs) / 24.0
        #print(ut.numpy_str(day_diffs, precision=2, suppress_small=True))
        #import itertools
        #compare_idxs = [(r, c) for r, c in itertools.product(range(len(aids)), range(len(aids))) if (c > r)]
        #print(len(aids))
    #def make_netx_graph(edges_pairs, nodes=None, edge_weights=None):
    #    import networkx as netx
    #    node_lbls = [('id_', 'int')]

    #    edge_lbls = [('weight', 'float')]
    #    edges = [(pair[0], pair[1], weight) for pair, weight in zip(edges_pairs, edge_weights)]

    #    print('make_netx_graph')
    #    # Make a graph between the chips
    #    netx_nodes = [(ntup[0], {key[0]: val for (key, val) in zip(node_lbls, ntup[1:])})
    #                  for ntup in iter(zip(nodes))]

    #    netx_edges = [(etup[0], etup[1], {key[0]: val for (key, val) in zip(edge_lbls, etup[2:])})
    #                  for etup in iter(edges)]
    #    netx_graph = netx.Graph()
    #    netx_graph.add_nodes_from(netx_nodes)
    #    netx_graph.add_edges_from(netx_edges)
    #    return netx_graph

    # Group into imagesets based on old names
    gids_list = ibs.unflat_map(ibs.get_annot_image_rowids, imageset_aids_list)
    imgsetid_list = ibs.new_imagesets_from_images(gids_list)  # NOQA

    # Remove all names
    ibs.delete_annot_nids(aid_list)
Code example #12
File: scratch_old.py Project: whaozl/ibeis
import ibeis
import six
import vtool
import utool
import numpy as np
import numpy.linalg as npl  # NOQA
import pandas as pd
from vtool import clustering2 as clustertool
from vtool import nearest_neighbors as nntool
from plottool import draw_func2 as df2
np.set_printoptions(precision=2)
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
pd.set_option('display.notebook_repr_html', True)
ibeis.ensure_pz_mtest()

#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#tvec_list = np.vstack(tvecs_list)
#print(idx2_vec)

#labels, words = vtool.clustering.cached_akmeans(tvec_list, 1000, 30, cache_dir='.')
#tvecdf_list = [pd.DataFrame(vecs) for vecs in  tvecs_list]
#tvecs_df = pd.DataFrame(tvecdf_list, index=taids)
#kpts_col = pd.DataFrame(tkpts_list, index=taids, columns=['kpts'])
#vecs_col = pd.DataFrame(tvecs_list, index=taids, columns=['vecs'])
#tvecs_dflist = [pd.DataFrame(vecs, index=np.arange(len(vecs))) for vecs in tvecs_list]
#pd.concat(tvecs_dflist)
## Bui