Example #1
def classify_helper(weight_filepath, vector_list, index_list=None,
                    verbose=VERBOSE_SVM):
    ut.embed()  # debugging breakpoint: drops into an interactive IPython shell
    if index_list is None:
        index_list = list(range(len(vector_list)))
    # Init score and class holders
    score_dict = { index: [] for index in index_list }
    class_dict = { index: [] for index in index_list }
    # Load models
    model_tup = ut.load_cPkl(weight_filepath, verbose=verbose)
    model, scaler = model_tup
    # Normalize
    vector_list = scaler.transform(vector_list)
    # calculate decisions and predictions
    # score_list = model.decision_function(vector_list)
    score_list = model.predict_proba(vector_list)
    # Take only the positive probability
    score_list = score_list[:, 1]
    class_list = model.predict(vector_list)
    # Zip together results
    zipped = zip(index_list, score_list, class_list)
    for index, score_, class_ in zipped:
        score_dict[index].append(score_)
        class_dict[index].append(class_)
    # Return scores and classes
    return score_dict, class_dict
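
A minimal sketch of producing the weight file this helper expects. Assumptions (not from the original): the model is a scikit-learn classifier, as suggested by the predict_proba/transform calls above, and ut.save_cPkl is the writer counterpart of ut.load_cPkl; 'svm.cPkl' is a made-up path.

import numpy as np
import utool as ut
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.rand(100, 8)
y = rng.randint(0, 2, size=100)
scaler = StandardScaler().fit(X)
model = SVC(probability=True).fit(scaler.transform(X), y)
ut.save_cPkl('svm.cPkl', (model, scaler))
# classify_helper('svm.cPkl', X.tolist()) would then load this tuple; note that
# the ut.embed() at the top of classify_helper pauses in an interactive shell.
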
Example #2
 def assert_cache_hits(ibs, ismiss_list, rowid_list, kwargs_hash, **kwargs):
     # NOTE: tblname, colname, cfgkeys, and getter_func are closed over from
     # the enclosing scope; this snippet is a nested helper.
     cached_rowid_list = ut.filterfalse_items(rowid_list, ismiss_list)
     cache_ = ibs.table_cache[tblname][colname][kwargs_hash]
     # Load cached values for each rowid
     cache_vals_list = ut.dict_take_list(cache_, cached_rowid_list, None)
     db_vals_list = getter_func(ibs, cached_rowid_list, **kwargs)
     # Assert everything is valid
     msg_fmt = ut.codeblock(
         """
         [assert_cache_hits] tblname = %r
         [assert_cache_hits] colname = %r
         [assert_cache_hits] cfgkeys = %r
         [assert_cache_hits] CACHE INVALID: %r != %r
         """
     )
     msg = msg_fmt % (tblname, colname, cfgkeys, cache_vals_list, db_vals_list)
     try:
         list1 = cache_vals_list
         list2 = db_vals_list
         assert ut.lists_eq(list1, list2), msg
         # if isinstance(db_vals_list, list):
         #    assert cache_vals_list == db_vals_list, msg
         # else:
         #    assert np.all(cache_vals_list == db_vals_list), msg
     except AssertionError:
         raise
     except Exception as ex2:
         print(type(cache_vals_list))
         print(type(db_vals_list))
         ut.printex(ex2)
         ut.embed()
         raise
Example #3
def interactive_commandline_prompt(msg, decisiontype):
    prompt_fmtstr = ut.codeblock('''
        Accept system {decisiontype} decision?
        ==========

        {msg}

        ==========
        * press ENTER to ACCEPT
        * enter {no_phrase} to REJECT
        * enter {embed_phrase} to embed into ipython
        * any other inputs ACCEPT system decision
        * (input is case insensitive)
        ''')
    ans_list_embed = ['cmd', 'ipy', 'embed']
    ans_list_no = ['no', 'n']
    #ans_list_yes = ['yes', 'y']
    prompt_str = prompt_fmtstr.format(
        no_phrase=ut.conj_phrase(ans_list_no),
        embed_phrase=ut.conj_phrase(ans_list_embed),
        msg=msg,
        decisiontype=decisiontype,
    )
    prompt_block = ut.msgblock('USER_INPUT', prompt_str)
    ans = input(prompt_block).lower()
    if ans in ans_list_embed:
        # NOTE: this branch implicitly returns None after the embed session ends
        ut.embed()
        #print(ibs2.get_dbinfo_str())
        #qreq_ = ut.search_stack_for_localvar('qreq_')
        #qreq_.normalizer
    elif ans in ans_list_no:
        return False
    else:
        return True
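
A hypothetical call for illustration (the msg and decisiontype values below are invented):

accepted = interactive_commandline_prompt('aid 3 matches aid 7 (score=0.87)', 'match')
if not accepted:
    print('User rejected the system decision')
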
Example #4
def translate_ibeis_webreturn(rawreturn, success=True, code=None, message=None,
                              jQuery_callback=None, cache=None):
    if code is None:
        code = ''
    if message is None:
        message = ''
    if cache is None:
        cache = -1
    template = {
        'status': {
            'success': success,
            'code':    code,
            'message': message,
            'cache':   cache,
            #'debug': {}  # TODO
        },
        'response' : rawreturn
    }
    try:
        response = ut.to_json(template)
    except Exception:
        # if serialization fails, drop into a debug shell (response stays unbound)
        ut.embed()

    if jQuery_callback is not None and isinstance(jQuery_callback, six.string_types):
        print('[web] Including jQuery callback function: %r' % (jQuery_callback, ))
        response = '%s(%s)' % (jQuery_callback, response)
    return response
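
A hypothetical call showing the envelope this function builds (values invented; the exact JSON key order depends on ut.to_json):

resp = translate_ibeis_webreturn({'aids': [1, 2]}, success=True, code=200, message='ok')
# resp is roughly:
# {"status": {"success": true, "code": 200, "message": "ok", "cache": -1},
#  "response": {"aids": [1, 2]}}
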
Example #5
 def assert_cache_hits(ibs, ismiss_list, rowid_list, kwargs_hash, **kwargs):
     # NOTE: tblname, colname, cfgkeys, and getter_func are closed over from
     # the enclosing scope; this snippet is a nested helper.
     cached_rowid_list = ut.filterfalse_items(rowid_list, ismiss_list)
     cache_ = ibs.table_cache[tblname][colname][kwargs_hash]
     # Load cached values for each rowid
     cache_vals_list = ut.dict_take_list(cache_, cached_rowid_list, None)
     db_vals_list = getter_func(ibs, cached_rowid_list, **kwargs)
     # Assert everything is valid
     msg_fmt = ut.codeblock(
         '''
         [assert_cache_hits] tblname = %r
         [assert_cache_hits] colname = %r
         [assert_cache_hits] cfgkeys = %r
         [assert_cache_hits] CACHE INVALID: %r != %r
         '''
     )
     msg = msg_fmt % (tblname, colname, cfgkeys, cache_vals_list, db_vals_list, )
     try:
         list1 = cache_vals_list
         list2 = db_vals_list
         assert ut.lists_eq(list1, list2), msg
         #if isinstance(db_vals_list, list):
         #    assert cache_vals_list == db_vals_list, msg
         #else:
         #    assert np.all(cache_vals_list == db_vals_list), msg
     except AssertionError:
         raise
     except Exception as ex2:
         print(type(cache_vals_list))
         print(type(db_vals_list))
         ut.printex(ex2)
         ut.embed()
         raise
Example #6
def force_quit_akmeans(signal, frame):
    # FIXME OR DEPRECATE
    try:
        print(ut.unindent('''
                              --- algos ---
                              Caught Ctrl+C in:
                              function: %r
                              stacksize: %r
                              line_no: %r
                              ''') % (frame.f_code.co_name,
                                      frame.f_code.co_stacksize,
                                      frame.f_lineno))
        #exec(df2.present())
        target_frame = frame
        target_frame_coname = '_akmeans_iterate'
        while True:
            if target_frame.f_code.co_name == target_frame_coname:
                break
            if target_frame.f_code.co_name == '<module>':
                print('Traced back to module level. Missed frame: %r ' %
                      target_frame_coname)
                break
            target_frame = target_frame.f_back
            print('Is target frame?: ' + target_frame.f_code.co_name)

        fpath = target_frame.f_back.f_back.f_locals['fpath']

        #data            = target_frame.f_locals['data']
        centroids        = target_frame.f_locals['centroids']
        datax2_clusterx = target_frame.f_locals['datax2_clusterx']
        ut.save_npz(fpath + '.earlystop', datax2_clusterx, centroids)
    except Exception as ex:
        print(repr(ex))
        ut.embed()
Example #7
def embed(back):
    """ Allows for embedding in an environment with all imports """
    ibs = back.ibs
    front = back.front
    ibswgt = front
    #import IPython
    #IPython.embed()
    utool.embed()
Example #8
 def dev_embed(ibs=ibs, aid=aid, config2_=config2_):
     # import wbia.plottool as pt
     # pt.plt.ioff()
     # TODO need to disable matplotlib callbacks?
     # Causes can't re-enter readline error
     ut.embed()
     # pt.plt.ion()
     pass
Example #9
def test_zmq_task():
    """
    CommandLine:
        python -m ibeis.web.zmq_task_queue --exec-test_zmq_task
        python -b -m ibeis.web.zmq_task_queue --exec-test_zmq_task

        python -m ibeis.web.zmq_task_queue --main
        python -m ibeis.web.zmq_task_queue --main --bg
        python -m ibeis.web.zmq_task_queue --main --fg

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.zmq_task_queue import *  # NOQA
        >>> test_zmq_task()
    """
    _init_signals()
    # now start a few clients, and fire off some requests
    client_id = np.random.randint(1000)
    jobiface = JobInterface(client_id)
    receiver = JobBackend()
    # 'sysres' is needed in both the --bg and default branches below
    from ibeis.init import sysres
    if ut.get_argflag('--bg'):
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        receiver.initialize_background_processes(dbdir)
        print('[testzmq] parent process is looping forever')
        while True:
            time.sleep(1)
    elif ut.get_argflag('--fg'):
        jobiface.initialize_client_thread()
    else:
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        receiver.initialize_background_processes(dbdir)
        jobiface.initialize_client_thread()

    # Foreground test script
    print('... waiting for jobs')
    if ut.get_argflag('--cmd'):
        ut.embed()
        jobiface.queue_job()
    else:
        print('[test] ... emit test1')
        jobid1 = jobiface.queue_job('helloworld', 1)
        jobiface.wait_for_job_result(jobid1)
        #jobiface.get_job_status(jobid1)
        #jobid_list = [jobiface.queue_job('helloworld', 5) for _ in range(NUM_JOBS)]
        #jobid_list += [jobiface.queue_job('get_valid_aids')]
        jobid_list = []

        #identify_jobid = jobiface.queue_job('query_chips', [1], [3, 4, 5], cfgdict={'K': 1})
        identify_jobid = jobiface.queue_job('query_chips_simple_dict', [1], [3, 4, 5], cfgdict={'K': 1})

        for jobid in jobid_list:
            jobiface.wait_for_job_result(jobid)

        jobiface.wait_for_job_result(identify_jobid)
    print('FINISHED TEST SCRIPT')
Example #10
    def parent(model, qindex):
        """
        A common convention used in models that expose tree data structures is
        that only items in the first column have children. For that case, when
        reimplementing this function in a subclass the column of the returned
        QModelIndex would be 0.

        When reimplementing this function in a subclass, be careful to avoid
        calling QModelIndex member functions, such as QModelIndex.parent(),
        since indexes belonging to your model will simply call your
        implementation, leading to infinite recursion.

        FIXME:
            seems to segfault in here
            https://riverbankcomputing.com/pipermail/pyqt/2016-February/036977.html
            https://gist.github.com/estan/c051d1f798c4c46caa7d

        Returns:
            the parent of the model item with the given index. If the item has
            no parent, an invalid QModelIndex is returned.
        """
        # model.lazy_checks()
        if qindex.isValid():
            try:
                node = qindex.internalPointer()
                # <HACK>
                # A segfault happens in isinstance when updating rows?
                if not isinstance(node, _atn.TreeNode):
                    logger.info(
                        'WARNING: tried to access parent of %r type object' %
                        type(node))
                    return QtCore.QModelIndex()
                # assert node.__dict__, "node.__dict__=%r" % node.__dict__
                # </HACK>
                parent_node = node.get_parent()
                parent_id = parent_node.get_id()
                if parent_id == -1 or parent_id is None:
                    return QtCore.QModelIndex()
                row = parent_node.get_row()
                col = model.col_level_list.index(parent_node.get_level())
                return model.createIndex(row, col, parent_node)
            except Exception as ex:
                import utool

                with utool.embed_on_exception_context:
                    qindex_rc = (qindex.row(), qindex.column())  # NOQA
                    ut.printex(
                        ex,
                        'failed to do parenty things',
                        keys=['qindex_rc', 'model.name'],
                        tb=True,
                    )
                utool.embed()
                raise
        return QtCore.QModelIndex()
Example #11
def list_collate(inbatch):
    """
    Used for detection datasets with boxes.

    Example:
        >>> import torch
        >>> rng = np.random.RandomState(0)
        >>> inbatch = []
        >>> bsize = 4
        >>> for _ in range(bsize):
        >>>     # add an image and some dummy bboxes to the batch
        >>>     img = torch.rand(3, 8, 8)  # dummy 8x8 image
        >>>     boxes = torch.rand(rng.randint(0, 4), 4)
        >>>     item = (img, [boxes])
        >>>     inbatch.append(item)
        >>> out_batch = list_collate(inbatch)
        >>> assert len(out_batch) == 2
        >>> assert list(out_batch[0].shape) == [bsize, 3, 8, 8]
        >>> assert len(out_batch[1][0]) == bsize

    Example:
        >>> import torch
        >>> rng = np.random.RandomState(0)
        >>> inbatch = []
        >>> bsize = 4
        >>> for _ in range(bsize):
        >>>     # add an image and some dummy bboxes to the batch
        >>>     img = torch.rand(3, 8, 8)  # dummy 8x8 image
        >>>     boxes = torch.empty(0, 4)
        >>>     item = (img, [boxes])
        >>>     inbatch.append(item)
        >>> out_batch = list_collate(inbatch)
        >>> assert len(out_batch) == 2
        >>> assert list(out_batch[0].shape) == [bsize, 3, 8, 8]
        >>> assert len(out_batch[1][0]) == bsize
    """
    try:
        # if True:
        if torch.is_tensor(inbatch[0]):
            num_items = [len(item) for item in inbatch]
            if ub.allsame(num_items):
                if len(num_items) == 0 or num_items[0] == 0:
                    batch = inbatch
                else:
                    batch = default_collate(inbatch)
            else:
                batch = inbatch
        else:
            batch = [
                list_collate(item) for item in list(map(list, zip(*inbatch)))
            ]
    except Exception as ex:
        print('Failed to collate inbatch={}'.format(inbatch))
        import utool
        utool.embed()
        raise
    return batch
Example #12
def gridsearch_ratio_thresh(matches):
    import sklearn
    import sklearn.metrics
    import vtool as vt

    # Param search for vsone
    import wbia.plottool as pt

    pt.qt4ensure()

    skf = sklearn.model_selection.StratifiedKFold(n_splits=10, random_state=119372)

    y = np.array([m.annot1['nid'] == m.annot2['nid'] for m in matches])

    basis = {'ratio_thresh': np.linspace(0.6, 0.7, 50).tolist()}
    grid = ut.all_dict_combinations(basis)
    xdata = np.array(ut.take_column(grid, 'ratio_thresh'))

    def _ratio_thresh(y_true, match_list):
        # Try to find the optimal ratio threshold
        auc_list = []
        for cfgdict in ut.ProgIter(grid, lbl='gridsearch'):
            y_score = [
                match.fs.compress(match.ratio_test_flags(cfgdict)).sum()
                for match in match_list
            ]
            auc = sklearn.metrics.roc_auc_score(y_true, y_score)
            auc_list.append(auc)
        auc_list = np.array(auc_list)
        return auc_list

    auc_list = _ratio_thresh(y, matches)
    pt.plot(xdata, auc_list)
    subx, suby = vt.argsubmaxima(auc_list, xdata)
    best_ratio_thresh = subx[suby.argmax()]

    skf_results = []
    y_true = y
    for train_idx, test_idx in skf.split(matches, y):
        match_list_ = ut.take(matches, train_idx)
        y_true = y.take(train_idx)
        auc_list = _ratio_thresh(y_true, match_list_)
        subx, suby = vt.argsubmaxima(auc_list, xdata, maxima_thresh=0.8)
        best_ratio_thresh = subx[suby.argmax()]
        skf_results.append(best_ratio_thresh)
    logger.info('skf_results.append = %r' % (np.mean(skf_results),))
    import utool

    utool.embed()
Example #13
File: util_dbg.py Project: animalus/utool
 def __exit__(self, type_, value, trace):
     if trace is not None:
         print('!!!!!!!!!!!!!!!!!!!')
         print('[util_dbg] %r in context manager!: %s ' % (type_, str(value)))
         import utool
         import traceback
         #traceback.print_stack(type_, value, trace)
         traceback.print_exception(type_, value, trace)
         #parent_locals = utool.get_parent_locals()
         #execstr_parent = utool.execstr_dict(parent_locals, 'parent_locals')
         #exec(execstr_parent)
         trace_locals = trace.tb_frame.f_locals
         execstr_trace = utool.execstr_dict(trace_locals, 'trace_locals')
         exec(execstr_trace)
         utool.embed()
Example #14
 def __exit__(self, type_, value, trace):
     if trace is not None:
         print('!!! EMBED ON EXCEPTION !!!')
         print('[util_dbg] %r in context manager!: %s ' % (type_, str(value)))
         import utool
         import traceback
         #traceback.print_stack(type_, value, trace)
         traceback.print_exception(type_, value, trace)
         #parent_locals = utool.get_parent_locals()
         #execstr_parent = utool.execstr_dict(parent_locals, 'parent_locals')
         #exec(execstr_parent)
         trace_locals = trace.tb_frame.f_locals
         execstr_trace = utool.execstr_dict(trace_locals, 'trace_locals')
         exec(execstr_trace)
         utool.embed()
Example #15
        def on_key_press(self, event):
            print(event)

            if event.key == 'r':
                self.show_page()
                self.draw()

            if event.key == 'i':
                ut.embed()

            if len(self.selected_aids) == 2:
                ibs = self.ibs
                aid1, aid2 = self.selected_aids
                _rowid = ibs.get_annotmatch_rowid_from_superkey([aid1], [aid2])
                if _rowid is None:
                    _rowid = ibs.get_annotmatch_rowid_from_superkey([aid2], [aid1])
                rowid = _rowid  # NOQA
Example #16
def run_wbia():
    r"""
    CommandLine:
        python -m wbia
        python -m wbia find_installed_tomcat
        python -m wbia get_annot_groundtruth:1
    """
    import wbia  # NOQA

    # ut.set_process_title('wbia_main')
    # main_locals = wbia.main()
    # wbia.main_loop(main_locals)
    # ut.set_process_title('wbia_main')
    cmdline_varargs = ut.get_cmdline_varargs()
    if len(cmdline_varargs) > 0 and cmdline_varargs[0] == 'rsync':
        rsync_ibsdb_main()
        sys.exit(0)

    if ub.argflag('--devcmd'):
        # Hack to let devs mess around when using an installer version
        # TODO: add more hacks
        ut.embed()

    if ub.argflag('-e'):
        """
        wbia -e print -a default -t default
        """
        # Run dev script if -e given

        devmain()
        logger.info('... exiting')
        sys.exit(0)

    main_locals = main()
    execstr = main_loop(main_locals)
    # <DEBUG CODE>
    if 'back' in main_locals and CMD:
        back = main_locals['back']
        front = getattr(back, 'front', None)  # NOQA
        # front = back.front
        # ui = front.ui
    ibs = main_locals['ibs']  # NOQA
    logger.info('-- EXECSTR --')
    logger.info(ub.codeblock(execstr))
    logger.info('-- /EXECSTR --')
    exec(execstr)
Example #17
        def on_key_press(self, event):
            print(event)

            if event.key == 'r':
                self.show_page()
                self.draw()

            if event.key == 'i':
                ut.embed()

            if len(self.selected_aids) == 2:
                ibs = self.ibs
                aid1, aid2 = self.selected_aids
                _rowid = ibs.get_annotmatch_rowid_from_superkey([aid1], [aid2])
                if _rowid is None:
                    _rowid = ibs.get_annotmatch_rowid_from_superkey([aid2],
                                                                    [aid1])
                rowid = _rowid  # NOQA
Example #18
File: testem.py Project: warunanc/ibeis
def random_case_set():
    r"""
    Returns:
        tuple: (labels, pairwise_feats)

    CommandLine:
        python -m ibeis.algo.hots.testem random_case_set --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.testem import *  # NOQA
        >>> (labels, pairwise_feats) = random_case_set()
        >>> result = ('(labels, pairwise_feats) = %s' % (ut.repr2((labels, pairwise_feats)),))
        >>> print(result)
    """
    rng = np.random.RandomState(0)
    case_params = dict(num_names=5, rng=rng)
    num_annots = 600
    test_cases = [
        random_test_annot(**case_params)
        for _ in ut.ProgIter(range(num_annots), bs=1)
    ]
    pairxs = list(ut.product_nonsame(range(num_annots), range(num_annots)))
    import utool
    utool.embed()

    test_pairs = list(ut.unflat_take(test_cases, pairxs))
    cases1 = ut.instancelist(ut.take_column(test_pairs, 0), check=False)
    cases2 = ut.instancelist(ut.take_column(test_pairs, 1), check=False)
    # FIXME
    labels = labels1 = make_test_pairwise_labels2(cases1, cases2)  # NOQA

    #labels = np.array([make_test_pairwise_labels(case1, case2)
    #                   for case1, case2 in ut.ProgIter(test_pairs, bs=1)])
    pairwise_feats_ = [
        make_test_pairwise_fetaures(case1, case2, label, rng)
        for label, (case1,
                    case2) in ut.ProgIter(list(zip(labels, test_pairs)), bs=1)
    ]
    pairwise_feats = np.vstack(pairwise_feats_)
    print(ut.dict_hist(labels))
    return labels, pairwise_feats
Example #19
File: util_dbg.py Project: Erotemic/utool
 def __exit__(self, type_, value, trace):
     if trace is not None:
         print('!!! EMBED ON EXCEPTION !!!')
         print('[util_dbg] %r in context manager!: %s ' % (type_, str(value)))
         import utool as ut
         import traceback
         traceback.print_exception(type_, value, trace)
         # Grab the context of the frame where the failure occurred
         trace_globals = trace.tb_frame.f_globals
         trace_locals = trace.tb_frame.f_locals
         trace_ns = trace_globals.copy()
         trace_ns.update(trace_locals)
         # Hack to bring back self
         if 'self' in trace_ns:
             self = trace_ns['self']
         # execstr_trace_g = ut.execstr_dict(trace_globals, 'trace_globals')
         # execstr_trace_l = ut.execstr_dict(trace_locals, 'trace_locals')
         # execstr_trace = execstr_trace_g + '\n' + execstr_trace_l
         # exec(execstr_trace)
         locals().update(trace_ns)
         ut.embed()
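
For context, a minimal sketch of how this context manager is entered (mirroring the with-statement in Example #10; risky_computation is a hypothetical stand-in):

import utool as ut

with ut.embed_on_exception_context:
    risky_computation()  # any exception raised here drops into an interactive shell
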
Example #20
def web_embed(*args, **kwargs):
    ibs = current_app.ibs  # NOQA

    if False:
        from wbia.algo.graph.state import POSTV

        payload = {
            'action': 'update_task_thresh',
            'task': 'match_state',
            'decision': POSTV,
            'value': 0.95,
        }

        for graph_uuid in current_app.GRAPH_CLIENT_DICT:
            graph_client = current_app.GRAPH_CLIENT_DICT.get(graph_uuid, None)
            if graph_client is None:
                continue
            if len(graph_client.futures) > 0:
                continue
            future = graph_client.post(payload)  # NOQA
            # future.result()  # Guarantee that this has happened before calling refresh

    ut.embed()
Example #21
File: testem.py Project: Erotemic/ibeis
def random_case_set():
    r"""
    Returns:
        tuple: (labels, pairwise_feats)

    CommandLine:
        python -m ibeis.algo.hots.testem random_case_set --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.testem import *  # NOQA
        >>> (labels, pairwise_feats) = random_case_set()
        >>> result = ('(labels, pairwise_feats) = %s' % (ut.repr2((labels, pairwise_feats)),))
        >>> print(result)
    """
    rng = np.random.RandomState(0)
    case_params = dict(num_names=5, rng=rng)
    num_annots = 600
    test_cases = [random_test_annot(**case_params) for _ in ut.ProgIter(range(num_annots), bs=1)]
    pairxs = list(ut.product_nonsame(range(num_annots), range(num_annots)))
    import utool
    utool.embed()

    test_pairs = list(ut.unflat_take(test_cases, pairxs))
    cases1 = ut.make_instancelist(ut.take_column(test_pairs, 0), check=False)
    cases2 = ut.make_instancelist(ut.take_column(test_pairs, 1), check=False)
    # FIXME
    labels = labels1 = make_test_pairwise_labels2(cases1, cases2)  # NOQA

    #labels = np.array([make_test_pairwise_labels(case1, case2)
    #                   for case1, case2 in ut.ProgIter(test_pairs, bs=1)])
    pairwise_feats_ = [make_test_pairwise_fetaures(case1, case2, label, rng)
                       for label, (case1, case2) in ut.ProgIter(list(zip(labels, test_pairs)), bs=1)]
    pairwise_feats = np.vstack(pairwise_feats_)
    print(ut.dict_hist(labels))
    return labels, pairwise_feats
Example #22
def interactive_commandline_prompt(msg, decisiontype):
    prompt_fmtstr = ut.codeblock(
        '''
        Accept system {decisiontype} decision?
        ==========

        {msg}

        ==========
        * press ENTER to ACCEPT
        * enter {no_phrase} to REJECT
        * enter {embed_phrase} to embed into ipython
        * any other inputs ACCEPT system decision
        * (input is case insensitive)
        '''
    )
    ans_list_embed = ['cmd', 'ipy', 'embed']
    ans_list_no = ['no', 'n']
    #ans_list_yes = ['yes', 'y']
    prompt_str = prompt_fmtstr.format(
        no_phrase=ut.conj_phrase(ans_list_no),
        embed_phrase=ut.conj_phrase(ans_list_embed),
        msg=msg,
        decisiontype=decisiontype,
    )
    prompt_block = ut.msgblock('USER_INPUT', prompt_str)
    ans = input(prompt_block).lower()
    if ans in ans_list_embed:
        # NOTE: this branch implicitly returns None after the embed session ends
        ut.embed()
        #print(ibs2.get_dbinfo_str())
        #qreq_ = ut.search_stack_for_localvar('qreq_')
        #qreq_.normalizer
    elif ans in ans_list_no:
        return False
    else:
        return True
Example #23
def convert_ggr2018_to_wbia(ggr_path,
                            dbdir=None,
                            purge=True,
                            dry_run=False,
                            apply_updates=True,
                            **kwargs):
    r"""Convert the raw GGR2 (2018) data to an wbia database.

    Args
        ggr_path (str): Directory to folder *containing* raw GGR 2018 data
        dbdir (str): Output directory

    CommandLine:
        python -m wbia convert_ggr2018_to_wbia

    Example:
        >>> # SCRIPT
        >>> from wbia.dbio.ingest_ggr import *  # NOQA
        >>> default_ggr_path = join('/', 'data', 'wbia', 'GGR2', 'GGR2018data')
        >>> default_dbdir = join('/', 'data', 'wbia', 'GGR2-IBEIS')
        >>> dbdir = ut.get_argval('--dbdir', type_=str, default=default_dbdir)
        >>> ggr_path = ut.get_argval('--ggr', type_=str, default=default_ggr_path)
        >>> result = convert_ggr2018_to_wbia(ggr_path, dbdir=dbdir, purge=False, dry_run=True, apply_updates=False)
        >>> print(result)
    """
    ALLOWED_NUMBERS = list(range(1, 250))
    ALLOWED_LETTERS = ['A', 'B', 'C', 'D', 'E', 'F']

    ################################################################################

    if apply_updates:
        _fix_ggr2018_directory_structure(ggr_path)

    ################################################################################

    blacklist_filepath_set = set([
        join(ggr_path, 'Cameras info.numbers'),
        join(ggr_path, 'Cameras info.xlsx'),
        join(ggr_path, 'GGR_photos_MRC_29.1.18.ods'),
        join(ggr_path, 'Cameras info-2.numbers'),
    ])

    # Check root files
    direct = Directory(ggr_path)
    for filepath in direct.files(recursive=False):
        try:
            assert filepath in blacklist_filepath_set
            ut.delete(filepath)
        except AssertionError:
            logger.info('Unresolved root file found in %r' % (filepath, ))
            continue

    ################################################################################

    if purge:
        ut.delete(dbdir)
    ibs = wbia.opendb(dbdir=dbdir)

    ################################################################################

    # Check folder structure
    assert exists(ggr_path)
    direct = Directory(ggr_path, recursive=0)
    direct1_list = direct.directories()
    direct1_list.sort(key=lambda x: int(x.base()), reverse=False)
    for direct1 in direct1_list:
        if not dry_run:
            logger.info('Processing directory: %r' % (direct1, ))
        base1 = direct1.base()

        try:
            int(base1)
        except ValueError:
            logger.info('Error found in %r' % (direct1, ))
            continue

        try:
            assert len(direct1.files(recursive=False)) == 0
        except AssertionError:
            logger.info('Files found in %r' % (direct1, ))
            continue

        seen_letter_list = []
        direct1_ = Directory(direct1.absolute_directory_path, recursive=0)
        direct2_list = direct1_.directories()
        direct2_list.sort(key=lambda x: x.base(), reverse=False)
        for direct2 in direct2_list:
            base2 = direct2.base()

            try:
                assert base2.startswith(base1)
            except AssertionError:
                logger.info('Folder name heredity conflict %r with %r' %
                            (direct2, direct1))
                continue

            try:
                assert len(base2) >= 2
                assert ' ' not in base2
                number = base2[:-1]
                letter = base2[-1]
                number = int(number)
                letter = letter.upper()
                assert number in ALLOWED_NUMBERS
                assert letter in ALLOWED_LETTERS
                seen_letter_list.append(letter)
            except ValueError:
                logger.info('Error found in %r' % (direct2, ))
                continue
            except AssertionError:
                logger.info('Folder name format error found in %r' %
                            (direct2, ))
                continue

            direct2_ = Directory(direct2.absolute_directory_path,
                                 recursive=True,
                                 images=True)
            try:
                assert len(direct2_.directories()) == 0
            except AssertionError:
                logger.info('Folders exist in file only level %r' %
                            (direct2, ))
                continue

            filepath_list = sorted(direct2_.files())

            if not dry_run:
                try:
                    gid_list = ibs.add_images(filepath_list)
                    gid_list = ut.filter_Nones(gid_list)
                    gid_list = sorted(list(set(gid_list)))

                    imageset_text = 'GGR2,%d,%s' % (number, letter)
                    note_list = [
                        '%s,%05d' % (imageset_text, index + 1)
                        for index, gid in enumerate(gid_list)
                    ]
                    ibs.set_image_notes(gid_list, note_list)
                    ibs.set_image_imagesettext(gid_list,
                                               [imageset_text] * len(gid_list))
                except Exception as ex:  # NOQA
                    ut.embed()

        seen_letter_set = set(seen_letter_list)
        try:
            assert len(seen_letter_set) == len(seen_letter_list)
        except AssertionError:
            logger.info('Duplicate letters in %r with letters %r' %
                        (direct1, seen_letter_list))
            continue

        try:
            assert 'A' in seen_letter_set
        except AssertionError:
            logger.info('WARNING: A camera not found in %r' % (direct1, ))
            continue

    return ibs
Example #24
def compute_data_gamma_(idx2_daid, wx2_rvecs, wx2_aids, wx2_idf,
                        alpha=3, thresh=0):
    """
    Computes gamma normalization scalar for the database annotations
    Internals step4
    >>> from ibeis.model.hots.smk.smk_index import *  # NOQA
    >>> from ibeis.model.hots.smk import smk_debug
    >>> ibs, annots_df, invindex, wx2_idxs, wx2_idf, wx2_rvecs, wx2_aids = smk_debug.testdata_raw_internals2()
    >>> alpha = ibs.cfg.query_cfg.smk_cfg.alpha
    >>> thresh = ibs.cfg.query_cfg.smk_cfg.thresh
    >>> idx2_daid  = invindex.idx2_daid
    >>> wx2_idf = wx2_idf
    >>> daids      = invindex.daids
    >>> use_cache  = USE_CACHE_GAMMA and False
    >>> daid2_gamma = compute_data_gamma_(idx2_daid, wx2_rvecs, wx2_aids, wx2_idf, daids, use_cache=use_cache)
    """
    if utool.DEBUG2:
        from ibeis.model.hots.smk import smk_debug
        smk_debug.rrr()
        smk_debug.check_wx2(wx2_rvecs=wx2_rvecs, wx2_aids=wx2_aids)
    wx_sublist = pdh.ensure_values(pdh.ensure_index(wx2_rvecs))
    if utool.VERBOSE:
        print('[smk_index] Compute Gamma alpha=%r, thresh=%r: ' % (alpha, thresh))
        mark1, end1_ = utool.log_progress(
            '[smk_index] Gamma group (by word): ', len(wx_sublist),
            flushfreq=100, writefreq=50, with_totaltime=True)
    # Get list of aids and rvecs w.r.t. words
    aids_list = pdh.ensure_values_subset(wx2_aids, wx_sublist)
    rvecs_list1 = pdh.ensure_values_subset(wx2_rvecs, wx_sublist)
    # Group by daids first and then by word index
    daid2_wx2_drvecs = utool.ddict(lambda: utool.ddict(list))
    for wx, aids, rvecs in zip(wx_sublist, aids_list, rvecs_list1):
        group_aids, groupxs = clustertool.group_indicies(aids)
        rvecs_group = clustertool.apply_grouping(rvecs, groupxs)  # 2.9 ms
        for aid, rvecs_ in zip(group_aids, rvecs_group):
            daid2_wx2_drvecs[aid][wx] = rvecs_

    if utool.VERBOSE:
        end1_()

    # For every daid, compute its gamma using pregrouped rvecs
    # Summation over words for each aid
    if utool.VERBOSE:
        mark2, end2_ = utool.log_progress(
            '[smk_index] Gamma Sum (over daid): ', len(daid2_wx2_drvecs),
            flushfreq=100, writefreq=25, with_totaltime=True)
    # Get lists w.r.t daids
    aid_list          = list(daid2_wx2_drvecs.keys())
    # list of mappings from words to rvecs foreach daid
    # [wx2_aidrvecs_1, ..., wx2_aidrvecs_nDaids,]
    _wx2_aidrvecs_list = list(daid2_wx2_drvecs.values())
    _aidwxs_iter    = (list(wx2_aidrvecs.keys()) for wx2_aidrvecs in _wx2_aidrvecs_list)
    aidrvecs_list  = [list(wx2_aidrvecs.values()) for wx2_aidrvecs in _wx2_aidrvecs_list]
    aididf_list = [[wx2_idf[wx] for wx in aidwxs] for aidwxs in _aidwxs_iter]

    #gamma_list = []
    if utool.DEBUG2:
        try:
            for count, (idf_list, rvecs_list) in enumerate(zip(aididf_list, aidrvecs_list)):
                assert len(idf_list) == len(rvecs_list), 'one list for each word'
                #gamma = smk_core.gamma_summation2(rvecs_list, idf_list, alpha, thresh)
        except Exception as ex:
            utool.printex(ex)
            utool.embed()
            raise
    gamma_list = [smk_core.gamma_summation2(rvecs_list, idf_list, alpha, thresh)
                  for idf_list, rvecs_list in zip(aididf_list, aidrvecs_list)]

    if WITH_PANDAS:
        daid2_gamma = pdh.IntSeries(gamma_list, index=aid_list, name='gamma')
    else:
        daid2_gamma = dict(zip(aid_list, gamma_list))
    if utool.VERBOSE:
        end2_()

    return daid2_gamma
Example #25
 def dev_embed(ibs=ibs, aid=aid, config2_=config2_):
     ut.embed()
     pass
Example #26
File: _grave.py Project: obaiga/hesaff
def arrptr_to_np_OLD(c_arrptr, shape, arr_t, dtype):
    """
    Casts an array pointer from C to numpy

    Args:
        c_arrptr (uint64): a pointer to an array returned from C
        shape (tuple): shape of the underlying array being pointed to
        arr_t (PyCSimpleType): the ctypes datatype of c_arrptr
        dtype (dtype): numpy datatype the array will be cast into

    CommandLine:
        python2 -m pyhesaff._pyhesaff --test-detect_feats_list:0 --rebuild-hesaff
        python2 -m pyhesaff._pyhesaff --test-detect_feats_list:0
        python3 -m pyhesaff._pyhesaff --test-detect_feats_list:0

    """
    try:
        byte_t = ctypes.c_char
        itemsize_ = dtype().itemsize
        #import utool
        #utool.printvar2('itemsize_')
        ###---------
        #dtype_t1 = C.c_voidp * itemsize_
        #dtype_ptr_t1 = C.POINTER(dtype_t1)  # size of each item
        #dtype_ptr_t = dtype_ptr_t1
        ###---------
        if True or six.PY2:
            # datatype of array elements
            dtype_t = byte_t * itemsize_
            dtype_ptr_t = C.POINTER(dtype_t)  # size of each item
            #typed_c_arrptr = c_arrptr.astype(C.c_long)
            typed_c_arrptr = c_arrptr.astype(int)
            c_arr = C.cast(typed_c_arrptr, dtype_ptr_t)  # cast to ctypes
            #raise Exception('fuuu. Why does 2.7 work? Why does 3.4 not!?!!!')
        else:
            dtype_t = C.c_char * itemsize_
            dtype_ptr_t = C.POINTER(dtype_t)  # size of each item
            #typed_c_arrptr = c_arrptr.astype(int)
            #typed_c_arrptr = c_arrptr.astype(C.c_size_t)
            typed_c_arrptr = c_arrptr.astype(int)
            c_arr = C.cast(c_arrptr.astype(C.c_size_t),
                           dtype_ptr_t)  # cast to ctypes
            c_arr = C.cast(c_arrptr.astype(int), dtype_ptr_t)  # cast to ctypes
            c_arr = C.cast(c_arrptr, dtype_ptr_t)  # cast to ctypes
            #typed_c_arrptr = c_arrptr.astype(int)
            #, order='C', casting='safe')
            #utool.embed()
            #typed_c_arrptr = c_arrptr.astype(dtype_t)
            #typed_c_arrptr = c_arrptr.astype(ptr_t2)
            #typed_c_arrptr = c_arrptr.astype(C.c_uint8)
            #typed_c_arrptr = c_arrptr.astype(C.c_void_p)
            #typed_c_arrptr = c_arrptr.astype(C.c_int)
            #typed_c_arrptr = c_arrptr.astype(C.c_char)  # WORKS BUT WRONG
            #typed_c_arrptr = c_arrptr.astype(bytes)  # WORKS BUT WRONG
            #typed_c_arrptr = c_arrptr.astype(int)
            #typed_c_arrptr = c_arrptr
            #typed_c_arrptr = c_arrptr.astype(np.int64)
            #typed_c_arrptr = c_arrptr.astype(int)
            """
            ctypes.cast(arg1, arg2)

            Input:
                arg1 - a ctypes object that is or can be converted to a pointer
                       of some kind
                arg2 - a ctypes pointer type.
            Output:
                 It returns an instance of the second argument, which references
                 the same memory block as the first argument
            """
            c_arr = C.cast(typed_c_arrptr, dtype_ptr_t)  # cast to ctypes
        np_arr = np.ctypeslib.as_array(c_arr, shape)  # cast to numpy
        np_arr.dtype = dtype  # fix numpy dtype
    except Exception as ex:
        import utool as ut
        #utool.embed()
        varnames = sorted(list(locals().keys()))
        vartypes = [(type, name) for name in varnames]
        spaces = [None for name in varnames]
        c_arrptr_dtype = c_arrptr.dtype  # NOQA
        #key_list = list(zip(varnames, vartypes, spaces))
        key_list = ['c_arrptr_dtype'
                    ] + 'c_arrptr, shape, arr_t, dtype'.split(', ')
        print('itemsize(float) = %r' % np.dtype(float).itemsize)
        print('itemsize(c_char) = %r' % np.dtype(C.c_char).itemsize)
        print('itemsize(c_wchar) = %r' % np.dtype(C.c_wchar).itemsize)
        print('itemsize(c_char_p) = %r' % np.dtype(C.c_char_p).itemsize)
        print('itemsize(c_wchar_p) = %r' % np.dtype(C.c_wchar_p).itemsize)
        print('itemsize(c_int) = %r' % np.dtype(C.c_int).itemsize)
        print('itemsize(c_int32) = %r' % np.dtype(C.c_int32).itemsize)
        print('itemsize(c_int64) = %r' % np.dtype(C.c_int64).itemsize)
        print('itemsize(int) = %r' % np.dtype(int).itemsize)
        print('itemsize(float32) = %r' % np.dtype(np.float32).itemsize)
        print('itemsize(float64) = %r' % np.dtype(np.float64).itemsize)
        ut.printex(ex, keys=key_list)
        ut.embed()
        raise
    return np_arr
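
For contrast, a minimal runnable sketch of the same numpy view taken over a ctypes array allocated from Python rather than a raw pointer returned from C:

import ctypes
import numpy as np

n = 6
c_buf = (ctypes.c_float * n)(*range(n))  # a ctypes array of 6 floats
np_view = np.ctypeslib.as_array(c_buf)   # numpy view sharing c_buf's memory
np_arr = np_view.reshape(2, 3)
assert np_arr.dtype == np.float32
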
Example #27
def compute_vocab(depc, fid_list, config):
    r"""
    Depcache method for computing a new visual vocab

    CommandLine:
        python -m wbia.core_annots --exec-compute_neighbor_index --show
        python -m wbia show_depc_annot_table_input --show --tablename=neighbor_index

        python -m wbia.algo.smk.vocab_indexer --exec-compute_vocab:0
        python -m wbia.algo.smk.vocab_indexer --exec-compute_vocab:1

        # FIXME make util_tests register
        python -m wbia.algo.smk.vocab_indexer compute_vocab:0

    Ignore:
        >>> # Lev Oxford Debug Example
        >>> import wbia
        >>> ibs = wbia.opendb('Oxford')
        >>> depc = ibs.depc
        >>> table = depc['vocab']
        >>> # Check what currently exists in vocab table
        >>> table.print_configs()
        >>> table.print_table()
        >>> table.print_internal_info()
        >>> # Grab aids used to compute vocab
        >>> from wbia.expt.experiment_helpers import get_annotcfg_list
        >>> expanded_aids_list = get_annotcfg_list(ibs, ['oxford'])[1]
        >>> qaids, daids = expanded_aids_list[0]
        >>> vocab_aids = daids
        >>> config = {'num_words': 64000}
        >>> exists = depc.check_rowids('vocab', [vocab_aids], config=config)
        >>> print('exists = %r' % (exists,))
        >>> vocab_rowid = depc.get_rowids('vocab', [vocab_aids], config=config)[0]
        >>> print('vocab_rowid = %r' % (vocab_rowid,))
        >>> vocab = table.get_row_data([vocab_rowid], 'words')[0]
        >>> print('vocab = %r' % (vocab,))

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.smk.vocab_indexer import *  # NOQA
        >>> # Test depcache access
        >>> import wbia
        >>> ibs, aid_list = wbia.testdata_aids('testdb1')
        >>> depc = ibs.depc_annot
        >>> input_tuple = [aid_list]
        >>> rowid_kw = {}
        >>> tablename = 'vocab'
        >>> vocabid_list = depc.get_rowids(tablename, input_tuple, **rowid_kw)
        >>> vocab = depc.get(tablename, input_tuple, 'words')[0]
        >>> assert vocab.wordflann is not None
        >>> assert vocab.wordflann._FLANN__curindex_data is not None
        >>> assert vocab.wordflann._FLANN__curindex_data is vocab.wx_to_word

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.smk.vocab_indexer import *  # NOQA
        >>> import wbia
        >>> ibs, aid_list = wbia.testdata_aids('testdb1')
        >>> depc = ibs.depc_annot
        >>> fid_list = depc.get_rowids('feat', aid_list)
        >>> config = VocabConfig()
        >>> vocab, train_vecs = ut.exec_func_src(compute_vocab, keys=['vocab', 'train_vecs'])
        >>> idx_to_vec = depc.d.get_feat_vecs(aid_list)[0]
        >>> self = vocab
        >>> ut.quit_if_noshow()
        >>> data = train_vecs
        >>> centroids = vocab.wx_to_word
        >>> import wbia.plottool as pt
        >>> vt.plot_centroids(data, centroids, num_pca_dims=2)
        >>> ut.show_if_requested()
        >>> #config = ibs.depc_annot['vocab'].configclass()

    """
    logger.info('[IBEIS] COMPUTE_VOCAB:')
    vecs_list = depc.get_native('feat', fid_list, 'vecs')
    train_vecs = np.vstack(vecs_list).astype(np.float32)
    num_words = config['num_words']
    logger.info(
        '[smk_index] Train Vocab(nWords=%d) using %d annots and %d descriptors'
        % (num_words, len(fid_list), len(train_vecs)))
    if config['algorithm'] == 'kdtree':
        flann_params = vt.get_flann_params(random_seed=42)
        kwds = dict(max_iters=20, flann_params=flann_params)
        words = vt.akmeans(train_vecs, num_words, **kwds)
    elif config['algorithm'] == 'minibatch':
        logger.info('Using minibatch kmeans')
        import sklearn.cluster

        rng = np.random.RandomState(config['random_seed'])
        n_init = config['n_init']
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            init_size = int(num_words * 4)
            batch_size = 1000
            n_batches = ut.get_num_chunks(train_vecs.shape[0], batch_size)
            minibatch_params = dict(
                n_clusters=num_words,
                init='k-means++',
                init_size=init_size,
                n_init=n_init,
                max_iter=30000 // n_batches,
                batch_size=batch_size,
                tol=0.0,
                max_no_improvement=10,
                reassignment_ratio=0.01,
            )
            logger.info('minibatch_params = %s' %
                        (ut.repr4(minibatch_params), ))
            clusterer = sklearn.cluster.MiniBatchKMeans(compute_labels=False,
                                                        random_state=rng,
                                                        verbose=2,
                                                        **minibatch_params)
            try:
                clusterer.fit(train_vecs)
            except (Exception, KeyboardInterrupt) as ex:
                ut.printex(ex, tb=True)
                if ut.is_developer():
                    ut.embed()
                else:
                    raise
        words = clusterer.cluster_centers_
        logger.info('Finished clustering')
    # if False:
    #     flann_params['checks'] = 64
    #     flann_params['trees'] = 4
    #     num_words = 128
    #     centroids = vt.initialize_centroids(num_words, train_vecs, 'akmeans++')
    #     words, hist = vt.akmeans_iterations(
    #         train_vecs, centroids, max_iters=1000, monitor=True,
    #         flann_params=flann_params)

    logger.info('Constructing vocab')
    vocab = VisualVocab(words)
    logger.info('Building vocab index')
    vocab.build()
    logger.info('Returning vocab')
    return (vocab, )
Example #28
def test_vsone_errors(ibs, daids, qaid2_qres_vsmany, qaid2_qres_vsone,
                      incinfo):
    """
    ibs1 = ibs_gt
    ibs2 = ibs (the current test database, sorry for the backwardness)
    aid1_to_aid2 - maps annots from ibs1 to ibs2
    """
    WASH = 'wash'
    BOTH_FAIL = 'both_fail'
    SINGLETON = 'singleton'
    VSMANY_OUTPERFORMED = 'vsmany_outperformed'
    VSMANY_DOMINATES = 'vsmany_dominates'
    VSMANY_WINS = 'vsmany_wins'
    VSONE_WINS = 'vsone_wins'
    if 'testcases' not in incinfo:
        testcases = {}
        for case in [
                WASH, BOTH_FAIL, SINGLETON, VSMANY_OUTPERFORMED,
                VSMANY_DOMINATES, VSMANY_WINS, VSONE_WINS
        ]:
            testcases[case] = []
        incinfo['testcases'] = testcases
    testcases = incinfo['testcases']

    def append_case(case, testtup):
        print('APPENDED NEW TESTCASE: case=%r' % (case, ))
        print('* testup = %r' % (testtup, ))
        print('* vuuid = %r' %
              (ibs_gt.get_annot_visual_uuids(testtup.qaid_t), ))
        if ut.get_argflag('--interupt-case') and case in [
                VSMANY_WINS, VSMANY_DOMINATES
        ]:
            incinfo['interactive'] = True
            incinfo['use_oracle'] = False
            incinfo['STOP'] = True
            if ut.is_developer():
                import plottool as pt  # NOQA
                IPYTHON_COMMANDS = """
                >>> %pylab qt4
                >>> from ibeis.viz.interact import interact_matches  # NOQA
                >>> #qres_vsmany = ut.search_stack_for_localvar('qres_vsmany')
                >>> ibs        = ut.search_stack_for_localvar('ibs')
                >>> daids      = ut.search_stack_for_localvar('daids')
                >>> qnid_t     = ut.search_stack_for_localvar('qnid_t')
                >>> qres_vsone = ut.search_stack_for_localvar('qres_vsone')
                >>> all_nids_t = ut.search_stack_for_localvar('all_nids_t')
                >>> # Find index in daids of correct matches
                >>> cm = qres_vsone
                >>> correct_indices = np.where(np.array(all_nids_t) == qnid_t)[0]
                >>> correct_aids2 = ut.take(daids, correct_indices)
                >>> qaid = cm.qaid
                >>> aid = correct_aids2[0]
                >>> # Report visual uuid for inclusion or exclusion in script
                >>> print(ibs.get_annot_visual_uuids([qaid, aid]))

                >>> # Feature match things
                >>> print('cm.filtkey_list = %r' % (cm.filtkey_list,))
                >>> fm  = cm.aid2_fm[aid]
                >>> fs  = cm.aid2_fs[aid]
                >>> fsv = cm.aid2_fsv[aid]
                >>> mx = 2
                >>> qfx, dfx = fm[mx]
                >>> fsv_single = fsv[mx]
                >>> fs_single = fs[mx]
                >>> # check featweights
                >>> data_featweights = ibs.get_annot_fgweights([aid])[0]
                >>> data_featweights[dfx]
                >>> fnum = pt.next_fnum()
                >>> bad_aid = cm.get_top_aids()[0]
                >>> #match_interaction_good = interact_matches.MatchInteraction(ibs, cm, aid, annot_mode=1)
                >>> #match_interaction_bad = interact_matches.MatchInteraction(ibs, cm, bad_aid)
                >>> match_interaction_good = cm.ishow_matches(ibs, aid, annot_mode=1, fnum=1)
                >>> match_interaction_bad = cm.ishow_matches(ibs, bad_aid, annot_mode=1, fnum=2)
                >>> match_interaction = match_interaction_good
                >>> self = match_interaction
                >>> self.select_ith_match(mx)
                >>> #impossible_to_match = len(correct_indices) > 0
                """
                y = """
                >>> from os.path import exists
                >>> import vtool as vt
                >>> import vtool.patch as vtpatch
                >>> import vtool.image as vtimage  # NOQA
                >>> chip_list = ibs.get_annot_chips([aid])
                >>> kpts_list = ibs.get_annot_kpts([aid])
                >>> probchip_fpath_list = ibs.get_probchip_fpath(aid)
                >>> probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None for fpath in probchip_fpath_list]
                >>> kpts  = kpts_list[0]
                >>> probchip = probchip_list[0]
                >>> kp = kpts[dfx]
                >>> patch  = vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0
                >>> fnum2 = pt.next_fnum()
                >>> pt.figure(fnum2, pnum=(1, 2, 1), doclf=True, docla=True)
                >>> pt.imshow(probchip)
                >>> pt.draw_kpts2([kp])
                >>> pt.figure(fnum2, pnum=(1, 2, 2))
                >>> pt.imshow(patch * 255)
                >>> pt.update()
                >>> vt.gaussian_average_patch(patch)
                >>> cm.ishow_top(ibs, annot_mode=1)
                """
                y
                ut.set_clipboard(IPYTHON_COMMANDS)
                #ut.spawn_delayed_ipython_paste()
                ut.embed(remove_pyqt_hook=False)
                IPYTHON_COMMANDS
Example #29
 def dev_embed(ibs=ibs, aid1=aid1, aid2=aid2, cm=cm, qreq_=qreq_):
     ut.embed()
Example #30
File: dev.py Project: Emily-Ke/houston
def embed(context):
    import utool as ut

    ut.embed()
Example #31
def myquery():
    r"""

    BUG::
        THERE IS A BUG SOMEWHERE: HOW IS THIS POSSIBLE?
        if everything is weighted, how did the true positive even get a score
        while the true negative did not?
        qres_copy.filtkey_list = ['ratio', 'fg', 'homogerr', 'distinctiveness']
        CORRECT STATS
        {
            'max'  : [0.832, 0.968, 0.604, 0.000],
            'min'  : [0.376, 0.524, 0.000, 0.000],
            'mean' : [0.561, 0.924, 0.217, 0.000],
            'std'  : [0.114, 0.072, 0.205, 0.000],
            'nMin' : [1, 1, 1, 51],
            'nMax' : [1, 1, 1, 1],
            'shape': (52, 4),
        }
        INCORRECT STATS
        {
            'max'  : [0.759, 0.963, 0.264, 0.000],
            'min'  : [0.379, 0.823, 0.000, 0.000],
            'mean' : [0.506, 0.915, 0.056, 0.000],
            'std'  : [0.125, 0.039, 0.078, 0.000],
            'nMin' : [1, 1, 1, 24],
            'nMax' : [1, 1, 1, 1],
            'shape': (26, 4),
        #   score_diff,  tp_score,  tn_score,       p,   K,  dcvs_clip_max,  fg_power,  homogerr_power
             0.494,     0.494,     0.000,  73.000,   2,          0.500,     0.100,          10.000

    see how separability changes as we vary things

    CommandLine:
        python -m ibeis.algo.hots.devcases --test-myquery
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 0
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 1
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 2

    References:
        http://en.wikipedia.org/wiki/Pareto_distribution <- look into

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> ut.dev_ipython_copypaster(myquery) if ut.inIPython() else myquery()
        >>> pt.show_if_requested()
    """
    from ibeis.algo.hots import special_query  # NOQA
    from ibeis.algo.hots import distinctiveness_normalizer  # NOQA
    from ibeis import viz  # NOQA
    import plottool as pt
    index = ut.get_argval('--index', int, 0)
    ibs, aid1, aid2, tn_aid = testdata_my_exmaples(index)
    qaids = [aid1]
    daids = [aid2] + [tn_aid]
    qvuuid = ibs.get_annot_visual_uuids(aid1)

    cfgdict_vsone = dict(
        sv_on=True,
        #sv_on=False,
        #codename='vsone_unnorm_dist_ratio_extern_distinctiveness',
        codename='vsone_unnorm_ratio_extern_distinctiveness',
        sver_output_weighting=True,
    )

    use_cache = False
    save_qcache = False

    qres_list, qreq_ = ibs.query_chips(qaids,
                                       daids,
                                       cfgdict=cfgdict_vsone,
                                       return_request=True,
                                       use_cache=use_cache,
                                       save_qcache=save_qcache,
                                       verbose=True)

    qreq_.load_distinctiveness_normalizer()
    qres = qres_list[0]
    top_aids = qres.get_top_aids()  # NOQA
    qres_orig = qres  # NOQA

    def test_config(qreq_, qres_orig, cfgdict):
        """ function to grid search over """
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        qres_vsone = qres_copy
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_vsone,
                                                   newfsv_list, newscore_aids,
                                                   filtkey)
        tp_score = qres_copy.aid2_score[aid2]
        tn_score = qres_copy.aid2_score[tn_aid]
        return qres_copy, tp_score, tn_score

    #[.01, .1, .2, .5, .6, .7, .8, .9, 1.0]),
    #FiltKeys = hstypes.FiltKeys
    # FIXME: Use other way of doing gridsearch
    grid_basis = distinctiveness_normalizer.DCVS_DEFAULT.get_grid_basis()
    gridsearch = ut.GridSearch(grid_basis, label='qvuuid=%r' % (qvuuid, ))
    print('Begin Grid Search')
    for cfgdict in ut.ProgressIter(gridsearch, lbl='GridSearch'):
        qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, cfgdict)
        gridsearch.append_result(tp_score, tn_score)
    print('Finish Grid Search')

    # Get best result
    best_cfgdict = gridsearch.get_rank_cfgdict()
    qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, best_cfgdict)

    # Examine closely what you can do with scores
    if False:
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        ut.embed()

        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(
                qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                pass
                break
                #scorex_vsone  = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: paramaterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(
                        new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid] = new_fsv_vsone
                aid2_fs[daid] = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score

        # Look at plot of query products
        for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
            new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
            scores_list = np.array(new_fs_vsone)[:, None].T
            pt.plot_sorted_scores(scores_list,
                                  logscale=False,
                                  figtitle=str(daid))
        pt.iup()
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_copy,
                                                   newfsv_list, newscore_aids,
                                                   filtkey)

    # PRINT INFO
    import functools
    #ut.rrrr()
    get_stats_str = functools.partial(ut.get_stats_str,
                                      axis=0,
                                      newlines=True,
                                      precision=3)
    tp_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[aid2]), ':')
    tn_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[tn_aid]), ':')
    info_str_list = []
    info_str_list.append('qres_copy.filtkey_list = %r' %
                         (qres_copy.filtkey_list, ))
    info_str_list.append('CORRECT STATS')
    info_str_list.append(tp_stats_str)
    info_str_list.append('INCORRECT STATS')
    info_str_list.append(tn_stats_str)
    info_str = '\n'.join(info_str_list)
    print(info_str)

    # SHOW BEST RESULT
    #qres_copy.ishow_top(ibs, fnum=pt.next_fnum())
    #qres_orig.ishow_top(ibs, fnum=pt.next_fnum())

    # Text Information
    param_lbl = 'dcvs_power'
    param_stats_str = gridsearch.get_dimension_stats_str(param_lbl)
    print(param_stats_str)

    csvtext = gridsearch.get_csv_results(10)
    print(csvtext)

    # Parameter visualization
    fnum = pt.next_fnum()
    # plot parameter influence
    param_label_list = gridsearch.get_param_lbls()
    pnum_ = pt.get_pnum_func(2, len(param_label_list))
    for px, param_label in enumerate(param_label_list):
        gridsearch.plot_dimension(param_label, fnum=fnum, pnum=pnum_(px))
    # plot match figure
    pnum2_ = pt.get_pnum_func(2, 2)
    qres_copy.show_matches(ibs, aid2, fnum=fnum, pnum=pnum2_(2))
    qres_copy.show_matches(ibs, tn_aid, fnum=fnum, pnum=pnum2_(3))
    # Add figure labels
    figtitle = 'Effect of parameters on vsone separation for a single case'
    subtitle = 'qvuuid = %r' % (qvuuid, )
    figtitle += '\n' + subtitle
    pt.set_figtitle(figtitle)
    # Save Figure
    #fig_fpath = pt.save_figure(usetitle=True)
    #print(fig_fpath)
    # Write CSV Results
    #csv_fpath = fig_fpath + '.csv.txt'
    #ut.write_to(csv_fpath, csvtext)

    #qres_copy.ishow_top(ibs)
    #from matplotlib import pyplot as plt
    #plt.show()
    #print(ut.list_str()))
    # TODO: plot max variation dims
    #import plottool as pt
    #pt.plot(p_list, diff_list)
    """
def augment_nnindexer_experiment():
    """

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment

        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show


        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids

        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6


    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()

    """
    import wbia

    # build test data
    # ibs = wbia.opendb('PZ_MTEST')
    ibs = wbia.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        # addition_stride = 64
        # addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        # max_ceiling = 4000
        # max_ceiling = 2000
        # max_ceiling = 600
    else:
        assert False
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(
        ut.ProgressIter(_addition_iter,
                        lbl=addition_lbl,
                        freq=1,
                        autoadjust=False))
    time_list_addition = []
    # time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []

    # for _ in range(80):
    #    next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            logger.info('===============\n\n')
        logger.info(ut.repr2(time_list_addition))
        logger.info(ut.repr2(list(map(id, nnindexer_list))))
        logger.info(ut.repr2(tmp_cfgstr_list))
        logger.info(
            ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        reindex_label = 'Reindex'
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num,
                                   addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        time_list_reindex = []
        # time_list_reindex = []
        reindex_count_list = []

        for count in reindex_iter:
            logger.info('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            # count = max_num // 16 + ((x % 6) * 1)
            # x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            logger.info('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' %
                        (ut.get_object_size_str(
                            neighbor_index_cache.NEIGHBOR_CACHE.items()), ))
            logger.info('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' %
                        (len(neighbor_index_cache.NEIGHBOR_CACHE.items()), ))
            logger.info('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' %
                        (ut.get_object_size_str(
                            neighbor_index_cache.UUID_MAP_CACHE), ))
            logger.info('totalsize(nnindexer) = ' +
                        ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            # import cv2
            # import matplotlib as mpl
            # logger.info(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #     ))
            logger.info('L___________________\n\n\n')
        logger.info(ut.repr2(time_list_reindex))
        if IS_SMALL:
            logger.info(ut.repr2(list(map(id, nnindexer_list))))
            logger.info(
                ut.repr2(list([nnindxer.cfgstr
                               for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        logger.info('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input

        while not (resolution.isdigit()):
            logger.info('\n[train] What do you want to do?')
            logger.info('[train]     0 - Continue')
            logger.info('[train]     1 - Embed')
            logger.info('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            logger.info('resuming training...')
        elif resolution == 1:
            ut.embed()

    import wbia.plottool as pt

    next_fnum = iter(range(0, 1)).__next__  # Python 3: iterators use __next__, not .next
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(
            addition_count_list,
            time_list_addition,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=addition_lbl + ' Time',
        )

    if len(reindex_count_list) > 0:
        pt.plot2(
            reindex_count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_label + ' Time',
        )

    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
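
The experiment above reduces to one measurement pattern: time the indexer request at growing data sizes, once allowing incremental augmentation and once forcing a rebuild. A minimal sketch of that harness, assuming only utool is available (the toy `step` callables stand in for `request_augmented_wbia_nnindexer`):

import utool as ut

def time_steps(counts, step):
    # Time step(count) for each count; note utool's Timer spells the
    # attribute 'ellapsed'.
    times = []
    for count in counts:
        with ut.Timer(verbose=False) as t:
            step(count)
        times.append(t.ellapsed)
    return times

counts = list(range(1, 100, 4))
augment_times = time_steps(counts, lambda n: sorted(range(n)))        # stand-in for augment
reindex_times = time_steps(counts[::-1], lambda n: sorted(range(n)))  # stand-in for rebuild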
Example #34
0
def analyize_multiple_drives(drives):
    """
    CommandLine:
        export PYTHONPATH=$PYTHONPATH:~/local/scripts

        python -m register_files --exec-analyize_multiple_drives --drives ~ E:/ D:/

        python -m register_files --exec-analyize_multiple_drives --drives ~ /media/Store
        python register_files.py --exec-analyize_multiple_drives --drives /media/joncrall/media/ /media/joncrall/store/
        /media/joncrall/backup

        cd ~/local/scripts

    Example:
        >>> from register_files import *  # NOQA
        >>> dpaths = ut.get_argval('--drives', type_=list, default=['E://', 'D://'])#'D:/', 'E:/', 'F:/'])
        >>> drives = [Drive(root_dpath) for root_dpath in dpaths]
        >>> drive = Broadcaster(drives)
        >>> drive.compute_info()
        >>> #drive.build_fpath_hashes()
        >>> drive.check_consistency()
        >>> E = drive = drives[0]
        >>> analyize_multiple_drives(drives)
        >>> #D, E, F = drives
        >>> #drive = D
    """
    # -----
    ## Find the files shared on all disks
    #allhave = reduce(ut.dict_isect_combine, [drive.hash_to_fpaths for drive in drives])
    #print('#allhave = %r' % (len(allhave),))
    #allhave.keys()[0:3]
    #allhave.values()[0:3]
    #ut.embed()
    #for drive in drives:
    #drive.rrr()
    #print(drive.root_dpath)
    #print(len(drive.hash_to_unique_fpaths))
    #print(len(drive.hash_to_fpaths))
    #print(len(drive.hash_to_unique_fpaths) / len(drive.hash_to_fpaths))

    # Build dict to map from dpath to file pointers of unique descendants
    #unique_fidxs_list = drive.hash_to_fidxs.values()
    #fidxs = ut.flatten(unique_fidxs_list)

    esc = re.escape

    # Find which files exist on all drives
    hashes_list = [set(drive_.hash_to_fidxs.keys()) for drive_ in drives]
    allhave_hashes = reduce(set.intersection, hashes_list)
    print('Drives %r have %d file hashes in common' % (drives, len(allhave_hashes)))

    lbls = [drive_.root_dpath for drive_ in drives]
    isect_lens = np.zeros((len(drives), len(drives)))
    for idx1, (hashes1, drive1) in enumerate(zip(hashes_list, drives)):
        for idx2, (hashes2, drive2) in enumerate(zip(hashes_list, drives)):
            if drive1 is not drive2:
                common = set.intersection(hashes1, hashes2)
                isect_lens[idx1, idx2] = len(common)
            else:
                isect_lens[idx1, idx2] = len(hashes2)
    import pandas as pd
    print(pd.DataFrame(isect_lens, index=lbls, columns=lbls))

    # for drive in drives
    drive = drives[0]
    print('Finding unique files in drive=%r' % (drive,))
    # Get subset of fidxs on this drive
    unflat_valid_fidxs = ut.take(drive.hash_to_fidxs, allhave_hashes)
    valid_fidxs = sorted(ut.flatten(unflat_valid_fidxs))

    # Filter fpaths by patterns
    ignore_patterns = [
        esc('Thumbs.db')
    ]
    ignore_paths = [
        'Spotify'
    ]
    patterns = ignore_paths + ignore_patterns
    valid_fpaths = ut.take(drive.fpath_list, valid_fidxs)
    valid_flags = [not any([re.search(p, fpath) for p in patterns])
                   for fpath in valid_fpaths]
    valid_flags = np.array(valid_flags)
    valid_fidxs = ut.compress(valid_fidxs, valid_flags)

    print(ut.filtered_infostr(valid_flags, 'invalid fpaths'))

    fidxs = valid_fidxs
    valid_fpaths = sorted(ut.take(drive.fpath_list, fidxs))

    dpath_to_unique_fidx = build_dpath_to_fidx(valid_fpaths, valid_fidxs,
                                                drive.root_dpath)

    def make_tree_structure(valid_fpaths):
        root = {}

        def dict_getitem_default(dict_, key, type_):
            try:
                val = dict_[key]
            except KeyError:
                val = type_()
                dict_[key] = val
            return val

        for fpath in ut.ProgIter(valid_fpaths, 'building tree', freq=30000):
            path_components = ut.dirsplit(fpath)
            current = root
            for comp in path_components[:-1]:
                current = dict_getitem_default(current, comp, dict)
            contents = dict_getitem_default(current, '.', list)
            contents.append(path_components[-1])
        return root

    root = make_tree_structure(valid_fpaths)

    def print_tree(root, path, dpath_to_unique_fidx=dpath_to_unique_fidx, drive=drive, depth=None):
        print('path = %r' % (path,))
        print(ut.byte_str2(drive.get_total_nbytes(dpath_to_unique_fidx[path])))
        path_components = ut.dirsplit(path)
        # Navigate to correct spot in tree
        current = root
        for c in path_components:
            current = current[c]
        print(ut.repr3(current, truncate=1))

    def get_tree_info(root, path, dpath_to_unique_fidx=dpath_to_unique_fidx, drive=drive, depth=0):
        path_components = ut.dirsplit(path)
        current = root
        for c in path_components:
            current = current[c]
        if isinstance(current, list):
            tree_tmp = []
        else:
            key_list = list(current.keys())
            child_list = [join(path, key) for key in key_list]
            dpath_nbytes_list = [
                drive.get_total_nbytes(dpath_to_unique_fidx.get(child, []))
                for child in child_list
            ]
            nfiles_list = [
                len(dpath_to_unique_fidx.get(child, []))
                for child in child_list
            ]
            tree_tmp = sorted([
                (key, ut.byte_str2(nbytes), nfiles)
                if depth == 0 else
                (key, ut.byte_str2(nbytes), nfiles,
                    get_tree_info(root, path=child,
                                  dpath_to_unique_fidx=dpath_to_unique_fidx, drive=drive,
                                  depth=depth - 1))
                for key, child, nbytes, nfiles in zip(key_list, child_list, dpath_nbytes_list, nfiles_list)
            ])
        return tree_tmp

    def print_tree_struct(*args, **kwargs):
        tree_str = (ut.indent(ut.repr3(get_tree_info(*args, **kwargs), nl=1)))
        print(tree_str)
        #bytes_str = ut.byte_str2(drive.get_total_nbytes(dpath_to_unique_fidx[path]))
        #print('path = %r, %s' % (path, bytes_str))
        #print(ut.repr3(key_list))
        return tree_str

    dpath_to_unique_fidx
    dpath_to_fidxs = ut.map_dict_vals(set, drive.dpath_to_fidx)
    complete_unique_dpaths = ut.dict_isect(dpath_to_fidxs, dpath_to_unique_fidx)
    complete_root = make_tree_structure(complete_unique_dpaths.keys())

    globals()['ut'] = ut
    globals()['os'] = os
    globals()['join'] = join

    print(ut.byte_str2(drive.get_total_nbytes(dpath_to_unique_fidx['E:\\'])))
    get_tree_info(root, path='E:\\', depth=0)

    get_tree_info(complete_root, path='E:\\', depth=0)

    get_tree_info(root, path='E:\\', depth=1)
    print(print_tree_struct(root, path='E:\\Clutter', depth=0))
    print_tree(root, path=r'E:\TV')
    print_tree(root, path=r'E:\Movies')
    print_tree(root, path=r'E:\Boot')

    print_tree(root, path='E:\\')
    print_tree(root, path=r'E:\Downloaded')
    print_tree(root, path=r'E:\Recordings')
    print_tree(root, path=r'E:\Clutter')
    print_tree(root, path=r'E:\Audio Books')

    # TODO:
    # * Ignore list
    # * Find and rectify internal duplicates
    # * Update registry with new files and deleted ones
    # * Ensure that all unique files are backed up
    # Index the C: Drive as well.
    # * Lazy properties of drive
    # * Multiple types of identifiers (hash, fname, ext, fsize)
    # Drive subsets
    # Export/Import Drive for analysis on other machines

    ut.embed()
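
For intuition, the nested-dict layout that `make_tree_structure` builds can be reproduced in a few lines of plain Python. This is a hedged sketch: '/'-splitting replaces `ut.dirsplit`, and `dict.setdefault` replaces the `dict_getitem_default` helper.

def make_tree(paths):
    # Directories become nested dicts; the special '.' key collects the
    # filenames that live at that level, as in make_tree_structure.
    root = {}
    for fpath in paths:
        parts = [p for p in fpath.split('/') if p]
        node = root
        for comp in parts[:-1]:
            node = node.setdefault(comp, {})
        node.setdefault('.', []).append(parts[-1])
    return root

tree = make_tree(['E/TV/a.mkv', 'E/TV/b.mkv', 'E/Movies/c.avi'])
# {'E': {'TV': {'.': ['a.mkv', 'b.mkv']}, 'Movies': {'.': ['c.avi']}}}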
Example #35
0
def embed(back):
    """ Allows for embedding in an environment with all imports """
    ibs = back.ibs
    front = back.front
    utool.embed()
Example #36
0
    def append_case(case, testtup):
        print('APPENDED NEW TESTCASE: case=%r' % (case,))
        print('* testup = %r' % (testtup,))
        print('* vuuid = %r' % (ibs_gt.get_annot_visual_uuids(testtup.qaid_t),))
        if ut.get_argflag('--interupt-case') and case in [VSMANY_WINS, VSMANY_DOMINATES]:
            incinfo['interactive'] = True
            incinfo['use_oracle'] = False
            incinfo['STOP'] = True
            if ut.is_developer():
                import plottool as pt  # NOQA
                IPYTHON_COMMANDS = """
                >>> %pylab qt4
                >>> from ibeis.viz.interact import interact_matches  # NOQA
                >>> #qres_vsmany = ut.search_stack_for_localvar('qres_vsmany')
                >>> ibs        = ut.search_stack_for_localvar('ibs')
                >>> daids      = ut.search_stack_for_localvar('daids')
                >>> qnid_t     = ut.search_stack_for_localvar('qnid_t')
                >>> qres_vsone = ut.search_stack_for_localvar('qres_vsone')
                >>> all_nids_t = ut.search_stack_for_localvar('all_nids_t')
                >>> # Find index in daids of correct matches
                >>> cm = qres_vsone
                >>> correct_indices = np.where(np.array(all_nids_t) == qnid_t)[0]
                >>> correct_aids2 = ut.take(daids, correct_indices)
                >>> qaid = cm.qaid
                >>> aid = correct_aids2[0]
                >>> # Report visual uuid for inclusion or exclusion in script
                >>> print(ibs.get_annot_visual_uuids([qaid, aid]))

                >>> # Feature match things
                >>> print('cm.filtkey_list = %r' % (cm.filtkey_list,))
                >>> fm  = cm.aid2_fm[aid]
                >>> fs  = cm.aid2_fs[aid]
                >>> fsv = cm.aid2_fsv[aid]
                >>> mx = 2
                >>> qfx, dfx = fm[mx]
                >>> fsv_single = fsv[mx]
                >>> fs_single = fs[mx]
                >>> # check featweights
                >>> data_featweights = ibs.get_annot_fgweights([aid])[0]
                >>> data_featweights[dfx]
                >>> fnum = pt.next_fnum()
                >>> bad_aid = cm.get_top_aids()[0]
                >>> #match_interaction_good = interact_matches.MatchInteraction(ibs, cm, aid, annot_mode=1)
                >>> #match_interaction_bad = interact_matches.MatchInteraction(ibs, cm, bad_aid)
                >>> match_interaction_good = cm.ishow_matches(ibs, aid, annot_mode=1, fnum=1)
                >>> match_interaction_bad = cm.ishow_matches(ibs, bad_aid, annot_mode=1, fnum=2)
                >>> match_interaction = match_interaction_good
                >>> self = match_interaction
                >>> self.select_ith_match(mx)
                >>> #impossible_to_match = len(correct_indices) > 0
                """
                y = """
                >>> from os.path import exists
                >>> import vtool as vt
                >>> import vtool.patch as vtpatch
                >>> import vtool.image as vtimage  # NOQA
                >>> chip_list = ibs.get_annot_chips([aid])
                >>> kpts_list = ibs.get_annot_kpts([aid])
                >>> probchip_fpath_list = ibs.get_probchip_fpath(aid)
                >>> probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None for fpath in probchip_fpath_list]
                >>> kpts  = kpts_list[0]
                >>> probchip = probchip_list[0]
                >>> kp = kpts[dfx]
                >>> patch  = vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0
                >>> fnum2 = pt.next_fnum()
                >>> pt.figure(fnum2, pnum=(1, 2, 1), doclf=True, docla=True)
                >>> pt.imshow(probchip)
                >>> pt.draw_kpts2([kp])
                >>> pt.figure(fnum2, pnum=(1, 2, 2))
                >>> pt.imshow(patch * 255)
                >>> pt.update()
                >>> vt.gaussian_average_patch(patch)
                >>> cm.ishow_top(ibs, annot_mode=1)
                """
                y
                ut.set_clipboard(IPYTHON_COMMANDS)
                #ut.spawn_delayed_ipython_paste()
                ut.embed(remove_pyqt_hook=False)
                IPYTHON_COMMANDS

        testcases[case].append(testtup)
Example #37
0
    def dev_embed(self):
        ut.embed()
Example #38
0
    def new_cpd(self, parents=None, pmf_func=None):
        """
        Makes a new random variable that is an instance of this template

        parents : only used to define the name of this node.
        """
        if pmf_func is None:
            pmf_func = self.pmf_func

        # --- MAKE VARIABLE ID
        def _getid(obj):
            if isinstance(obj, int):
                return str(obj)
            elif isinstance(obj, six.string_types):
                return obj
            else:
                return obj._template_id

        if not ut.isiterable(parents):
            parents = [parents]

        template_ids = [_getid(cpd) for cpd in parents]
        HACK_SAME_IDS = True
        # TODO: keep track of parent index inheritance
        # then rectify uniqueness based on that
        if HACK_SAME_IDS and ut.allsame(template_ids):
            _id = template_ids[0]
        else:
            _id = ''.join(template_ids)
        variable = ''.join([self.varpref, _id])
        # variable = '_'.join([self.varpref, '{' + _id + '}'])
        # variable = '$%s$' % (variable,)

        evidence_cpds = [cpd for cpd in parents if hasattr(cpd, 'ttype')]
        if len(evidence_cpds) == 0:
            evidence_cpds = None

        variable_card = len(self.basis)
        statename_dict = {
            variable: self.basis,
        }
        if self.evidence_ttypes is not None:
            if any(cpd.ttype != tcpd.ttype
                   for cpd, tcpd in zip(evidence_cpds, self.evidence_ttypes)):
                raise ValueError('Evidence is not of appropriate type')
            evidence_bases = [cpd.variable_statenames for cpd in evidence_cpds]
            evidence_card = list(map(len, evidence_bases))
            evidence_states = list(ut.iprod(*evidence_bases))

            for cpd in evidence_cpds:
                _dict = ut.dict_subset(cpd.statename_dict, [cpd.variable])
                statename_dict.update(_dict)

            evidence = [cpd.variable for cpd in evidence_cpds]
        else:
            if evidence_cpds is not None:
                raise ValueError('Gave evidence for evidence-less template')
            evidence = None
            evidence_card = None

        # --- MAKE TABLE VALUES
        if pmf_func is not None:
            if isinstance(pmf_func, list):
                values = np.array(pmf_func)
            else:
                values = np.array([[
                    pmf_func(vstate, *estates) for estates in evidence_states
                ] for vstate in self.basis])
            ensure_normalized = True
            if ensure_normalized:
                values = values / values.sum(axis=0)
        else:
            # assume uniform
            fill_value = 1.0 / variable_card
            if evidence_card is None:
                values = np.full((1, variable_card), fill_value)
            else:
                values = np.full([variable_card] + list(evidence_card),
                                 fill_value)

        try:
            cpd = pgmpy.factors.TabularCPD(
                variable=variable,
                variable_card=variable_card,
                values=values,
                evidence=evidence,
                evidence_card=evidence_card,
                # statename_dict=statename_dict,
                state_names=statename_dict,
            )
        except Exception as ex:
            ut.printex(
                ex,
                'Failed to create TabularCPD',
                keys=[
                    'variable',
                    'variable_card',
                    'statename_dict',
                    'evidence_card',
                    'evidence',
                    'values.shape',
                ],
            )
            ut.embed()
            raise

        cpd.ttype = self.ttype
        cpd._template_ = self
        cpd._template_id = _id
        return cpd
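
As a standalone illustration of the normalize-then-build pattern inside `new_cpd`, here is a hedged sketch against modern pgmpy; the import path `pgmpy.factors.discrete.TabularCPD` and the toy states are assumptions, not part of the original code.

import numpy as np
from pgmpy.factors.discrete import TabularCPD

basis = ['low', 'high']            # hypothetical variable states
evidence_states = ['a', 'b', 'c']  # hypothetical parent states

def pmf_func(vstate, estate):
    # toy unnormalized score
    return 1.0 if (vstate == 'high') == (estate == 'c') else 0.5

values = np.array([[pmf_func(v, e) for e in evidence_states] for v in basis])
values = values / values.sum(axis=0)  # normalize each column, as new_cpd does

cpd = TabularCPD(
    variable='X', variable_card=len(basis), values=values,
    evidence=['E'], evidence_card=[len(evidence_states)],
    state_names={'X': basis, 'E': evidence_states},
)
print(cpd)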
Example #39
0
def get_annotation_special_monica_laurel_max(desired_species=None, **kwargs):
    ibs = current_app.ibs
    filename = 'special.monica-laurel-max.csv'

    def _process_annot_name_uuids_dict(ibs, filepath):
        import uuid
        auuid_list = []
        nuuid_list = []
        with open(filepath, 'r') as file_:
            for line in file_.readlines():
                line = line.strip().split(',')
                auuid = uuid.UUID(line[0])
                nuuid = None if line[1] == 'None' else uuid.UUID(line[1])
                auuid_list.append(auuid)
                nuuid_list.append(nuuid)

        annot_rowid_list = ibs.get_annot_aids_from_uuid(auuid_list)
        name_rowid_list = ibs.get_name_rowids_from_uuid(nuuid_list)

        zipped = zip(annot_rowid_list, name_rowid_list)
        mapping_dict = {aid: nid for aid, nid in zipped if aid is not None}
        return mapping_dict

    aid_list = sorted(ibs.get_valid_aids())
    annot_uuid_list = ibs.get_annot_uuids(aid_list)
    print('Found %d aids' % (len(aid_list), ))
    nid_list = ibs.get_annot_nids(aid_list)
    name_uuid_list = ibs.get_name_uuids(nid_list)
    name_list = ibs.get_name_texts(nid_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    sex_list = ibs.get_name_sex_text(nid_list)
    age_list = ibs.get_annot_age_months_est_texts(aid_list)
    gid_list = ibs.get_annot_gids(aid_list)
    contrib_list = ibs.get_image_contributor_tag(gid_list)
    gname_list = ibs.get_image_gnames(gid_list)
    imageset_rowids_list = ibs.get_image_imgsetids(gid_list)
    imageset_rowids_set = map(set, imageset_rowids_list)

    special_imageset_rowid_set = set(ibs.get_valid_imgsetids(is_special=True))
    imagesets_list = [
        list(imageset_rowid_set - special_imageset_rowid_set)
        for imageset_rowid_set in imageset_rowids_set
    ]

    imageset_list = [_[0] if len(_) > 0 else None for _ in imagesets_list]
    imageset_text_list = ibs.get_imageset_text(imageset_list)
    imageset_metadata_list = ibs.get_imageset_metadata(imageset_list)
    annot_metadata_list = ibs.get_annot_metadata(aid_list)

    assert len(imageset_metadata_list) == len(annot_metadata_list)

    imageset_metadata_list_ = ibs.get_imageset_metadata(
        ibs.get_valid_imgsetids())
    imageset_metadata_key_list = sorted(
        set(
            ut.flatten([
                imageset_metadata_dict_.keys()
                for imageset_metadata_dict_ in imageset_metadata_list_
            ])))
    imageset_metadata_key_str = ','.join(imageset_metadata_key_list)

    annot_metadata_list_ = ibs.get_annot_metadata(ibs.get_valid_aids())
    annot_metadata_key_list = sorted(
        set(
            ut.flatten([
                annot_metadata_dict_.keys()
                for annot_metadata_dict_ in annot_metadata_list_
            ])))
    annot_metadata_key_str = ','.join(annot_metadata_key_list)

    if 'Monica-Laurel' in ibs.dbdir:
        import ibeis
        ibs1 = ibeis.opendb('/home/zebra/Desktop/Monica/', web=False)
        ibs2 = ibeis.opendb('/home/zebra/Desktop/Laurel/', web=False)
    elif 'Monica-Max' in ibs.dbdir:
        import ibeis
        ibs1 = ibeis.opendb('/home/zebra/Desktop/Monica/', web=False)
        ibs2 = ibeis.opendb('/home/zebra/Desktop/Max/', web=False)
    else:
        ibs1 = None
        ibs2 = None

    line_list = []
    zipped = zip(nid_list, aid_list, name_uuid_list, annot_uuid_list,
                 name_list, species_list, sex_list, age_list, gname_list,
                 contrib_list, imageset_list, imageset_text_list,
                 imageset_metadata_list, annot_metadata_list)
    zipped = sorted(list(zipped))
    for args in zipped:
        (nid, aid, name_uuid, annot_uuid, name, species, sex, age, gname,
         contrib, imageset_rowid, imageset_text, imageset_metadata_dict,
         annot_metadata_dict) = args

        contrib_str = '' if contrib is None else contrib.split(',')[0].upper()

        if desired_species is not None and species != desired_species:
            continue

        if nid <= 0:
            continue

        nid_old = ''
        name_old = ''
        name_changed = False
        cross_database_match = False

        try:
            if ibs1 is not None and ibs2 is not None:
                aid1 = ibs1.get_annot_aids_from_uuid(annot_uuid)
                aid2 = ibs2.get_annot_aids_from_uuid(annot_uuid)

                if aid1 is not None:
                    assert aid2 is None
                    name_uuid_old = ibs1.get_annot_name_uuids(aid1)
                elif aid2 is not None:
                    assert aid1 is None
                    name_uuid_old = ibs2.get_annot_name_uuids(aid2)
                else:
                    raise AssertionError(
                        'Should be in one of these original databases')

                if name_uuid_old != name_uuid:
                    name_changed = True
                    if name_uuid_old is None:
                        nid_old = 'UNKNOWN NID'
                        name_old = 'UNKNOWN NAME'
                    else:
                        nid_old = ibs.get_name_rowids_from_uuid(name_uuid_old)
                        name_old = ibs.get_name_texts(nid_old)

                    cross_database_match = not name.startswith(contrib_str)
        except Exception:
            print('ERROR WITH ABOVE')
            ut.embed()

        line_list_ = [
            contrib_str,
            annot_uuid,
            aid,
            nid,
            name,
            'Yes' if name_changed else '',
            'Yes' if cross_database_match else '',
            nid_old,
            name_old,
            species,
            sex,
            age,
            gname,
            imageset_rowid,
            imageset_text,
            '|',
        ] + [
            imageset_metadata_dict.get(imageset_metadata_key, '')
            for imageset_metadata_key in imageset_metadata_key_list
        ] + [
            '|',
        ] + [
            annot_metadata_dict.get(annot_metadata_key, '')
            for annot_metadata_key in annot_metadata_key_list
        ]
        line_list_ = ['' if item is None else item for item in line_list_]
        line = ','.join(map(str, line_list_))
        line_list.append(line)

    combined_str = '\n'.join(line_list)
    combined_str = 'DB,Annotation UUID,AID,NID,Name,Name Changed,Cross-Database Match,Old NID,Old Name,Species,Sex,Age,Image Name,Encounter ID,Encounter Name,| SEPARATOR |,%s,| SEPARATOR |,%s\n' % (
        imageset_metadata_key_str,
        annot_metadata_key_str,
    ) + combined_str
    return appf.send_csv_file(combined_str, filename)
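
One fragile spot in the export above is the manual `','.join`: any metadata value containing a comma shifts every column after it. A sketch of the safer route through Python's csv module (the `header` and `rows` names are hypothetical):

import csv
import io

def rows_to_csv(header, rows):
    # csv.writer quotes embedded commas and newlines, unlike ','.join.
    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow(header)
    writer.writerows(rows)
    return buf.getvalue()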
Example #40
0
def augment_nnindexer_experiment():
    """

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show


        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6


    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()

    """
    import ibeis
    # build test data
    #ibs = ibeis.opendb('PZ_MTEST')
    ibs = ibeis.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        #addition_stride = 64
        #addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        #max_ceiling = 4000
        #max_ceiling = 2000
        #max_ceiling = 600
    else:
        assert False
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed
    #ibs.get_annot_vecs(all_randomize_daids_, ensure=True)
    #ibs.get_annot_fgweights(all_randomize_daids_, ensure=True)

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(ut.ProgressIter(_addition_iter, lbl=addition_lbl,
                                         freq=1, autoadjust=False))
    time_list_addition = []
    #time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []

    #for _ in range(80):
    #    next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            print('===============\n\n')
        print(ut.list_str(time_list_addition))
        print(ut.list_str(list(map(id, nnindexer_list))))
        print(ut.list_str(tmp_cfgstr_list))
        print(ut.list_str(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        reindex_label = 'Reindex'
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        time_list_reindex = []
        #time_list_reindex = []
        reindex_count_list = []

        for count in reindex_iter:
            print('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            #count = max_num // 16 + ((x % 6) * 1)
            #x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            print('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' % (
                len(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),))
            print('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            #import cv2
            #import matplotlib as mpl
            #print(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #     ))
            print('L___________________\n\n\n')
        print(ut.list_str(time_list_reindex))
        if IS_SMALL:
            print(ut.list_str(list(map(id, nnindexer_list))))
            print(ut.list_str(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        print('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input
        while not (resolution.isdigit()):
            print('\n[train] What do you want to do?')
            print('[train]     0 - Continue')
            print('[train]     1 - Embed')
            print('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            print('resuming training...')
        elif resolution == 1:
            ut.embed()

    import plottool as pt

    next_fnum = iter(range(0, 1)).__next__  # Python 3: iterators use __next__, not .next
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(addition_count_list, time_list_addition, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=addition_lbl + ' Time')

    if len(reindex_count_list) > 0:
        pt.plot2(reindex_count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_label + ' Time')

    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
Example #41
0
def breakpoint(*tags):
    import utool as ut
    if ut.get_argflag('--break'):
        ut.embed(N=1)
        return True
    return False
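
A usage sketch: the guard stays inert unless the process was launched with `--break`, so it can be left in slow pipelines (the surrounding loop is hypothetical):

# python myscript.py --break   -> drops into an IPython shell here
for item in range(10):
    if breakpoint():
        print('resumed after inspecting item %r' % (item,))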
Example #42
0
File: __main__.py Project: Erotemic/ibeis
def run_ibeis():
    r"""
    CommandLine:
        python -m ibeis
        python -m ibeis find_installed_tomcat
        python -m ibeis get_annot_groundtruth:1
    """
    #ut.set_process_title('IBEIS_main')
    #main_locals = ibeis.main()
    #ibeis.main_loop(main_locals)
    #ut.set_process_title('IBEIS_main')
    cmdline_varargs = ut.get_cmdline_varargs()
    if len(cmdline_varargs) > 0 and cmdline_varargs[0] == 'rsync':
        from ibeis.scripts import rsync_ibeisdb
        rsync_ibeisdb.rsync_ibsdb_main()
        sys.exit(0)

    if ut.get_argflag('--devcmd'):
        # Hack to let devs mess around when using an installer version
        # TODO: add more hacks
        #import utool.tests.run_tests
        #utool.tests.run_tests.run_tests()
        ut.embed()
    # Run the tests of other modules
    elif ut.get_argflag('--run-utool-tests'):
        import utool.tests.run_tests
        retcode = utool.tests.run_tests.run_tests()
        print('... exiting')
        sys.exit(retcode)
    elif ut.get_argflag('--run-vtool-tests'):
        import vtool.tests.run_tests
        retcode = vtool.tests.run_tests.run_tests()
        print('... exiting')
        sys.exit(retcode)
    elif ut.get_argflag(('--run-ibeis-tests', '--run-tests')):
        from ibeis.tests import run_tests
        retcode = run_tests.run_tests()
        print('... exiting')
        sys.exit(retcode)

    if ut.get_argflag('-e'):
        """
        ibeis -e print -a default -t default
        """
        # Run dev script if -e given
        import ibeis.dev  # NOQA
        ibeis.dev.devmain()
        print('... exiting')
        sys.exit(0)

    # Attempt to run a test using the function name alone
    # with the --tf flag
    import ibeis.tests.run_tests
    import ibeis.tests.reset_testdbs
    ignore_prefix = [
        #'ibeis.tests',
        'ibeis.control.__SQLITE3__',
        '_autogen_explicit_controller']
    ignore_suffix = ['_grave']
    func_to_module_dict = {
        'demo_bayesnet': 'ibeis.algo.hots.demobayes',
    }
    ut.main_function_tester('ibeis', ignore_prefix, ignore_suffix,
                            func_to_module_dict=func_to_module_dict)

    #if ut.get_argflag('-e'):
    #    import ibeis
    #    expt_kw = ut.get_arg_dict(ut.get_func_kwargs(ibeis.run_experiment),
    #    prefix_list=['--', '-'])
    #    ibeis.run_experiment(**expt_kw)
    #    sys.exit(0)

    doctest_modname = ut.get_argval(
        ('--doctest-module', '--tmod', '-tm', '--testmod'),
        type_=str, default=None, help_='specify a module to doctest')
    if doctest_modname is not None:
        """
        Allow any doctest to be run the main ibeis script

        python -m ibeis --tmod utool.util_str --test-align:0
        python -m ibeis --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show
        python -m ibeis --tf request_ibeis_query_L0:0 --show
        ./dist/ibeis/IBEISApp --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show  # NOQA
        ./dist/ibeis/IBEISApp --tmod utool.util_str --test-align:0
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --tmod utool.util_str --test-align:0
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-utool-tests
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-vtool-tests
        """
        print('[ibeis] Testing module')
        mod_alias_list = {
            'exptdraw': 'ibeis.expt.experiment_drawing'
        }
        doctest_modname = mod_alias_list.get(doctest_modname, doctest_modname)
        module = ut.import_modname(doctest_modname)
        (nPass, nTotal, failed_list, error_report_list) = ut.doctest_funcs(module=module)
        retcode = 1 - (len(failed_list) == 0)
        #print(module)
        sys.exit(retcode)

    import ibeis
    main_locals = ibeis.main()
    execstr = ibeis.main_loop(main_locals)
    # <DEBUG CODE>
    if 'back' in main_locals and CMD:
        #from ibeis.all_imports import *  # NOQA
        back = main_locals['back']
        front = getattr(back, 'front', None)  # NOQA
        #front = back.front
        #ui = front.ui
    ibs = main_locals['ibs']  # NOQA
    exec(execstr)
Example #43
0
def requery_knn(get_neighbors, get_axs, qfx2_vec, num_neighbs, invalid_axs=[],
                pad=2, limit=4, recover=True):
    """
    Searches for `num_neighbs` while ignoring certain matches. K is
    increased until enough valid neighbors are found or a limit is reached.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', a='default')
        >>> qreq_.load_indexer()
        >>> indexer = qreq_.indexer
        >>> qannot = qreq_.internal_qannots[1]
        >>> qfx2_vec = qannot.vecs
        >>> ibs = qreq_.ibs
        >>> qaid = qannot.aid
        >>> impossible_aids = ibs.get_annot_groundtruth(qaid, noself=False)
        >>> invalid_axs = np.array(ut.take(indexer.aid2_ax, impossible_aids))
        >>> pad = 0
        >>> limit = 1
        >>> num_neighbs = 3
        >>> def get_neighbors(vecs, temp_K):
        >>>     return indexer.flann.nn_index(vecs, temp_K, checks=indexer.checks,
        >>>                                   cores=indexer.cores)
        >>> get_axs = indexer.get_nn_axs
        >>> res = requery_knn(
        >>>     get_neighbors, get_axs, qfx2_vec, num_neighbs, invalid_axs, pad,
        >>>     limit, recover=True)
        >>> qfx2_idx, qfx2_dist = res
        >>> assert np.all(np.diff(qfx2_dist, axis=1) >= 0)

    Ignore:
        >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
        >>> from ibeis.algo.hots.requery_knn import *  # NOQA
        >>> max_k = 9
        >>> n_pts = 5
        >>> num_neighbs = 3
        >>> temp_K = num_neighbs * 2
        >>> #
        >>> # Create dummy data
        >>> rng = np.random.RandomState(0)
        >>> tx2_idx_full = rng.randint(0, 10, size=(n_pts, max_k))
        >>> tx2_idx_full[:, 0] = 0
        >>> tx2_dist_full = np.meshgrid(np.arange(max_k), np.arange(n_pts))[0] / 10
        >>> tx2_dist_full += (rng.rand(n_pts, max_k) * 10).astype(int) / 100
        >>> qfx2_vec = np.arange(n_pts)[:, None]
        >>> vecs = qfx2_vec
        >>> #
        >>> pad = 0
        >>> limit = 1
        >>> recover = True
        >>> #
        >>> invalid_axs = np.array([0, 1, 2, 5, 7, 9])
        >>> get_axs = ut.identity
        >>> #
        >>> def get_neighbors(vecs, temp_K):
        >>>     # simulates finding k nearest neighbors
        >>>     idxs = tx2_idx_full[vecs.ravel(), 0:temp_K]
        >>>     dists = tx2_dist_full[vecs.ravel(), 0:temp_K]
        >>>     return idxs, dists
        >>> #
        >>> res = requery_knn(
        >>>     get_neighbors, get_axs, qfx2_vec, num_neighbs, invalid_axs, pad,
        >>>     limit, recover=True)
        >>> qfx2_idx, qfx2_dist = res
    """

    # Alloc space for final results
    shape = (len(qfx2_vec), num_neighbs)
    final = FinalResults(shape)  # NOQA
    query = TempQuery(qfx2_vec, invalid_axs, get_neighbors, get_axs)

    temp_K = num_neighbs + pad
    assert limit > 0, 'must have at least one iteration'
    at_limit = False

    for count in it.count():
        # print('count = %r' % (count,))
        cand = query.neighbors(temp_K)
        # Find which query features have found enough neighbors
        done_flags = cand.done_flags(num_neighbs)
        if DEBUG_REQUERY:
            print('count = %r' % (count,))
            assert np.all(np.diff(cand.dists, axis=1) >= 0)
            print('done_flags = %r' % (done_flags,))
        # Move any done queries into results and compress the query
        if np.any(done_flags):
            # Get the valid part of the results
            done = cand.compress(done_flags)
            idxs, dists, trueks = done.done_part(num_neighbs)
            final.assign(done.index, idxs, dists, trueks)
            if DEBUG_REQUERY:
                assert np.all(np.diff(dists, axis=1) >= 0)
                blocks = final.qfx2_dist
                nanelem_flags = np.isnan(blocks)
                nanrow_flags = np.any(nanelem_flags, axis=1)
                assert np.all(nanelem_flags.sum(axis=1)[nanrow_flags] == num_neighbs)
                assert np.all(np.diff(blocks[~nanrow_flags], axis=1) >= 0)
                print('final.qfx2_dist')
                print(final.qfx2_dist)
            if np.all(done_flags):
                # If everything was found then we are done
                break
            else:
                # Continue query with remaining invalid results
                query.compress_inplace(~done_flags)

        # double the search space
        temp_K *= 2

        at_limit = limit is not None and count >= limit
        if at_limit:
            if len(done_flags) == 0:
                import utool
                utool.embed()
            print('[knn] Hit limit=%r and found %d/%d' % (
                limit, sum(done_flags), len(done_flags)))
            break

    if at_limit and recover:
        # If over the limit, then we need to do the best with what we have
        # otherwise we would just return nan
        best = cand.compress(~done_flags)
        print('[knn] Recover for %d features' % (len(best.index)))
        # Simply override the last indices to be valid and use those
        best.validflags[:, -num_neighbs:] = True
        # Now we can find a valid part
        idxs, dists, trueks = best.done_part(num_neighbs)
        final.assign(best.index, idxs, dists, trueks)
        if DEBUG_REQUERY:
            print('final.qfx2_dist')
            print(final.qfx2_dist)
    return final.qfx2_idx, final.qfx2_dist
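
To see the control flow of `requery_knn` without the indexer machinery, here is a self-contained sketch; a brute-force 1-D numpy search stands in for FLANN, and integer ids play the role of annotation indexes (all names here are illustrative).

import numpy as np

def requery_valid_knn(db, query, num_neighbs, invalid, limit=4):
    temp_K = num_neighbs
    for _ in range(limit):
        K = min(temp_K, len(db))
        dists = np.abs(db[None, :] - query[:, None])   # (num_query, num_db)
        idxs = np.argsort(dists, axis=1)[:, :K]        # K nearest per query
        valid = ~np.isin(idxs, invalid)                # mask forbidden ids
        if np.all(valid.sum(axis=1) >= num_neighbs):
            return np.array([row[v][:num_neighbs]
                             for row, v in zip(idxs, valid)])
        temp_K *= 2                                    # double the search space
    return None  # hit the limit; requery_knn recovers instead of giving up

neighbors = requery_valid_knn(db=np.arange(10.0), query=np.array([2.0, 7.0]),
                              num_neighbs=2, invalid=np.array([2, 7]))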
Example #44
0
    def dev_embed(self):
        ut.embed()
Example #45
0
def test_job_engine():
    """
    CommandLine:
        python -m ibeis.web.job_engine --exec-test_job_engine
        python -b -m ibeis.web.job_engine --exec-test_job_engine

        python -m ibeis.web.job_engine test_job_engine
        python -m ibeis.web.job_engine test_job_engine --bg
        python -m ibeis.web.job_engine test_job_engine --fg

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.job_engine import *  # NOQA
        >>> test_job_engine()
    """
    _init_signals()
    # now start a few clients, and fire off some requests
    client_id = np.random.randint(1000)
    jobiface = JobInterface(client_id)
    receiver = JobBackend()
    from ibeis.init import sysres
    if ut.get_argflag('--bg'):
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        receiver.initialize_background_processes(dbdir)
        print('[testzmq] parent process is looping forever')
        while True:
            time.sleep(1)
    elif ut.get_argflag('--fg'):
        jobiface.initialize_client_thread()
    else:
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        receiver.initialize_background_processes(dbdir)
        jobiface.initialize_client_thread()

    # Foreground test script
    print('... waiting for jobs')
    if ut.get_argflag('--cmd'):
        ut.embed()
        #jobiface.queue_job()
    else:
        print('[test] ... emit test1')
        callback_url = None
        callback_method = None
        args = (1,)
        jobid1 = jobiface.queue_job('helloworld', callback_url,
                                    callback_method, *args)
        jobiface.wait_for_job_result(jobid1)
        jobid_list = []

        args = ([1], [3, 4, 5])
        kwargs = dict(cfgdict={'K': 1})
        identify_jobid = jobiface.queue_job('query_chips_simple_dict',
                                            callback_url, callback_method,
                                            *args, **kwargs)
        for jobid in jobid_list:
            jobiface.wait_for_job_result(jobid)

        jobiface.wait_for_job_result(identify_jobid)
    print('FINISHED TEST SCRIPT')
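
The test above exercises a submit-then-wait contract: `queue_job` returns a job id immediately and `wait_for_job_result` blocks until the backend finishes. A minimal in-process sketch of the same contract using `concurrent.futures` (not the ZMQ engine itself):

import concurrent.futures as futures

executor = futures.ThreadPoolExecutor(max_workers=2)

def queue_job(func, *args):
    return executor.submit(func, *args)  # the future acts as the job id

def wait_for_job_result(job):
    return job.result()                  # blocks, like the engine call

job = queue_job(lambda x: x + 1, 1)
print(wait_for_job_result(job))          # -> 2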
Example #46
0
File: pgm_ext.py Project: Erotemic/ibeis
    def new_cpd(self, parents=None, pmf_func=None):
        """
        Makes a new random variable that is an instance of this template

        parents : only used to define the name of this node.
        """
        if pmf_func is None:
            pmf_func = self.pmf_func

        # --- MAKE VARIABLE ID
        def _getid(obj):
            if isinstance(obj, int):
                return str(obj)
            elif isinstance(obj, six.string_types):
                return obj
            else:
                return obj._template_id

        if not ut.isiterable(parents):
            parents = [parents]

        template_ids = [_getid(cpd) for cpd in parents]
        HACK_SAME_IDS = True
        # TODO: keep track of parent index inheritance
        # then rectify uniqueness based on that
        if HACK_SAME_IDS and ut.allsame(template_ids):
            _id = template_ids[0]
        else:
            _id = ''.join(template_ids)
        variable = ''.join([self.varpref, _id])
        #variable = '_'.join([self.varpref, '{' + _id + '}'])
        #variable = '$%s$' % (variable,)

        evidence_cpds = [cpd for cpd in parents if hasattr(cpd, 'ttype')]
        if len(evidence_cpds) == 0:
            evidence_cpds = None

        variable_card = len(self.basis)
        statename_dict = {
            variable: self.basis,
        }
        if self.evidence_ttypes is not None:
            if any(cpd.ttype != tcpd.ttype
                   for cpd, tcpd in zip(evidence_cpds, self.evidence_ttypes)):
                raise ValueError('Evidence is not of appropriate type')
            evidence_bases = [cpd.variable_statenames for cpd in evidence_cpds]
            evidence_card = list(map(len, evidence_bases))
            evidence_states = list(ut.iprod(*evidence_bases))

            for cpd in evidence_cpds:
                _dict = ut.dict_subset(cpd.statename_dict, [cpd.variable])
                statename_dict.update(_dict)

            evidence = [cpd.variable for cpd in evidence_cpds]
        else:
            if evidence_cpds is not None:
                raise ValueError('Gave evidence for evidence-less template')
            evidence = None
            evidence_card = None

        # --- MAKE TABLE VALUES
        if pmf_func is not None:
            if isinstance(pmf_func, list):
                values = np.array(pmf_func)
            else:
                values = np.array([
                    [pmf_func(vstate, *estates) for estates in evidence_states]
                    for vstate in self.basis
                ])
            ensure_normalized = True
            if ensure_normalized:
                values = values / values.sum(axis=0)
        else:
            # assume uniform
            fill_value = 1.0 / variable_card
            if evidence_card is None:
                values = np.full((1, variable_card), fill_value)
            else:
                values = np.full([variable_card] + list(evidence_card), fill_value)

        try:
            cpd = pgmpy.factors.TabularCPD(
                variable=variable,
                variable_card=variable_card,
                values=values,
                evidence=evidence,
                evidence_card=evidence_card,
                #statename_dict=statename_dict,
                state_names=statename_dict,
            )
        except Exception as ex:
            ut.printex(ex, 'Failed to create TabularCPD',
                       keys=[
                           'variable',
                           'variable_card',
                           'statename_dict',
                           'evidence_card',
                           'evidence',
                           'values.shape',
                       ])
            ut.embed()
            raise

        cpd.ttype = self.ttype
        cpd._template_ = self
        cpd._template_id = _id
        return cpd
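
For context, this is a minimal sketch of the pgmpy call that new_cpd
ultimately makes, assuming a recent pgmpy where TabularCPD lives in
pgmpy.factors.discrete and accepts state_names; the variables and
probabilities here are made up for illustration.

from pgmpy.factors.discrete import TabularCPD

# P(match | score) with both variables binary; each column sums to 1
cpd = TabularCPD(
    variable='match',
    variable_card=2,
    values=[[0.9, 0.2],   # P(match=no  | score=low), P(match=no  | score=high)
            [0.1, 0.8]],  # P(match=yes | score=low), P(match=yes | score=high)
    evidence=['score'],
    evidence_card=[2],
    state_names={'match': ['no', 'yes'], 'score': ['low', 'high']},
)
print(cpd)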
Example #47
File: _grave.py  Project: Erotemic/hesaff
def arrptr_to_np_OLD(c_arrptr, shape, arr_t, dtype):
    """
    Casts an array pointer from C to numpy

    Args:
        c_arrptr (uint64): a pointer to an array returned from C
        shape (tuple): shape of the underlying array being pointed to
        arr_t (PyCSimpleType): the ctypes datatype of c_arrptr
        dtype (dtype): numpy datatype the array will be cast to

    CommandLine:
        python2 -m pyhesaff._pyhesaff --test-detect_feats_list:0 --rebuild-hesaff
        python2 -m pyhesaff._pyhesaff --test-detect_feats_list:0
        python3 -m pyhesaff._pyhesaff --test-detect_feats_list:0

    """
    try:
        byte_t = ctypes.c_char
        itemsize_ = dtype().itemsize
        # datatype of one array element: a block of bytes of the item's size
        dtype_t = byte_t * itemsize_
        dtype_ptr_t = C.POINTER(dtype_t)
        # the pointer arrives as an integer; ctypes.cast returns an instance
        # of dtype_ptr_t referencing the same memory block
        typed_c_arrptr = c_arrptr.astype(int)
        c_arr = C.cast(typed_c_arrptr, dtype_ptr_t)
        np_arr = np.ctypeslib.as_array(c_arr, shape)       # cast to numpy
        np_arr.dtype = dtype                               # fix numpy dtype
    except Exception as ex:
        import utool as ut
        c_arrptr_dtype = c_arrptr.dtype  # NOQA
        key_list = ['c_arrptr_dtype'] + 'c_arrptr, shape, arr_t, dtype'.split(', ')
        print('itemsize(float) = %r' % np.dtype(float).itemsize)
        print('itemsize(c_char) = %r' % np.dtype(C.c_char).itemsize)
        print('itemsize(c_wchar) = %r' % np.dtype(C.c_wchar).itemsize)
        print('itemsize(c_char_p) = %r' % np.dtype(C.c_char_p).itemsize)
        print('itemsize(c_wchar_p) = %r' % np.dtype(C.c_wchar_p).itemsize)
        print('itemsize(c_int) = %r' % np.dtype(C.c_int).itemsize)
        print('itemsize(c_int32) = %r' % np.dtype(C.c_int32).itemsize)
        print('itemsize(c_int64) = %r' % np.dtype(C.c_int64).itemsize)
        print('itemsize(int) = %r' % np.dtype(int).itemsize)
        print('itemsize(float32) = %r' % np.dtype(np.float32).itemsize)
        print('itemsize(float64) = %r' % np.dtype(np.float64).itemsize)
        ut.printex(ex, keys=key_list)
        ut.embed()
        raise
    return np_arr
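
The non-graveyard version of this conversion boils down to two calls:
ctypes.cast to recover a typed pointer and np.ctypeslib.as_array to view the
memory as an ndarray. A self-contained sketch; the source buffer below is a
stand-in for memory actually returned from C.

import ctypes
import numpy as np

# build a real C array so there is a valid address to demonstrate with
src = (ctypes.c_float * 6)(*range(6))
c_arrptr = ctypes.addressof(src)  # an integer address, as C would return

# cast the integer back to a typed pointer, then view it as numpy (no copy)
c_arr = ctypes.cast(c_arrptr, ctypes.POINTER(ctypes.c_float))
np_arr = np.ctypeslib.as_array(c_arr, shape=(2, 3))
print(np_arr)  # [[0. 1. 2.] [3. 4. 5.]]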
Example #48
File: devcases.py  Project: Erotemic/ibeis
def myquery():
    r"""

    BUG::
        THERE IS A BUG SOMEWHERE: HOW IS THIS POSSIBLE?
        If everything is weighted, how did the true positive even get a score
        while the true negative did not?
        qres_copy.filtkey_list = ['ratio', 'fg', 'homogerr', 'distinctiveness']
        CORRECT STATS
        {
            'max'  : [0.832, 0.968, 0.604, 0.000],
            'min'  : [0.376, 0.524, 0.000, 0.000],
            'mean' : [0.561, 0.924, 0.217, 0.000],
            'std'  : [0.114, 0.072, 0.205, 0.000],
            'nMin' : [1, 1, 1, 51],
            'nMax' : [1, 1, 1, 1],
            'shape': (52, 4),
        }
        INCORRECT STATS
        {
            'max'  : [0.759, 0.963, 0.264, 0.000],
            'min'  : [0.379, 0.823, 0.000, 0.000],
            'mean' : [0.506, 0.915, 0.056, 0.000],
            'std'  : [0.125, 0.039, 0.078, 0.000],
            'nMin' : [1, 1, 1, 24],
            'nMax' : [1, 1, 1, 1],
            'shape': (26, 4),
        }
        #   score_diff,  tp_score,  tn_score,       p,   K,  dcvs_clip_max,  fg_power,  homogerr_power
        #        0.494,     0.494,     0.000,  73.000,   2,          0.500,     0.100,          10.000

    See how separability changes as we vary things.

    CommandLine:
        python -m ibeis.algo.hots.devcases --test-myquery
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 0
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 1
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 2

    References:
        http://en.wikipedia.org/wiki/Pareto_distribution <- look into

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> ut.dev_ipython_copypaster(myquery) if ut.inIPython() else myquery()
        >>> pt.show_if_requested()
    """
    from ibeis.algo.hots import special_query  # NOQA
    from ibeis.algo.hots import distinctiveness_normalizer  # NOQA
    from ibeis import viz  # NOQA
    import plottool as pt
    index = ut.get_argval('--index', int, 0)
    ibs, aid1, aid2, tn_aid = testdata_my_exmaples(index)
    qaids = [aid1]
    daids = [aid2] + [tn_aid]
    qvuuid = ibs.get_annot_visual_uuids(aid1)

    cfgdict_vsone = dict(
        sv_on=True,
        #sv_on=False,
        #codename='vsone_unnorm_dist_ratio_extern_distinctiveness',
        codename='vsone_unnorm_ratio_extern_distinctiveness',
        sver_output_weighting=True,
    )

    use_cache   = False
    save_qcache = False

    qres_list, qreq_ = ibs.query_chips(qaids, daids, cfgdict=cfgdict_vsone,
                                       return_request=True, use_cache=use_cache,
                                       save_qcache=save_qcache, verbose=True)

    qreq_.load_distinctiveness_normalizer()
    qres = qres_list[0]
    top_aids = qres.get_top_aids()  # NOQA
    qres_orig = qres  # NOQA

    def test_config(qreq_, qres_orig, cfgdict):
        """ function to grid search over """
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        qres_vsone = qres_copy
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(qreq_, qres_copy, **cfgdict)
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_vsone, newfsv_list, newscore_aids, filtkey)
        tp_score  = qres_copy.aid2_score[aid2]
        tn_score  = qres_copy.aid2_score[tn_aid]
        return qres_copy, tp_score, tn_score

    #[.01, .1, .2, .5, .6, .7, .8, .9, 1.0]),
    #FiltKeys = hstypes.FiltKeys
    # FIXME: Use other way of doing gridsearch
    grid_basis = distinctiveness_normalizer.DCVS_DEFAULT.get_grid_basis()
    gridsearch = ut.GridSearch(grid_basis, label='qvuuid=%r' % (qvuuid,))
    print('Begin Grid Search')
    for cfgdict in ut.ProgressIter(gridsearch, lbl='GridSearch'):
        qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, cfgdict)
        gridsearch.append_result(tp_score, tn_score)
    print('Finish Grid Search')

    # Get best result
    best_cfgdict = gridsearch.get_rank_cfgdict()
    qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, best_cfgdict)

    # Examine closely what you can do with scores
    if False:
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(qreq_, qres_copy, **cfgdict)
        ut.embed()
        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                pass
                break
                #scorex_vsone  = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: parameterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid]   = new_fsv_vsone
                aid2_fs[daid]    = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score

        # Look at plot of query products
        for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
            new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
            scores_list = np.array(new_fs_vsone)[:, None].T
            pt.plot_sorted_scores(scores_list, logscale=False, figtitle=str(daid))
        pt.iup()
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_copy, newfsv_list, newscore_aids, filtkey)

    # PRINT INFO
    import functools
    #ut.rrrr()
    get_stats_str = functools.partial(ut.get_stats_str, axis=0, newlines=True, precision=3)
    tp_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[aid2]), ':')
    tn_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[tn_aid]), ':')
    info_str_list = []
    info_str_list.append('qres_copy.filtkey_list = %r' % (qres_copy.filtkey_list,))
    info_str_list.append('CORRECT STATS')
    info_str_list.append(tp_stats_str)
    info_str_list.append('INCORRECT STATS')
    info_str_list.append(tn_stats_str)
    info_str = '\n'.join(info_str_list)
    print(info_str)

    # SHOW BEST RESULT
    #qres_copy.ishow_top(ibs, fnum=pt.next_fnum())
    #qres_orig.ishow_top(ibs, fnum=pt.next_fnum())

    # Text information
    param_lbl = 'dcvs_power'
    param_stats_str = gridsearch.get_dimension_stats_str(param_lbl)
    print(param_stats_str)

    csvtext = gridsearch.get_csv_results(10)
    print(csvtext)

    # Parameter visualization
    fnum = pt.next_fnum()
    # plot parameter influence
    param_label_list = gridsearch.get_param_lbls()
    pnum_ = pt.get_pnum_func(2, len(param_label_list))
    for px, param_label in enumerate(param_label_list):
        gridsearch.plot_dimension(param_label, fnum=fnum, pnum=pnum_(px))
    # plot match figure
    pnum2_ = pt.get_pnum_func(2, 2)
    qres_copy.show_matches(ibs, aid2, fnum=fnum, pnum=pnum2_(2))
    qres_copy.show_matches(ibs, tn_aid, fnum=fnum, pnum=pnum2_(3))
    # Add figure labels
    figtitle = 'Effect of parameters on vsone separation for a single case'
    subtitle = 'qvuuid = %r' % (qvuuid,)
    figtitle += '\n' + subtitle
    pt.set_figtitle(figtitle)
    # Save Figure
    #fig_fpath = pt.save_figure(usetitle=True)
    #print(fig_fpath)
    # Write CSV Results
    #csv_fpath = fig_fpath + '.csv.txt'
    #ut.write_to(csv_fpath, csvtext)

    #qres_copy.ishow_top(ibs)
    #from matplotlib import pyplot as plt
    #plt.show()
    #print(ut.list_str()))
    # TODO: plot max variation dims
    #import plottool as pt
    #pt.plot(p_list, diff_list)
    """
Example #49
    def wgt_embed(wgt):
        view = wgt.view  # NOQA
        import utool

        utool.embed()
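
utool.embed is a convenience wrapper for dropping into an interactive shell at
the call site; plain IPython offers the same facility directly, which is handy
when utool is not available:

import IPython

def wgt_inspect(wgt):
    view = wgt.view  # NOQA
    # opens an interactive shell with the enclosing locals visible;
    # exit the shell to resume execution
    IPython.embed()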
Example #50
    def _init_sqldbcore(ibs, request_dbversion=None):
        """
        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.control.IBEISControl import *  # NOQA
            >>> import wbia  # NOQA
            >>> #ibs = wbia.opendb('PZ_MTEST')
            >>> #ibs = wbia.opendb('PZ_Master0')
            >>> ibs = wbia.opendb('testdb1')
            >>> #ibs = wbia.opendb('PZ_Master0')

        Ignore:
            aid_list = ibs.get_valid_aids()
            #ibs.update_annot_visual_uuids(aid_list)
            vuuid_list = ibs.get_annot_visual_uuids(aid_list)
            aid_list2 =  ibs.get_annot_aids_from_visual_uuid(vuuid_list)
            assert aid_list2 == aid_list
            # v1.3.0 testdb1:264us, PZ_MTEST:3.93ms, PZ_Master0:11.6s
            %timeit ibs.get_annot_aids_from_visual_uuid(vuuid_list)
            # v1.3.1 testdb1:236us, PZ_MTEST:1.83ms, PZ_Master0:140ms

            ibs.print_imageset_table(exclude_columns=['imageset_uuid'])
        """
        from wbia.control import _sql_helpers
        from wbia.control import DB_SCHEMA

        # Before loading, ensure the database has been backed up today
        backup_idx = ut.get_argval('--loadbackup', type_=int, default=None)
        sqldb_fpath = None
        if backup_idx is not None:
            backups = _sql_helpers.get_backup_fpaths(ibs)
            logger.info('backups = %r' % (backups, ))
            sqldb_fpath = backups[backup_idx]
            logger.info('CHOSE BACKUP sqldb_fpath = %r' % (sqldb_fpath, ))
        if backup_idx is None and ibs._needs_backup():
            try:
                _sql_helpers.ensure_daily_database_backup(
                    ibs.get_ibsdir(), ibs.sqldb_fname, ibs.backupdir)
            except IOError as ex:
                ut.printex(
                    ex,
                    ('Failed making daily backup. '
                     'Run with --nobackup to disable'),
                )
                import utool

                utool.embed()
                raise
        # IBEIS SQL State Database
        if request_dbversion is None:
            ibs.db_version_expected = '2.0.0'
        else:
            ibs.db_version_expected = request_dbversion
        # TODO: add this functionality to SQLController
        if backup_idx is None:
            new_version, new_fname = dtool.sql_control.dev_test_new_schema_version(
                ibs.get_dbname(),
                ibs.get_ibsdir(),
                ibs.sqldb_fname,
                ibs.db_version_expected,
                version_next='2.0.0',
            )
            ibs.db_version_expected = new_version
            ibs.sqldb_fname = new_fname
        if sqldb_fpath is None:
            assert backup_idx is None
            sqldb_fpath = join(ibs.get_ibsdir(), ibs.sqldb_fname)
            readonly = None
        else:
            readonly = True
        ibs.db = dtool.SQLDatabaseController(
            fpath=sqldb_fpath,
            inmemory=False,
            readonly=readonly,
            always_check_metadata=False,
        )
        ibs.readonly = ibs.db.readonly

        if backup_idx is None:
            # Ensure correct schema versions
            _sql_helpers.ensure_correct_version(
                ibs,
                ibs.db,
                ibs.db_version_expected,
                DB_SCHEMA,
                verbose=ut.VERBOSE,
                dobackup=not ibs.readonly,
            )
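
The daily-backup guard above delegates to _sql_helpers; the underlying pattern
is a dated copy of the SQLite file made at most once per calendar day. A
minimal sketch of that pattern (the function name and file layout are
illustrative, not the wbia API):

import shutil
from datetime import date
from os.path import exists, join

def ensure_daily_backup(sqldb_fpath, backup_dir):
    # copy the SQLite file once per day; later calls the same day are no-ops
    backup_fpath = join(backup_dir, 'backup_%s.sqlite' % (date.today(),))
    if not exists(backup_fpath):
        shutil.copy2(sqldb_fpath, backup_fpath)
    return backup_fpath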
Example #51
def job_engine_tester():
    """
    CommandLine:
        python -m ibeis.web.job_engine --exec-job_engine_tester
        python -b -m ibeis.web.job_engine --exec-job_engine_tester

        python -m ibeis.web.job_engine job_engine_tester
        python -m ibeis.web.job_engine job_engine_tester --bg
        python -m ibeis.web.job_engine job_engine_tester --fg

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.job_engine import *  # NOQA
        >>> job_engine_tester()
    """
    _init_signals()
    # now start a few clients, and fire off some requests
    client_id = np.random.randint(1000)
    reciever = JobBackend(use_static_ports=True)
    jobiface = JobInterface(client_id, reciever.port_dict)
    from ibeis.init import sysres
    if ut.get_argflag('--bg'):
        dbdir = sysres.get_args_dbdir(defaultdb='cache',
                                      allow_newdir=False,
                                      db=None,
                                      dbdir=None)
        reciever.initialize_background_processes(dbdir)
        print('[testzmq] parent process is looping forever')
        while True:
            time.sleep(1)
    elif ut.get_argflag('--fg'):
        jobiface.initialize_client_thread()
    else:
        dbdir = sysres.get_args_dbdir(defaultdb='cache',
                                      allow_newdir=False,
                                      db=None,
                                      dbdir=None)
        reciever.initialize_background_processes(dbdir)
        jobiface.initialize_client_thread()

    # Foreground test script
    print('... waiting for jobs')
    if ut.get_argflag('--cmd'):
        ut.embed()
        #jobiface.queue_job()
    else:
        print('[test] ... emit test1')
        callback_url = None
        callback_method = None
        args = (1, )
        jobid1 = jobiface.queue_job('helloworld', callback_url,
                                    callback_method, *args)
        jobiface.wait_for_job_result(jobid1)
        jobid_list = []

        args = ([1], [3, 4, 5])
        kwargs = dict(cfgdict={'K': 1})
        identify_jobid = jobiface.queue_job('query_chips_simple_dict',
                                            callback_url, callback_method,
                                            *args, **kwargs)
        for jobid in jobid_list:
            jobiface.wait_for_job_result(jobid)

        jobiface.wait_for_job_result(identify_jobid)
    print('FINISHED TEST SCRIPT')
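
wait_for_job_result is, at heart, a poll loop against the engine's status
endpoint. A hedged sketch of that loop; get_job_status and get_job_result are
assumed method names here, and the reply format is illustrative.

import time

def wait_for_result(jobiface, jobid, freq=0.1, timeout=30.0):
    # poll until the engine reports the job complete, then fetch its result
    start = time.time()
    while time.time() - start < timeout:
        reply = jobiface.get_job_status(jobid)     # assumed API
        if reply.get('jobstatus') == 'completed':
            return jobiface.get_job_result(jobid)  # assumed API
        time.sleep(freq)
    raise RuntimeError('job %r timed out' % (jobid,))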