Example #1
def _test_indent_print():
    # Indent test code doesn't work in doctest blocks.
    import utool as ut
    flag = ut.ensure_logging()
    print('Checking indent. Should have none')
    with ut.Indenter('[INDENT] '):
        print('Checking indent. Should be indented')
    print('Should no longer be indented')
    text = ut.get_current_log_text()
    # The last line may or may not be empty.
    # Hack: use a stride of 2 because log lines now seem to be
    # written with two line breaks.
    last_lines = text.split('\n')[-8::2]
    if last_lines[-1] != '':
        assert False, 'DEV ERROR. REMOVE FIRST LINE INSTEAD OF LAST'
        last_lines = last_lines[:-1]

    #print('last_lines = %r' % (ut.repr3(last_lines)))
    try:
        assert last_lines[0].find('[INDENT] ') == -1, last_lines[0]
        assert last_lines[1].find('[INDENT] ') >= 0, 'did not indent %r' % (
            last_lines[1], )
        assert last_lines[2].find('[INDENT] ') == -1, last_lines[2]
    except AssertionError:
        print('Error. Last 3 lines')
        print(ut.repr3(last_lines))
        raise
    if not flag:
        ut.stop_logging()
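
# Minimal usage sketch (not part of the test above; assumes utool is importable):
# ut.Indenter is a context manager that prefixes everything printed inside the
# with-block, which is the behavior the assertions above check in the log text.
def _indenter_usage_sketch():
    import utool as ut
    print('no prefix')
    with ut.Indenter('[DEMO] '):
        print('this line appears with the [DEMO] prefix')
    print('no prefix again')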
Example #2
    def queue_loop(port_dict):
        iface1, iface2 = port_dict['collect_url1'], port_dict['collect_url2']
        print = partial(ut.colorprint, color='green')
        update_proctitle(queue_name)

        with ut.Indenter('[%s] ' % (queue_name, )):
            if VERBOSE_JOBS:
                print('Init make_queue_loop: name=%r' % (name, ))
            # bind the client dealer to the queue router
            rout_sock = ctx.socket(zmq.ROUTER)
            rout_sock.setsockopt_string(zmq.IDENTITY,
                                        'queue.' + name + '.' + 'ROUTER')
            rout_sock.bind(iface1)
            if VERBOSE_JOBS:
                print('bind %s_url1 = %r' % (
                    name,
                    iface1,
                ))
            # bind the server router to the queue dealer
            deal_sock = ctx.socket(zmq.DEALER)
            deal_sock.setsockopt_string(zmq.IDENTITY,
                                        'queue.' + name + '.' + 'DEALER')
            deal_sock.bind(iface2)
            if VERBOSE_JOBS:
                print('bind %s_url2 = %r' % (
                    name,
                    iface2,
                ))
            try:
                if 1:
                    # the remainder of this function can be entirely replaced with
                    zmq.device(zmq.QUEUE, rout_sock, deal_sock)
                else:
                    # but this shows what is really going on:
                    poller = zmq.Poller()
                    poller.register(rout_sock, zmq.POLLIN)
                    poller.register(deal_sock, zmq.POLLIN)
                    while True:
                        evts = dict(poller.poll())
                        # poll() returns a list of tuples [(socket, evt), (socket, evt)]
                        # dict(poll()) turns this into {socket:evt, socket:evt}
                        if rout_sock in evts:
                            msg = rout_sock.recv_multipart()
                            # ROUTER sockets prepend the identity of the jobiface,
                            # for routing replies
                            if VERBOSE_JOBS:
                                print('ROUTER relayed %r via DEALER' % (msg, ))
                            deal_sock.send_multipart(msg)
                        if deal_sock in evts:
                            msg = deal_sock.recv_multipart()
                            if VERBOSE_JOBS:
                                print('DEALER relayed %r via ROUTER' % (msg, ))
                            rout_sock.send_multipart(msg)
            except KeyboardInterrupt:
                print('Caught ctrl+c in queue loop. Gracefully exiting')
            if VERBOSE_JOBS:
                print('Exiting %s' % (loop_name, ))
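
# Hedged sketch: the manual poller loop above is what zmq.device(zmq.QUEUE, ...)
# (or the newer zmq.proxy) does internally. Endpoints here are hypothetical.
def _queue_device_sketch():
    import zmq
    ctx = zmq.Context.instance()
    frontend = ctx.socket(zmq.ROUTER)   # clients connect here
    backend = ctx.socket(zmq.DEALER)    # workers connect here
    frontend.bind('tcp://127.0.0.1:5555')
    backend.bind('tcp://127.0.0.1:5556')
    zmq.proxy(frontend, backend)        # blocks, relaying frames both ways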
Example #3
def cut_step(G, nodes, edges, n_annots, n_names, lookup_annot_idx, edge_probs, pass_values, fail_values):
    # Create annotation nodes in the graphical model.  There are <n_annots>
    # nodes and each node can be assigned to one of <n_names> possible labels
    space = np.full((n_annots,), fill_value=n_names, dtype=opengm.index_type)
    gm = opengm.gm(space, operator='adder')

    # Use one potts function for each edge
    gm = build_factor_graph(G, nodes, edges, n_annots, n_names,
                            lookup_annot_idx, use_unaries=False,
                            edge_probs=edge_probs, operator='adder')

    with ut.Indenter('[CUTS]'):
        ut.cprint('Brute Force Labels: (energy minimization)', 'blue')
        infr = opengm.inference.Bruteforce(gm, accumulator='minimizer')
        infr.infer()
        labels = rectify_labels(G, infr.arg())
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(),))

        mc_params = opengm.InfParam(maximalNumberOfConstraintsPerRound=1000000,
                                    initializeWith3Cycles=True,
                                    edgeRoundingValue=1e-08, timeOut=36000000.0,
                                    cutUp=1e+75, reductionMode=3, numThreads=0,
                                    # allowCutsWithin=?
                                    # workflow=workflow
                                    verbose=False, verboseCPLEX=False)
        infr = opengm.inference.Multicut(gm, parameter=mc_params,
                                         accumulator='minimizer')

        infr.infer()
        labels = rectify_labels(G, infr.arg())

        ut.cprint('Multicut Labels: (energy minimization)', 'blue')
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(),))

        if pass_values is not None:
            gotany = False
            for pval in pass_values:
                if all(labels == pval):
                    gotany = True
                    break
            if not gotany:
                ut.cprint('INCORRECT DID NOT GET PASS VALUES', 'red')
                print('pass_values = %r' % (pass_values,))

        if fail_values is not None:
            for fail in fail_values:
                if all(labels == fail):
                    ut.cprint('INCORRECT', 'red')
Example #4
 def get_job_result(jobiface, jobid):
     with ut.Indenter('[client %d] ' % (jobiface.id_)):
         if jobiface.verbose >= 1:
             print = partial(ut.colorprint, color='teal')
             print('----')
             print('Request result of jobid=%r' % (jobid, ))
         pair_msg = dict(action='job_result', jobid=jobid)
         # CALLER: collector_request_result
         jobiface.collect_deal_sock.send_json(pair_msg)
         if jobiface.verbose >= 3:
             print('... waiting for collector reply')
         reply = jobiface.collect_deal_sock.recv_json()
         if jobiface.verbose >= 2:
             print('got reply = %s' % (ut.repr2(reply, truncate=True), ))
     return reply
Example #5
 def debug_nnindexer(nnindexer):
     r"""
     Makes sure the indexer has valid SIFT descriptors
     """
     # FIXME: they might not agree if data has been added / removed
     init_data, extra_data = nnindexer.flann.get_indexed_data()
     with ut.Indenter('[NNINDEX_DEBUG]'):
         print('extra_data = %r' % (extra_data,))
         print('init_data = %r' % (init_data,))
         print('nnindexer.max_distance_sqrd = %r' % (nnindexer.max_distance_sqrd,))
         data_agrees = nnindexer.idx2_vec is nnindexer.flann.get_indexed_data()[0]
         if data_agrees:
             print('indexed_data agrees')
         assert vt.check_sift_validity(init_data), 'bad SIFT properties'
         assert data_agrees, 'indexed data does not agree'
Example #6
def collector_loop(port_dict, dbdir, containerized):
    """
    Service that stores completed algorithm results
    """
    import ibeis
    update_proctitle('collector_loop')
    print = partial(ut.colorprint, color='yellow')
    with ut.Indenter('[collect] '):
        collect_rout_sock = ctx.socket(zmq.ROUTER)
        collect_rout_sock.setsockopt_string(zmq.IDENTITY, 'collect.ROUTER')
        collect_rout_sock.connect(port_dict['collect_url2'])
        if VERBOSE_JOBS:
            print('connect collect_url2  = %r' % (port_dict['collect_url2'], ))

        ibs = ibeis.opendb(dbdir=dbdir, use_cache=False, web=False)
        # shelve_path = join(ut.get_shelves_dir(appname='ibeis'), 'engine')
        shelve_path = ibs.get_shelves_path()
        ut.delete(shelve_path)
        ut.ensuredir(shelve_path)

        collecter_data = {}
        awaiting_data = {}
        try:
            while True:
                # several callers here
                # CALLER: collector_notify
                # CALLER: collector_store
                # CALLER: collector_request_status
                # CALLER: collector_request_result
                idents, collect_request = rcv_multipart_json(collect_rout_sock,
                                                             print=print)
                try:
                    reply = on_collect_request(collect_request,
                                               collecter_data,
                                               awaiting_data,
                                               shelve_path,
                                               containerized=containerized)
                except Exception as ex:
                    print(ut.repr3(collect_request))
                    ut.printex(ex, 'ERROR in collection')
                    # NOTE: assumed fallback so 'reply' is always defined below
                    reply = None
                send_multipart_json(collect_rout_sock, idents, reply)
        except KeyboardInterrupt:
            print('Caught ctrl+c in collector loop. Gracefully exiting')
        if VERBOSE_JOBS:
            print('Exiting collector')
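
# Hedged sketch of the rcv_multipart_json / send_multipart_json helpers used
# above (their real definitions are not shown here): a ROUTER socket prepends
# routing identity frames, so the JSON payload is the last frame.
import json

def _rcv_multipart_json_sketch(sock):
    frames = sock.recv_multipart()
    idents, payload = frames[:-1], frames[-1]
    return idents, json.loads(payload)

def _send_multipart_json_sketch(sock, idents, obj):
    sock.send_multipart(list(idents) + [json.dumps(obj).encode('utf-8')])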
Example #7
        def wrp_cache_invalidator(self, *args, **kwargs):
            # the class must have a table_cache property
            colscache_ = self.table_cache[tblname]
            colnames_ = list(
                six.iterkeys(colscache_)) if colnames is None else colnames
            if DEBUG_API_CACHE:
                indenter = ut.Indenter('[%s]' % (tblname, ))
                indenter.start()
                print('+------')
                print(
                    'INVALIDATING tblname=%r, colnames=%r, rowidx=%r, force=%r'
                    % (tblname, colnames, rowidx, force))
                print('self = %r' % (self, ))
                print('args = %r' % (args, ))
                print('kwargs = %r' % (kwargs, ))
                print('colscache_ = ' + ut.dict_str(colscache_, truncate=1))

            # Clear the cache of any specified colname
            # when the invalidator is called
            if rowidx is None:
                for colname in colnames_:
                    kwargs_cache_ = colscache_[colname]
                    # We don't know the rowids so clear everything
                    for cache_ in six.itervalues(kwargs_cache_):
                        cache_.clear()
            else:
                rowid_list = args[rowidx]
                for colname in colnames_:
                    kwargs_cache_ = colscache_[colname]
                    # We know the rowids to delete
                    # iterate over all getter kwargs values
                    for cache_ in six.itervalues(kwargs_cache_):
                        ut.delete_dict_keys(cache_, rowid_list)

            # Perform set/delete action
            if DEBUG_API_CACHE:
                print('After:')
                print('colscache_ = ' + ut.dict_str(colscache_, truncate=1))
                print('L__________')

            writer_result = writer_func(self, *args, **kwargs)

            if DEBUG_API_CACHE:
                indenter.stop()
            return writer_result
Example #8
    def queue_job(jobiface,
                  action,
                  callback_url=None,
                  callback_method=None,
                  *args,
                  **kwargs):
        r"""
        IBEIS:
            This is just a function that lives in the main thread and ships off
            a job.

        FIXME: I do not like having callback_url and callback_method specified
               like this with args and kwargs. If these must be there then
               they should be specified first, or
               THE PREFERRED OPTION IS
               args and kwargs should not be specified without the * syntax

        The client sends messages, and receives replies after they
        have been processed by the engine.
        """
        # NAME: job_client
        with ut.Indenter('[client %d] ' % (jobiface.id_)):
            print = partial(ut.colorprint, color='blue')
            if jobiface.verbose >= 1:
                print('----')
            engine_request = {
                'action': action,
                'args': args,
                'kwargs': kwargs,
                'callback_url': callback_url,
                'callback_method': callback_method
            }
            if jobiface.verbose >= 2:
                print('Queue job: %s' % (engine_request))
            # Flow of information tags:
            # CALLS: engine_queue
            jobiface.engine_deal_sock.send_json(engine_request)
            if jobiface.verbose >= 3:
                print('..sent, waiting for response')
            # RETURNED FROM: job_client_return
            reply_notify = jobiface.engine_deal_sock.recv_json()
            if jobiface.verbose >= 2:
                print('Got reply: %s' % (reply_notify))
            jobid = reply_notify['jobid']
            return jobid
Example #9
 def find_offending_part(datetime_str_, timefmt, splitchar=' '):
     import utool as ut
     import datetime
     parts_list = datetime_str_.split(splitchar)
     fmt_list = timefmt.split(splitchar)
     if len(parts_list) == 1:
         return
     for part, fmt in zip(parts_list, fmt_list):
         print('Trying:')
         with ut.Indenter('  '):
             try:
                 print('fmt = %r' % (fmt, ))
                 print('part = %r' % (part, ))
                 datetime.datetime.strptime(part, fmt)
             except ValueError:
                 find_offending_part(part, fmt, '/')
                 print('Failed')
             else:
                 print('Passed')
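
# Hypothetical usage of the helper above: '13' is not a valid month, so the
# recursion on the '/'-separated sub-parts should report the '%m' part as Failed.
def _find_offending_part_usage_sketch():
    find_offending_part('2015/13/01 10:00:00', '%Y/%m/%d %H:%M:%S')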
Example #10
def evaluate_training_data(ibs,
                           qaids,
                           daids_list,
                           varydict,
                           nDaids_basis,
                           verbose=True):
    nError_list = []
    nDaids_list = []
    cfgdict_list2 = []
    cfgdict_list = ut.all_dict_combinations(varydict)
    for daids in ut.ProgressIter(daids_list, lbl='testing dbsize'):
        nDaids = len(daids)
        print('\n---------------------------')
        with ut.Indenter('[nDaids=%r]' % (nDaids)):
            print('nDaids = %r' % nDaids)
            for cfgdict in ut.ProgressIter(cfgdict_list,
                                           lbl='testing cfgdict'):
                qreq_ = ibs.new_query_request(qaids,
                                              daids,
                                              cfgdict=cfgdict,
                                              verbose=verbose)
                qres_list = ibs.query_chips(qreq_=qreq_, verbose=verbose)
                gt_ranks_list = [
                    qres.get_gt_ranks(ibs=ibs) for qres in qres_list
                ]
                incorrect_list = [
                    len(gt_ranks) == 0 or min(gt_ranks) != 0
                    for gt_ranks in gt_ranks_list
                ]
                nErrors = sum(incorrect_list)
                nError_list.append(nErrors)
                nDaids_list.append(nDaids)
                cfgdict_list2.append(cfgdict.copy())

    nError_list = np.array(nError_list)
    nDaids_list = np.array(nDaids_list)
    K_list = np.array([cfgdict['K'] for cfgdict in cfgdict_list2])
    return nDaids_list, K_list, nError_list
Example #11
def parse_cfgstr_list2(cfgstr_list, named_defaults_dict=None, cfgtype=None,
                       alias_keys=None, valid_keys=None, expand_nested=True,
                       strict=True, special_join_dict=None, is_nestedcfgtype=False,
                       metadata=None):
    r"""
    Parses config strings by looking up names in a dict of configs.

    DEPRICATE

    Args:
        cfgstr_list (list):
        named_defaults_dict (dict): (default = None)
        cfgtype (None): (default = None)
        alias_keys (None): (default = None)
        valid_keys (None): (default = None)
        expand_nested (bool): (default = True)
        strict (bool): (default = True)
        is_nestedcfgtype - used for annot configs so special joins aren't geometrically combined

    Note:
        Normal Case:
            --flag name

        Custom Argument Cases:
            --flag name:custom_key1=custom_val1,custom_key2=custom_val2

        Multiple Config Case:
            --flag name1:custom_args1 name2:custom_args2

        Multiple Config (special join) Case:
            (here name2 and name3 have some special interaction)
            --flag name1:custom_args1 name2:custom_args2::name3:custom_args3

        Varied Argument Case:
            --flag name:key1=[val1,val2]

    Returns:
        list: cfg_combos_list

    CommandLine:
        python -m ibeis.expt.cfghelpers --exec-parse_cfgstr_list2
        python -m ibeis.expt.cfghelpers --test-parse_cfgstr_list2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.cfghelpers import *  # NOQA
        >>> cfgstr_list = ['name', 'name:f=1', 'name:b=[1,2]', 'name1:f=1::name2:f=1,b=2']
        >>> #cfgstr_list = ['name', 'name1:f=1::name2:f=1,b=2']
        >>> named_defaults_dict = None
        >>> cfgtype = None
        >>> alias_keys = None
        >>> valid_keys = None
        >>> expand_nested = True
        >>> strict = False
        >>> special_join_dict = {'joined': True}
        >>> cfg_combos_list = parse_cfgstr_list2(cfgstr_list, named_defaults_dict,
        >>>                                      cfgtype, alias_keys, valid_keys,
        >>>                                      expand_nested, strict,
        >>>                                      special_join_dict)
        >>> print('cfg_combos_list = %s' % (ut.list_str(cfg_combos_list, nl=2),))
        >>> print(ut.depth_profile(cfg_combos_list))
        >>> cfg_list = ut.flatten(cfg_combos_list)
        >>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
        >>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
        >>> print(result)
        ['name:', 'name:f=1', 'name:b=1', 'name:b=2', 'name1:f=1,joined=True', 'name2:b=2,f=1,joined=True']
    """
    with ut.Indenter('    '):
        cfg_combos_list = []
        for cfgstr in cfgstr_list:
            cfg_combos = []
            # Parse special joined cfg case
            if cfgstr.find('::') > -1:
                special_cfgstr_list = cfgstr.split('::')
                special_combo_list = parse_cfgstr_list2(
                    special_cfgstr_list,
                    named_defaults_dict=named_defaults_dict, cfgtype=cfgtype,
                    alias_keys=alias_keys, valid_keys=valid_keys,
                    strict=strict, expand_nested=expand_nested,
                    is_nestedcfgtype=False, metadata=metadata)
                OLD = False
                if OLD:
                    special_combo = ut.flatten(special_combo_list)
                    if special_join_dict is not None:
                        for cfg in special_combo:
                            cfg.update(special_join_dict)
                else:
                    if special_join_dict is not None:
                        for special_combo in special_combo_list:
                            for cfg in special_combo:
                                cfg.update(special_join_dict)
                if is_nestedcfgtype:
                    cfg_combo = tuple([combo for combo in special_combo_list])
                else:
                    # not sure if this is right
                    cfg_combo = special_combo_list
                # FIXME DUPLICATE CODE
                if expand_nested:
                    cfg_combos.extend(cfg_combo)
                else:
                    #print('Appending: ' + str(ut.depth_profile(cfg_combo)))
                    #if ut.depth_profile(cfg_combo) == [1, 9]:
                    #    ut.embed()
                    cfg_combos_list.append(cfg_combo)
            else:
                cfgname, cfgopt_strs, subx = ut.parse_cfgstr_name_options(cfgstr)
                # --
                # Lookup named default settings
                try:
                    base_cfg_list = ut.lookup_base_cfg_list(cfgname, named_defaults_dict, metadata=metadata)
                except Exception as ex:
                    ut.printex(ex, keys=['cfgstr_list'])
                    raise
                # --
                for base_cfg in base_cfg_list:
                    cfg_combo = customize_base_cfg(
                        cfgname, cfgopt_strs, base_cfg, cfgtype, alias_keys,
                        valid_keys, strict=strict, offset=len(cfg_combos))
                    if is_nestedcfgtype:
                        cfg_combo = [cfg_combo]
                    if expand_nested:
                        cfg_combos.extend(cfg_combo)
                    else:
                        cfg_combos_list.append(cfg_combo)
            # SUBX Cannot work here because of acfg hackiness
            #if subx is not None:
            #    cfg_combo = ut.take(cfg_combo, subx)
            if expand_nested:
                cfg_combos_list.append(cfg_combos)
        #    print('Updated to: ' + str(ut.depth_profile(cfg_combos_list)))
        #print('Returning len(cfg_combos_list) = %r' % (len(cfg_combos_list),))
    return cfg_combos_list
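
# Illustration only (not ut's implementation): the "Varied Argument Case" from the
# docstring above, e.g. 'name:b=[1,2]', expands into one config dict per value
# combination, which can be modeled with itertools.product.
def _expand_varied_cfg_sketch():
    import itertools
    varied = {'f': [1], 'b': [1, 2]}   # as if parsed from 'name:f=1,b=[1,2]'
    keys = list(varied.keys())
    combos = [dict(zip(keys, vals))
              for vals in itertools.product(*(varied[k] for k in keys))]
    return combos   # [{'f': 1, 'b': 1}, {'f': 1, 'b': 2}]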
Example #12
def get_ibeis_patch_siam_dataset(**kwargs):
    """
    CommandLine:
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show --db PZ_Master1 --acfg_name default
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show --db PZ_Master1 --acfg_name timectrl
        python -m ibeis_cnn.ingest_data --test-get_ibeis_patch_siam_dataset --show --db PZ_MTEST --acfg_name unctrl --dryrun

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.ingest_data import *  # NOQA
        >>> from ibeis_cnn import draw_results
        >>> import ibeis
        >>> kwargs = {}  # ut.argparse_dict({'max_examples': None, 'num_top': 3})
        >>> dataset = get_ibeis_patch_siam_dataset(**kwargs)
        >>> ut.quit_if_noshow()
        >>> dataset.interact()
        >>> ut.show_if_requested()
    """
    datakw = ut.argparse_dict(
        {
            #'db': 'PZ_MTEST',
            'max_examples': None,
            #'num_top': 3,
            'num_top': None,
            'min_featweight': .8 if not ut.WIN32 else None,
            'controlled': True,
            'colorspace': 'gray',
            'acfg_name': None,
        },
        alias_dict={'acfg_name': ['acfg', 'a']},
        verbose=True)

    datakw.update(kwargs)

    #ut.get_func_kwargs(ingest_ibeis.get_aidpairs_and_matches)

    if datakw['acfg_name'] is not None:
        del datakw['controlled']
    if datakw['max_examples'] is None:
        del datakw['max_examples']
    if datakw['num_top'] is None:
        del datakw['num_top']

    with ut.Indenter('[LOAD IBEIS DB]'):
        import ibeis
        dbname = ut.get_argval('--db', default='PZ_MTEST')
        ibs = ibeis.opendb(dbname=dbname, defaultdb='PZ_MTEST')

    # Nets dir is the root dir for all training on this data
    training_dpath = ibs.get_neuralnet_dir()
    ut.ensuredir(training_dpath)
    print('\n\n[get_ibeis_patch_siam_dataset] START')
    #log_dir = join(training_dpath, 'logs')
    #ut.start_logging(log_dir=log_dir)

    alias_key = ibs.get_dbname() + ';' + ut.dict_str(
        datakw, nl=False, explicit=True)
    try:
        if NOCACHE_DATASET:
            raise Exception('forced cache off')
        # Try to short-circuit cached loading
        dataset = DataSet.from_alias_key(alias_key)
        dataset.setprop('ibs', lambda: ibeis.opendb(db=dbname))
        return dataset
    except Exception as ex:
        ut.printex(ex,
                   'alias definitions have changed. alias_key=%r' %
                   (alias_key, ),
                   iswarning=True)

    with ut.Indenter('[BuildDS]'):
        # Get training data pairs
        colorspace = datakw.pop('colorspace')
        patchmatch_tup = ingest_ibeis.get_aidpairs_and_matches(ibs, **datakw)
        aid1_list, aid2_list, kpts1_m_list, kpts2_m_list, fm_list, metadata_lists = patchmatch_tup
        # Extract and cache the data
        # TODO: metadata
        if ut.get_argflag('--dryrun'):
            print('exiting due to dry run')
            import sys
            sys.exit(0)
        tup = ingest_ibeis.cached_patchmetric_training_data_fpaths(
            ibs,
            aid1_list,
            aid2_list,
            kpts1_m_list,
            kpts2_m_list,
            fm_list,
            metadata_lists,
            colorspace=colorspace)
        data_fpath, labels_fpath, metadata_fpath, training_dpath, data_shape = tup
        print('\n[get_ibeis_patch_siam_dataset] FINISH\n\n')

    # hack for caching num_labels
    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=metadata_fpath,
        training_dpath=training_dpath,
        data_shape=data_shape,
        data_per_label=2,
        output_dims=1,
        num_labels=num_labels,
    )
    dataset.setprop('ibs', ibs)
    return dataset
Example #13
def get_ibeis_part_siam_dataset(**kwargs):
    """
    PARTS based network data

    CommandLine:
        python -m ibeis_cnn.ingest_data --test-get_ibeis_part_siam_dataset --show
        python -m ibeis_cnn.ingest_data --test-get_ibeis_part_siam_dataset --show --db PZ_Master1 --acfg_name timectrl
        python -m ibeis_cnn.ingest_data --test-get_ibeis_part_siam_dataset --show --db PZ_MTEST --acfg_name unctrl --dryrun

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.ingest_data import *  # NOQA
        >>> from ibeis_cnn import draw_results
        >>> import ibeis
        >>> kwargs = {}  # ut.argparse_dict({'max_examples': None, 'num_top': 3})
        >>> dataset = get_ibeis_part_siam_dataset(**kwargs)
        >>> ut.quit_if_noshow()
        >>> dataset.interact(ibs=dataset.getprop('ibs'))
        >>> ut.show_if_requested()
    """
    import ibeis
    datakw = ut.argparse_dict(
        {
            'colorspace': 'gray',
            'acfg_name': 'ctrl',
            #'db': None,
            'db': 'PZ_MTEST',
        },
        alias_dict={'acfg_name': ['acfg']},
        verbose=True)

    datakw.update(kwargs)
    print('\n\n[get_ibeis_part_siam_dataset] START')

    alias_key = ut.dict_str(datakw, nl=False, explicit=True)

    dbname = datakw.pop('db')

    try:
        if NOCACHE_DATASET:
            raise Exception('forced cache off')
        # Try to short-circuit cached loading
        dataset = DataSet.from_alias_key(alias_key)
        dataset.setprop('ibs', lambda: ibeis.opendb(db=dbname))
        return dataset
    except Exception as ex:
        ut.printex(ex,
                   'alias definitions have changed. alias_key=%r' %
                   (alias_key, ),
                   iswarning=True)

    with ut.Indenter('[LOAD IBEIS DB]'):
        ibs = ibeis.opendb(db=dbname)

    # Nets dir is the root dir for all training on this data
    training_dpath = ibs.get_neuralnet_dir()
    ut.ensuredir(training_dpath)

    with ut.Indenter('[BuildDS]'):
        # Get training data pairs
        colorspace = datakw.pop('colorspace')
        (aid_pairs, label_list,
         flat_metadata) = ingest_ibeis.get_aidpairs_partmatch(ibs, **datakw)
        # Extract and cache the data, labels, and metadata
        if ut.get_argflag('--dryrun'):
            print('exiting due to dry run')
            import sys
            sys.exit(0)
        tup = ingest_ibeis.cached_part_match_training_data_fpaths(
            ibs, aid_pairs, label_list, flat_metadata, colorspace=colorspace)
        data_fpath, labels_fpath, metadata_fpath, training_dpath, data_shape = tup
        print('\n[get_ibeis_part_siam_dataset] FINISH\n\n')

    # hack for caching num_labels
    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=metadata_fpath,
        training_dpath=training_dpath,
        data_shape=data_shape,
        data_per_label=2,
        output_dims=1,
        num_labels=num_labels,
    )
    dataset.setprop('ibs', ibs)
    return dataset
Example #14
def get_query_components(ibs, qaids):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object
        qaids (?):

    Returns:
        ?:

    CommandLine:
        python -m ibeis.algo.hots.query_helpers --test-get_query_components

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.query_helpers import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> qaids = ibs.get_valid_aids()
        >>> # execute function
        >>> result = get_query_components(ibs, qaids)
        >>> # verify results
        >>> print(result)
    """
    from ibeis.algo.hots import pipeline
    from ibeis.algo.hots import query_request
    daids = ibs.get_valid_aids()
    cfgdict = dict(with_metadata=True)
    qreq_ = query_request.new_ibeis_query_request(ibs, qaids, daids, cfgdict)
    qaid = qaids[0]
    assert len(daids) > 0, '!!! nothing to search'
    assert len(qaids) > 0, '!!! nothing to query'
    qreq_.lazy_load()
    pipeline_locals_ = pipeline.testrun_pipeline_upto(qreq_, None)
    qaid2_nns            = pipeline_locals_['qaid2_nns']
    qaid2_nnvalid0       = pipeline_locals_['qaid2_nnvalid0']
    qaid2_filtweights    = pipeline_locals_['qaid2_filtweights']
    qaid2_nnfilts        = pipeline_locals_['qaid2_nnfilts']
    qaid2_chipmatch_FILT = pipeline_locals_['qaid2_chipmatch_FILT']
    qaid2_chipmatch_SVER = pipeline_locals_['qaid2_chipmatch_SVER']
    qaid2_svtups = qreq_.metadata['qaid2_svtups']
    #---
    qaid2_qres = pipeline.chipmatch_to_resdict(qreq_, qaid2_chipmatch_SVER)
    #####################
    # Testing components
    #####################
    with ut.Indenter('[components]'):
        qfx2_idx, qfx2_dist = qaid2_nns[qaid]
        qfx2_aid = qreq_.indexer.get_nn_aids(qfx2_idx)
        qfx2_fx  = qreq_.indexer.get_nn_featxs(qfx2_idx)
        qfx2_gid = ibs.get_annot_gids(qfx2_aid)  # NOQA
        qfx2_nid = ibs.get_annot_name_rowids(qfx2_aid)  # NOQA
        filtkey_list, qfx2_scores, qfx2_valids = qaid2_nnfilts[qaid]
        qaid2_nnfilt_ORIG    = pipeline.identity_filter(qreq_, qaid2_nns)
        qaid2_chipmatch_ORIG = pipeline.build_chipmatches(qreq_, qaid2_nns, qaid2_nnfilt_ORIG)
        qaid2_qres_ORIG = pipeline.chipmatch_to_resdict(qaid2_chipmatch_ORIG, qreq_)
        qaid2_qres_FILT = pipeline.chipmatch_to_resdict(qaid2_chipmatch_FILT, qreq_)
        qaid2_qres_SVER = qaid2_qres
    #####################
    # Relevant components
    #####################
    qaid = qaids[0]
    qres_ORIG = qaid2_qres_ORIG[qaid]
    qres_FILT = qaid2_qres_FILT[qaid]
    qres_SVER = qaid2_qres_SVER[qaid]

    return locals()
Example #15
def bp_step(G, nodes, edges, n_annots, n_names, lookup_annot_idx):
    gm = build_factor_graph(G,
                            nodes,
                            edges,
                            n_annots,
                            n_names,
                            lookup_annot_idx,
                            use_unaries=False,
                            edge_probs=None,
                            operator='multiplier')

    with ut.Indenter('[BELIEF]'):
        ut.cprint('Brute Force Labels: (probability maximization)', 'blue')
        infr = opengm.inference.Bruteforce(gm, accumulator='maximizer')
        infr.infer()
        labels = rectify_labels(G, infr.arg())
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(), ))

        lpb_parmas = opengm.InfParam(
            damping=0.00,
            steps=10000,
            # convergenceBound=0,
            isAcyclic=False)
        # http://www.andres.sc/publications/opengm-2.0.2-beta-manual.pdf
        # I believe multiplier + integrator = marginalization
        # Manual says multiplier + adder = marginalization
        # Manual says multiplier + maximizer = probability maximization
        # infr = opengm.inference.TreeReweightedBp(
        LBP_algorithm = opengm.inference.BeliefPropagation
        # LBP_algorithm = opengm.inference.TreeReweightedBp

        ut.cprint('Belief Propagation (maximization)', 'blue')
        infr = LBP_algorithm(gm, parameter=lpb_parmas, accumulator='maximizer')
        infr.infer()
        labels = rectify_labels(G, infr.arg())
        pairwise_factor_idxs = gm.pairwise_factor_idxs
        factor_marginals = infr.factorMarginals(pairwise_factor_idxs)
        # print('factor_marginals =\n%r' % (factor_marginals,))
        edge_marginals_same_diff_ = [(np.diag(f).sum(),
                                      f[~np.eye(f.shape[0], dtype=bool)].sum())
                                     for f in factor_marginals]
        edge_marginals_same_diff_ = np.array(edge_marginals_same_diff_)
        edge_marginals_same_diff = edge_marginals_same_diff_.copy()
        edge_marginals_same_diff /= edge_marginals_same_diff.sum(axis=1,
                                                                 keepdims=True)
        print('Unnormalized Edge Marginals:')
        print(
            pd.DataFrame(edge_marginals_same_diff,
                         columns=['same', 'diff'],
                         index=pd.Series(edges)))
        # print('Edge marginals after Belief Propagation')
        # print(pd.DataFrame(edge_marginals_same_diff, columns=['same', 'diff'], index=pd.Series(edges)))
        print('Labels:')
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(), ))

        ut.cprint('Belief Propagation (marginalization)', 'blue')
        infr = LBP_algorithm(gm,
                             parameter=lpb_parmas,
                             accumulator='integrator')
        infr.infer()
        labels = rectify_labels(G, infr.arg())
        pairwise_factor_idxs = gm.pairwise_factor_idxs
        factor_marginals = infr.factorMarginals(pairwise_factor_idxs)
        # print('factor_marginals =\n%r' % (factor_marginals,))
        edge_marginals_same_diff_ = [(np.diag(f).sum(),
                                      f[~np.eye(f.shape[0], dtype=bool)].sum())
                                     for f in factor_marginals]
        edge_marginals_same_diff_ = np.array(edge_marginals_same_diff_)
        edge_marginals_same_diff = edge_marginals_same_diff_.copy()
        edge_marginals_same_diff /= edge_marginals_same_diff.sum(axis=1,
                                                                 keepdims=True)
        print('Unnormalized Edge Marginals:')
        print(
            pd.DataFrame(edge_marginals_same_diff,
                         columns=['same', 'diff'],
                         index=pd.Series(edges)))
        # print('Edge marginals after Belief Propagation')
        # print(pd.DataFrame(edge_marginals_same_diff, columns=['same', 'diff'], index=pd.Series(edges)))
        print('Labels:')
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(), ))

    # import plottool as pt
    # viz_factor_graph(gm)
    # # _ = pt.show_nx(G)
    # print("SHOW")
    # pt.plt.show()

    # marginals = infr.marginals(annot_idxs)
    # print('node marginals are')
    # print(pd.DataFrame(marginals, index=pd.Series(nodes)))
    return edge_marginals_same_diff
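
# NumPy-only sketch of the same/diff marginal step above: for each 2x2 pairwise
# factor marginal, the 'same' mass is the diagonal sum, the 'diff' mass is the
# off-diagonal sum, and the pair is normalized to sum to 1 (values are hypothetical).
def _edge_marginal_sketch():
    import numpy as np
    f = np.array([[0.4, 0.1],
                  [0.1, 0.4]])
    same = np.diag(f).sum()
    diff = f[~np.eye(f.shape[0], dtype=bool)].sum()
    return np.array([same, diff]) / (same + diff)   # -> array([0.8, 0.2])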
Example #16
def test_sver_wrapper():
    """
    Test to ensure cpp and python agree and that cpp is faster

    CommandLine:
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper --rebuild-sver
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper --show
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper --show --dummy
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper --show --fname1=easy1.png --fname2=easy2.png
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper --show --fname1=easy1.png --fname2=hard3.png
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper --show --fname1=carl.jpg --fname2=hard3.png

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.sver_c_wrapper import *  # NOQA
        >>> test_sver_wrapper()

    Ignore:
        %timeit call_python_version(*args)
        %timeit get_affine_inliers_cpp(*args)
    """
    import vtool.spatial_verification as sver
    import vtool.tests.dummy as dummy
    xy_thresh_sqrd = ktool.KPTS_DTYPE(.4)
    scale_thresh_sqrd = ktool.KPTS_DTYPE(2.0)
    ori_thresh = ktool.KPTS_DTYPE(TAU / 4.0)
    keys = 'xy_thresh_sqrd, scale_thresh_sqrd, ori_thresh'.split(', ')
    print(ut.dict_str(ut.dict_subset(locals(), keys)))

    def report_errors():
        pass

    if ut.get_argflag('--dummy'):
        testtup = dummy.testdata_dummy_matches()
        (kpts1, kpts2, fm_input, fs_input, rchip1, rchip2) = testtup
        fm_input = fm_input.astype(fm_dtype)
        #fm_input = fm_input[0:10].astype(fm_dtype)
        #fs_input = fs_input[0:10].astype(np.float32)
    else:
        fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png')
        fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png')
        testtup = dummy.testdata_ratio_matches(fname1, fname2)
        (kpts1, kpts2, fm_input, fs_input, rchip1, rchip2) = testtup

    # pack up call to aff hypothesis
    import vtool as vt
    import scipy.stats.mstats
    scales1 = vt.get_scales(kpts1.take(fm_input.T[0], axis=0))
    scales2 = vt.get_scales(kpts2.take(fm_input.T[1], axis=0))
    #fs_input = 1 / scipy.stats.mstats.gmean(np.vstack((scales1, scales2)))
    fs_input = scipy.stats.mstats.gmean(np.vstack((scales1, scales2)))
    print('fs_input = ' + ut.numpy_str(fs_input))
    #fs_input[0:-9] = 0
    #fs_input = np.ones(len(fm_input), dtype=fs_dtype)
    #ut.embed()
    #fs_input = scales1 * scales2
    args = (kpts1, kpts2, fm_input, fs_input, xy_thresh_sqrd,
            scale_thresh_sqrd, ori_thresh)

    ex_list = []

    try:
        with ut.Indenter('[TEST1] '):
            inlier_tup = vt.compare_implementations(
                sver.get_affine_inliers,
                get_affine_inliers_cpp,
                args,
                lbl1='py',
                lbl2='c',
                output_lbl=('aff_inliers_list', 'aff_errors_list', 'Aff_mats'))
            out_inliers, out_errors, out_mats = inlier_tup
    except AssertionError as ex:
        ex_list.append(ex)
        raise

    try:
        import functools
        with ut.Indenter('[TEST2] '):
            bestinlier_tup = vt.compare_implementations(
                functools.partial(sver.get_best_affine_inliers, forcepy=True),
                get_best_affine_inliers_cpp,
                args,
                show_output=True,
                lbl1='py',
                lbl2='c',
                output_lbl=('bestinliers', 'besterror', 'bestmat'))
            bestinliers, besterror, bestmat = bestinlier_tup
    except AssertionError as ex:
        ex_list.append(ex)
        raise

    if len(ex_list) > 0:
        raise AssertionError('some tests failed. see previous stdout')

    #num_inliers_list = np.array(map(len, out_inliers_c))
    #best_argx = num_inliers_list.argmax()
    ##best_inliers_py = out_inliers_py[best_argx]
    #best_inliers_c = out_inliers_c[best_argx]
    if ut.show_was_requested():
        import plottool as pt
        fm_output = fm_input.take(bestinliers, axis=0)
        fnum = pt.next_fnum()
        pt.figure(fnum=fnum, doclf=True, docla=True)
        pt.show_chipmatch2(rchip1,
                           rchip2,
                           kpts1,
                           kpts2,
                           fm_input,
                           ell_linewidth=5,
                           fnum=fnum,
                           pnum=(2, 1, 1))
        pt.show_chipmatch2(rchip1,
                           rchip2,
                           kpts1,
                           kpts2,
                           fm_output,
                           ell_linewidth=5,
                           fnum=fnum,
                           pnum=(2, 1, 2))
        pt.show_if_requested()
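
# Generic sketch of the compare-implementations pattern exercised above (this is
# not vt.compare_implementations itself): run both versions on the same inputs
# and assert that their outputs agree numerically.
def _compare_impls_sketch(func_py, func_c, args, atol=1e-6):
    import numpy as np
    out_py = func_py(*args)
    out_c = func_c(*args)
    for a, b in zip(out_py, out_c):
        assert np.allclose(a, b, atol=atol), 'python and C implementations disagree'
    return out_c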
Example #17
def analyze(ibsmap, qreq_dict, species_dict, path_to_file_list, params):
    print('[analyze] Beginning Analyze')
    print('[analyze] Received %d file paths' % (len(path_to_file_list)))
    # decompose the filename to get the car/person to whom this image belongs
    info_tup_list = [preprocess_fpath(ibsmap, species_dict, path_to_file, params) for path_to_file in path_to_file_list]
    is_valid_list = [tup_ is not None for tup_ in info_tup_list]

    # get the ungrouped tuples that were not None
    valid_tup_list_ug = ut.filter_items(info_tup_list, is_valid_list)
    valid_path_list_ug = ut.filter_items(path_to_file_list, is_valid_list)

    # group by species
    valid_species_list_ug = ut.get_list_column(valid_tup_list_ug, 3)
    seen_species = {}
    def get_species_tmpid(txt):
        if txt in seen_species:
            return seen_species[txt]
        else:
            seen_species[txt] = len(seen_species)
            return get_species_tmpid(txt)
    species_tmpid_list = np.array([get_species_tmpid(txt) for txt in valid_species_list_ug])
    #ibs.get_species_rowids_from_text(valid_species_list_ug)
    unique_species_rowids, groupxs = vt.group_indices(np.array(species_tmpid_list))

    grouped_valid_tup_list = vt.apply_grouping(np.array(valid_tup_list_ug, dtype=object), groupxs)
    grouped_path_list = vt.apply_grouping(np.array(valid_path_list_ug, dtype=object), groupxs)

    print('[analyze] Created  %d species groups' % (len(grouped_valid_tup_list)))
    print('[analyze] grouped_valid_tup_list = ' + ut.list_str(grouped_valid_tup_list))
    print('[analyze] grouped_path_list      = ' + ut.list_str(grouped_path_list))

    assert len(grouped_valid_tup_list) == len(grouped_path_list), 'lengths must match for zip'
    for groupx, (tup, valid_path_list) in enumerate(zip(grouped_valid_tup_list, grouped_path_list)):
        car_list, person_list, animal_list, species_list, offset_list, contributor_row_id_list = zip(*tup)

        assert ut.list_allsame(species_list)

        animal = animal_list[0]
        species = species_list[0]
        ibs = ibsmap[animal]
        with ut.Indenter('[GROUP-%d-%s]' % (groupx, species)):
            assert ((animal == 'zebra' and species == species_dict['zebra']) or
                    (animal == 'giraffe' and species == species_dict['giraffe'])), 'animal/species mismatch!'
            # Add image to database
            gid_list = ibs.add_images(valid_path_list, auto_localize=False)

            reported_time_list = list(map(vt.parse_exif_unixtime, valid_path_list))
            actual_unixtime_list = [
                reported_unixtime + offset
                for reported_unixtime, offset in
                zip(reported_time_list, offset_list)
            ]
            ibs.set_image_unixtime(gid_list, actual_unixtime_list, duplicate_behavior='filter')
            ibs.set_image_contributor_rowid(gid_list, contributor_row_id_list, duplicate_behavior='filter')

            print('[analyze] starting detection for %d images and species %s...' % (len(valid_path_list), species))
            qaids_list = ibs.detect_random_forest(gid_list, species=species)
            qaid_list, reverse_list = ut.invertible_flatten2(qaids_list)
            print('\n[analyze] detected %d animals of species %s' % (len(qaid_list), species))

            # if there were no detections, don't bother
            if not qaid_list:
                continue

            # because qreq_ is persistent we need only to update the qaid_list
            qreq_ = qreq_dict[animal]  # there is a qreq_ for each species
            qaid_list_unique, unique_inverse = np.unique(qaid_list, return_inverse=True)
            qreq_.set_external_qaids(qaid_list_unique)
            qres_list_unique = ibs.query_chips(qreq_=qreq_, verbose=False)
            qres_list = ut.list_take(qres_list_unique, unique_inverse)

            # so that we can draw a new bounding box for each detection
            detection_bbox_list = ibs.get_annot_verts(qaid_list)
            detection_bboxes_list = ut.unflatten2(detection_bbox_list, reverse_list)
            qreses_list = ut.unflatten2(qres_list, reverse_list)

            with ut.Indenter('[POSTPROCESS]'):
                for _tup in zip(valid_path_list, detection_bboxes_list, qreses_list,
                                car_list, person_list, animal_list, gid_list, qaids_list):
                    postprocess_result(ibs, _tup, params)

            with ut.Indenter('[REVIEW_CHECK]'):
                for car, person in zip(car_list, person_list):
                    check_if_need_review(person, car, params)
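
# Plain-NumPy sketch of the species grouping step above (an approximation of what
# vt.group_indices / vt.apply_grouping are used for here):
def _group_by_species_sketch():
    import numpy as np
    species_ids = np.array([0, 1, 0, 1, 1])        # hypothetical species tmpids
    order = species_ids.argsort(kind='stable')
    uniques, starts = np.unique(species_ids[order], return_index=True)
    groupxs = np.split(order, starts[1:])
    return uniques, groupxs   # -> [0, 1], [array([0, 2]), array([1, 3, 4])]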
Example #18
def precfg(ibs, acfg_name_list, test_cfg_name_list):
    r"""
    Helper to precompute information

    Args:
        ibs (IBEISController):  ibeis controller object
        acfg_name_list (list):  annotation config names
        test_cfg_name_list (list):  pipeline config names

    CommandLine:
        python -m ibeis.expt.precomputer --exec-precfg -t custom --expt-preload

        python -m ibeis.expt.precomputer --exec-precfg -t candidacy -a default:qaids=allgt --preload
        python -m ibeis.expt.precomputer --exec-precfg -t candidacy_invariance -a default:qaids=allgt --preload

        python -m ibeis.expt.precomputer --exec-precfg --delete-nn-cache

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.precomputer import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> default_acfgstrs = ['default:qaids=allgt']
        >>> acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list, default=default_acfgstrs)
        >>> test_cfg_name_list = ut.get_argval('-t', type_=list, default=['custom'])
        >>> result = precfg(ibs, acfg_name_list, test_cfg_name_list)
        >>> print(result)
    """
    # Generate list of database annotation configurations
    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(ibs, acfg_name_list)
    # Generate list of query pipeline param configs
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(test_cfg_name_list, ibs=ibs)
    #cfgx2_lbl = experiment_helpers.get_varied_cfg_lbls(cfgdict_list)

    expanded_aids_iter = ut.ProgressIter(expanded_aids_list, lbl='annot config', freq=1, autoadjust=False)
    nAcfg = len(acfg_list)

    for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
        if len(qaids) == 0:
            print('[harness] WARNING No query annotations specified')
            continue
        if len(daids) == 0:
            print('[harness] WARNING No database annotations specified')
            continue
        ut.colorprint('\n---Annot config', 'turquoise')

        nCfg     = len(pipecfg_list)   # number of configurations (cols)
        dbname = ibs.get_dbname()

        cfgiter = ut.ProgressIter(pipecfg_list, lbl='query config', freq=1, autoadjust=False, parent_index=acfgx, parent_nTotal=nAcfg)

        flag = False
        if ut.get_argflag('--delete-nn-cache'):
            ibs.delete_neighbor_cache()
            flag = True

        for cfgx, query_cfg in enumerate(cfgiter):
            print('')
            ut.colorprint(query_cfg.get_cfgstr(), 'turquoise')
            verbose = True
            with ut.Indenter('[%s cfg %d/%d]' % (dbname, (acfgx * nCfg) + cfgx + 1, nCfg * nAcfg)):

                qreq_ = ibs.new_query_request(qaids, daids, verbose=True, query_cfg=query_cfg)
                if ut.get_argflag('--preload'):
                    qreq_.lazy_preload(verbose=verbose)
                    flag = True
                if ut.get_argflag('--preload-chip'):
                    qreq_.ensure_chips(verbose=verbose, extra_tries=1)
                    flag = True
                if ut.get_argflag('--preload-feat'):
                    qreq_.ensure_features(verbose=verbose)
                    flag = True
                if ut.get_argflag('--preload-featweight'):
                    qreq_.ensure_featweights(verbose=verbose)
                    flag = True
                if ut.get_argflag('--preindex'):
                    flag = True
                    if qreq_.qparams.pipeline_root in ['vsone', 'vsmany']:
                        qreq_.load_indexer(verbose=verbose)
            assert flag is True, 'no flag specified'
        assert flag is True, 'no flag specified'
Example #19
def make_single_testres(
    ibs,
    qaids,
    daids,
    pipecfg_list,
    cfgx2_lbl,
    cfgdict_list,
    lbl,
    testnameid,
    use_cache=None,
    subindexer_partial=ut.ProgIter,
):
    """
    CommandLine:
        python -m wbia run_expt
    """
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]

    dbname = ibs.get_dbname()

    # if ut.NOT_QUIET:
    #     logger.info('[harn] Make single testres')

    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgIter(
            pipecfg_list, lbl='Building qreq_', enabled=False)
    ]

    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE

    if use_cache:
        try:
            bt_cachedir = ut.ensuredir(
                (ibs.get_cachedir(), 'BULK_TEST_CACHE2'))
            cfgstr_list = [
                qreq_.get_cfgstr(with_input=True) for qreq_ in cfgx2_qreq_
            ]
            bt_cachestr = ut.hashstr_arr27(cfgstr_list,
                                           ibs.get_dbname() + '_cfgs')
            bt_cachename = 'BULKTESTCACHE2_v2'
            testres = ut.load_cache(bt_cachedir, bt_cachename, bt_cachestr)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'brightcyan')
            return testres

    if ibs.table_cache:
        # HACK
        prev_feat_cfgstr = None

    cfgx2_cmsinfo = []
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)),
                                 lbl='pipe config',
                                 freq=1,
                                 adjust=False)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]
        cprint = ut.colorprint
        cprint('testnameid=%r' % (testnameid, ), 'green')
        cprint(
            'annot_cfgstr = %s' %
            (qreq_.get_cfgstr(with_input=True, with_pipe=False), ),
            'yellow',
        )
        cprint('pipe_cfgstr= %s' % (qreq_.get_cfgstr(with_data=False), ),
               'brightcyan')
        cprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(), ), 'cyan')
        if DRY_RUN:
            continue

        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesn't work when quiet)
            (cfgiter.parent_index * cfgiter.length) + cfgx,
            cfgiter.length * cfgiter.parent_length,
        )

        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cmsinfo = ut.load_cache(st_cachedir, st_cachename,
                                            st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    if (prev_feat_cfgstr is not None
                            and prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        # qreq_.ibs.print_cachestats_str()
                cm_list = qreq_.execute()
                cmsinfo = test_result.build_cmsinfo(cm_list, qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cmsinfo)
        if not NOMEMORY:
            # Store the results
            cfgx2_cmsinfo.append(cmsinfo)
        else:
            cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        logger.info('ran tests dryrun mode.')
        return
    if NOMEMORY:
        logger.info('ran tests in memory savings mode. Cannot Print. exiting')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl, cfgx2_cmsinfo,
                                     cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(bt_cachedir, bt_cachename, bt_cachestr, testres)
        except Exception as ex:
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
Example #20
def engine_loop(id_, port_dict, dbdir=None):
    r"""
    IBEIS:
        This will be part of a worker process with its own IBEISController
        instance.

        Needs to send where the results will go and then publish the results there.

    The engine_loop - receives messages, performs some action, and sends a reply,
    preserving the leading two message parts as routing identities
    """
    # NAME: engine_
    # CALLED_FROM: engine_queue
    import ibeis
    update_proctitle('engine_loop')
    #base_print = print  # NOQA
    print = partial(ut.colorprint, color='darkred')
    with ut.Indenter('[engine %d] ' % (id_)):
        if VERBOSE_JOBS:
            print('Initializing engine')
            print('connect engine_url2 = %r' % (port_dict['engine_url2'], ))
        assert dbdir is not None
        #ibs = ibeis.opendb(dbname)
        ibs = ibeis.opendb(dbdir=dbdir,
                           use_cache=False,
                           web=False,
                           force_serial=True)

        engine_rout_sock = ctx.socket(zmq.ROUTER)
        engine_rout_sock.connect(port_dict['engine_url2'])

        collect_deal_sock = ctx.socket(zmq.DEALER)
        collect_deal_sock.setsockopt_string(zmq.IDENTITY,
                                            'engine.collect.DEALER')
        collect_deal_sock.connect(port_dict['collect_url1'])
        if VERBOSE_JOBS:
            print('connect collect_url1 = %r' % (port_dict['collect_url1'], ))
            print('engine is initialized')

        try:
            while True:
                idents, engine_request = rcv_multipart_json(engine_rout_sock,
                                                            print=print)

                action = engine_request['action']
                jobid = engine_request['jobid']
                args = engine_request['args']
                kwargs = engine_request['kwargs']
                callback_url = engine_request['callback_url']
                callback_method = engine_request['callback_method']

                engine_result = on_engine_request(ibs, jobid, action, args,
                                                  kwargs)

                # Store results in the collector
                collect_request = dict(
                    idents=idents,
                    action='store',
                    jobid=jobid,
                    engine_result=engine_result,
                    callback_url=callback_url,
                    callback_method=callback_method,
                )
                if VERBOSE_JOBS:
                    print('...done working. pushing result to collector')
                # CALLS: collector_store
                collect_deal_sock.send_json(collect_request)
        except KeyboardInterrupt:
            print('Caught ctrl+c in engine loop. Gracefully exiting')
        # ----
        if VERBOSE_JOBS:
            print('Exiting engine loop')
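The reply path in engine_loop works because the identity frames received ahead of the JSON payload are echoed back in front of the reply. Below is a minimal, self-contained pyzmq sketch of that identity-preserving pattern; the inproc endpoint and payload are made up for illustration and are not part of the job engine.

import json
import zmq

ctx = zmq.Context.instance()
url = 'inproc://identity_demo'     # hypothetical endpoint, not engine_url2

backend = ctx.socket(zmq.DEALER)   # stands in for the queue side
backend.bind(url)
worker = ctx.socket(zmq.ROUTER)    # stands in for engine_rout_sock
worker.connect(url)

# The DEALER sends a bare JSON payload; the ROUTER receives it with the
# sender's identity frame prepended.
backend.send_multipart([json.dumps({'action': 'ping'}).encode()])
frames = worker.recv_multipart()
idents, payload = frames[:-1], frames[-1]
request = json.loads(payload)

# Echo the identity frames back so the reply is routed to the original sender.
reply = {'action': request['action'], 'status': 'ok'}
worker.send_multipart(idents + [json.dumps(reply).encode()])
print(backend.recv_multipart())    # the DEALER receives only the reply payload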
Example #21
0
def engine_queue_loop(port_dict):
    """
    Specialized queue loop
    """
    # Flow of information tags:
    # NAME: engine_queue
    update_proctitle('engine_queue_loop')

    iface1, iface2 = port_dict['engine_url1'], port_dict['engine_url2']
    name = 'engine'
    queue_name = name + '_queue'
    loop_name = queue_name + '_loop'
    print = partial(ut.colorprint, color='red')
    with ut.Indenter('[%s] ' % (queue_name, )):
        print('Init specialized make_queue_loop: name=%r' % (name, ))
        # bind the client dealer to the queue router
        rout_sock = ctx.socket(zmq.ROUTER)
        rout_sock.setsockopt_string(zmq.IDENTITY,
                                    'special_queue.' + name + '.' + 'ROUTER')
        rout_sock.bind(iface1)
        if VERBOSE_JOBS:
            print('bind %s_url1 = %r' % (
                name,
                iface1,
            ))
        # bind the server router to the queue dealer
        deal_sock = ctx.socket(zmq.DEALER)
        deal_sock.setsockopt_string(zmq.IDENTITY,
                                    'special_queue.' + name + '.' + 'DEALER')
        deal_sock.bind(iface2)
        if VERBOSE_JOBS:
            print('bind %s_url2 = %r' % (
                name,
                iface2,
            ))

        collect_deal_sock = ctx.socket(zmq.DEALER)
        collect_deal_sock.setsockopt_string(zmq.IDENTITY,
                                            queue_name + '.collect.DEALER')
        collect_deal_sock.connect(port_dict['collect_url1'])
        if VERBOSE_JOBS:
            print('connect collect_url1 = %r' % (port_dict['collect_url1'], ))
        job_counter = 0

        # but this shows what is really going on:
        poller = zmq.Poller()
        poller.register(rout_sock, zmq.POLLIN)
        poller.register(deal_sock, zmq.POLLIN)
        try:
            while True:
                evts = dict(poller.poll())
                if rout_sock in evts:
                    # HACK GET REQUEST FROM CLIENT
                    job_counter += 1
                    # CALLER: job_client
                    idents, engine_request = rcv_multipart_json(rout_sock,
                                                                num=1,
                                                                print=print)

                    #jobid = 'result_%s' % (id_,)
                    #jobid = 'result_%s' % (uuid.uuid4(),)
                    jobid = 'jobid-%04d' % (job_counter, )
                    if VERBOSE_JOBS:
                        print('Creating jobid %r' % (jobid, ))

                    # Reply immediately with a new jobid
                    reply_notify = {
                        'jobid': jobid,
                        'status': 'ok',
                        'text': 'job accepted',
                        'action': 'notification',
                    }
                    engine_request['jobid'] = jobid
                    if VERBOSE_JOBS:
                        print('...notifying collector about new job')
                    # CALLS: collector_notify
                    collect_deal_sock.send_json(reply_notify)
                    if VERBOSE_JOBS:
                        print('... notifying client that job was accepted')
                    # RETURNS: job_client_return
                    send_multipart_json(rout_sock, idents, reply_notify)
                    if VERBOSE_JOBS:
                        print('... notifying backend engine to start')
                    # CALL: engine_
                    send_multipart_json(deal_sock, idents, engine_request)
                if deal_sock in evts:
                    pass
        except KeyboardInterrupt:
            print('Caught ctrl+c in %s queue. Gracefully exiting' %
                  (loop_name, ))
        if VERBOSE_JOBS:
            print('Exiting %s queue' % (loop_name, ))
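Stripped of the sockets, the accept-then-forward handshake above is small: assign a jobid, notify the collector, acknowledge the client, and queue the work for the engine. Below is a minimal sketch with plain queues standing in for the ZMQ sockets; all names here are illustrative, not part of the source.

from collections import deque

engine_inbox = deque()     # stands in for deal_sock -> engine_loop
collector_inbox = deque()  # stands in for collect_deal_sock -> collector
job_counter = 0

def accept_request(engine_request):
    """Acknowledge a client request immediately and queue the real work."""
    global job_counter
    job_counter += 1
    jobid = 'jobid-%04d' % (job_counter,)
    reply_notify = {
        'jobid': jobid,
        'status': 'ok',
        'text': 'job accepted',
        'action': 'notification',
    }
    engine_request['jobid'] = jobid
    collector_inbox.append(reply_notify)   # notify the collector about the job
    engine_inbox.append(engine_request)    # hand the work to the engine
    return reply_notify                    # immediate reply to the client

print(accept_request({'action': 'helloworld', 'args': [], 'kwargs': {}}))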
Example #22
0
def make_single_testres(ibs,
                        qaids,
                        daids,
                        pipecfg_list,
                        cfgx2_lbl,
                        cfgdict_list,
                        lbl,
                        testnameid,
                        use_cache=None,
                        subindexer_partial=ut.ProgressIter):
    """
    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2
    """
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]

    dbname = ibs.get_dbname()

    if ut.NOT_QUIET:
        print('[harn] Make single testres')

    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgressIter(
            pipecfg_list, lbl='Building qreq_', enabled=False)
    ]

    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE

    if use_cache:
        try:
            cachetup = get_big_test_cache_info(ibs, cfgx2_qreq_)
            testres = ut.load_cache(*cachetup)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'turquoise')
            return testres

    if ibs.table_cache:
        # HACK
        prev_feat_cfgstr = None

    cfgx2_cfgresinfo = []
    #nPipeCfg = len(pipecfg_list)
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)),
                                 lbl='query config',
                                 freq=1,
                                 adjust=False,
                                 separate=True)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]

        ut.colorprint('testnameid=%r' % (testnameid, ), 'green')
        ut.colorprint(
            'annot_cfgstr = %s' %
            (qreq_.get_cfgstr(with_input=True, with_pipe=False), ), 'yellow')
        ut.colorprint(
            'pipe_cfgstr = %s' % (qreq_.get_cfgstr(with_data=False), ),
            'turquoise')
        ut.colorprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(), ),
                      'teal')
        if DRY_RUN:
            continue

        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesn't work when quiet)
            (cfgiter.parent_index * cfgiter.nTotal) + cfgx,
            cfgiter.nTotal * cfgiter.parent_nTotal)

        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                bt_cachedir = cachetup[0]
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cfgres_info = ut.load_cache(st_cachedir, st_cachename,
                                                st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    if (prev_feat_cfgstr is not None and
                            prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        #qreq_.ibs.print_cachestats_str()
                cfgres_info = get_query_result_info(qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cfgres_info)
        if not NOMEMORY:
            # Store the results
            cfgx2_cfgresinfo.append(cfgres_info)
        else:
            cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        print('ran tests in dryrun mode.')
        return
    if NOMEMORY:
        print('ran tests in memory-savings mode. Cannot print. Exiting.')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl, cfgx2_cfgresinfo,
                                     cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(*tuple(list(cachetup) + [testres]))
        except Exception as ex:
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
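The per-configuration "small test" cache above follows a load-or-compute-then-save pattern keyed on the request cfgstr. Below is a minimal sketch of that pattern, assuming the ut.load_cache / ut.save_cache helpers behave as used above; the arguments in the commented usage are placeholders taken from the example.

import utool as ut

def cached_compute(cachedir, cachename, cfgstr, compute_fn):
    # Try the cache first; a miss raises IOError, in which case we recompute
    # and write the fresh result back under the same cfgstr.
    try:
        result = ut.load_cache(cachedir, cachename, cfgstr)
    except IOError:
        result = compute_fn()
        ut.save_cache(cachedir, cachename, cfgstr, result)
    return result

# Hypothetical usage:
# cfgres_info = cached_compute(st_cachedir, 'smalltest', st_cfgstr,
#                              lambda: get_query_result_info(qreq_))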
Example #23
0
def get_rowids(depc, tablename, root_rowids, config=None, ensure=True,
               eager=True, nInput=None, _debug=None, recompute=False,
               recompute_all=False):
    """
    Returns the rowids of `tablename` that correspond to `root_rowids`
    using `config`.

    Ignore:
        tablename = 'nnindexer'
        multi_rowids = (1, 2, 3, 4, 5)
        root_rowids = [[multi_rowids]]
        import plottool as pt
        pt.ensureqt()

        from dtool.depcache_control import *  # NOQA
        from dtool.example_depcache import testdata_depc
        depc = testdata_depc()
        exec(ut.execstr_funckw(depc.get_rowids), globals())
        print(ut.depth_profile(root_rowids))
        tablename = 'neighbs'
        table = depc[tablename]  # NOQA
        import plottool as pt
        pt.ensureqt()
        _debug = depc._debug = True
        depc.get_rowids(tablename, root_rowids, config, _debug=_debug)

        pt.show_nx(depc.graph)
        for key, val in table.type_to_subgraph.items():
            pt.show_nx(val)
            pt.set_title(key)

    CommandLine:
        python -m dtool.depcache_control --exec-get_rowids
        python -m dtool.depcache_control --dump-get_rowids
        python -m dtool.depcache_control --exec-get_rowids:0

    GridParams:
        >>> param_grid = dict(
        >>>     tablename=[ 'spam', 'neighbs'] # 'spam', 'multitest_score','keypoint'],
        >>>   #tablename=['neighbs', 'keypoint', 'spam', 'multitest_score','keypoint'],
        >>> )
        >>> flat_root_ids = [1, 2, 3]
        >>> combos = ut.all_dict_combinations(param_grid)
        >>> index = 0
        >>> keys = 'tablename'.split(', ')
        >>> tablename, = ut.dict_take(combos[index], keys)

    Setup:
        >>> # DISABLE_GRID_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> exec(ut.execstr_funckw(depc.get_rowids), globals())
        >>> import plottool as pt
        >>> pt.ensureqt()
        >>> #pt.show_nx(depc.graph)

    GridExample0:
        >>> table = depc[tablename]  # NOQA
        >>> flat_root_ids = [1, 2, 3]
        >>> root_rowids = [flat_root_ids for _ in table.input_order]
        >>> print('root_rowids = %r' % (root_rowids,))
        >>> #root_rowids = [[flat_root_ids], [(flat_root_ids,)]]
        >>> #root_rowids = [list(zip(flat_root_ids)), (flat_root_ids,)]
        >>> _debug = True
        >>> depc.get_rowids(tablename, root_rowids, config, _debug=_debug)
        >>> for key, val in table.type_to_subgraph.items():
        >>>     pt.show_nx(val)
        >>>     pt.set_title(key)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> exec(ut.execstr_funckw(depc.get_rowids), globals())
        >>> root_rowids = [1, 2, 3]
        >>> tablename = 'spam'
        >>> table = depc[tablename]
        >>> kp_rowids = depc.get_rowids(tablename, root_rowids)
        >>> #result = ('prop_list = %s' % (ut.repr2(prop_list),))
        >>> #print(result)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> exec(ut.execstr_funckw(depc.get_rowids), globals())
        >>> flat_root_ids = [1, 2, 3]
        >>> kp_rowids = depc.get_rowids('keypoint', flat_root_ids)
        >>> root_rowids = [flat_root_ids] * 8
        >>> _debug = True
        >>> tablename = 'nnindexer'
        >>> tablename = 'multitest_score'
        >>> table = depc[tablename]  # NOQA
        >>> #result = ('prop_list = %s' % (ut.repr2(prop_list),))
        >>> # print(result)
    """
    _debug = depc._debug if _debug is None else _debug
    if _debug:
        print(' * root_rowids=%s' % (ut.trunc_repr(root_rowids),))
        print(' * config = %r' % (config,))
    table = depc[tablename]  # NOQA
    INDEXER_VERSION = False

    if tablename == 'neighbor_index':
        """
        python -m ibeis.core_annots --exec-compute_neighbor_index --show
        """

        # NOTE: leftover developer hook; drops into an interactive shell here
        import utool
        utool.embed()

    if INDEXER_VERSION or tablename == 'neighbs':
        compute_order = table.compute_order
        depend_order = compute_order['depend_compute_ids']
        input_order = compute_order['input_compute_ids']

        if _debug:
            print(' * input_order = %s' % (ut.repr3(input_order, nl=1),))
            print(' * depend_order = %s' % (ut.repr3(depend_order, nl=1),))
        if len(input_order) > 1:
            assert ut.depth_atleast(root_rowids, 2), (
                'input_order = %r' % (input_order,))

        with ut.Indenter('[GetRowID-%s]' % (tablename,),
                         enabled=_debug):
            # New way to get rowids
            input_level = depend_order[0]
            mid_levels = depend_order[1:-1]
            output_level = depend_order[-1]

            # List that holds a mapping from input order to input "name"
            input_order_lookup = ut.make_index_lookup(input_order)
            # Dictionary that holds the rowids computed for each table
            # while tracing the dependencies.
            rowid_lookup = ut.odict([(key, ut.odict()) for key in input_order])

            # Need to split each path into parts.
            # Each part represents another level of unflattening
            # (because root indices are all flat)

            # Handle input level
            assert input_level[0] == depc.root
            for compute_id in input_order:
                # for name in input_names:
                argx = input_order_lookup[compute_id]
                rowid_lookup[compute_id] = root_rowids[argx]
                # HACK: Flatten to scalars
                # The inputs should just be given in the "correct" nesting.
                # TODO: determine what correct nesting is.
                for i in range(5):
                    try:
                        current = rowid_lookup[compute_id]
                        rowid_lookup[compute_id] = ut.flatten(current)
                    except Exception:
                        pass

            level = 0
            if _debug:
                print('input_order_lookup = %r' % (input_order_lookup,))
                ut.printdict(rowid_lookup, 'rowid_lookup')

            def handle_level(compute_id, rowid_lookup, _recompute, level):
                if _debug:
                    print('+--- HANDLE LEVEL %d -------' % (level,))
                tablekey = compute_id[0]
                input_suff = compute_id[1]
                config_ = depc._ensure_config(tablekey, config)
                table = depc[tablekey]
                lookupkeys = [(n, input_suff) for n in table.parent_id_tablenames]
                # ordering = ut.dict_take(input_order_lookup, input_names)
                # sortx = ut.argsort(ordering)
                # FIXME: get inputs for each table.
                # input_names = ut.take(input_names, sortx)
                # lookupkeys = list(ut.iprod(table.parent_id_tablenames, input_names))
                # lookupkeys = list(zip(table.parent_id_tablenames, input_types))
                if _debug:
                    print('---- LOCALS ------')
                    ut.print_locals(compute_id, tablekey, lookupkeys, table)
                    print('L----------')
                # FIXME generalize
                _parent_ids = [rowid_lookup[tblkey] for tblkey in lookupkeys]
                if table.ismulti:
                    parent_rowidsT = [[tuple(x)] for x in _parent_ids]
                else:
                    parent_rowidsT = _parent_ids
                parent_rowidsT = np.broadcast_arrays(*parent_rowidsT)
                parent_rowids = list(zip(*parent_rowidsT))
                # Probably not right for general multi-input
                import utool
                with utool.embed_on_exception_context:
                    next_rowids = table.get_rowid(
                        parent_rowids, config=config_, eager=eager, nInput=nInput,
                        ensure=ensure, recompute=_recompute)
                rowid_lookup[compute_id] = next_rowids
                if _debug:
                    ut.printdict(rowid_lookup, 'rowid_lookup')
                    print('L___ HANDLE LEVEL %d -------' % (level,))
                return next_rowids

            # Handle mid levels
            _recompute = recompute_all
            for level, compute_id in enumerate(mid_levels, start=1):
                handle_level(compute_id, rowid_lookup, _recompute, level)
            level += 1

            # Handle the final (requested) level
            compute_id = output_level
            _recompute = recompute
            rowid_list = handle_level(compute_id, rowid_lookup,
                                      _recompute, level)
    else:
        with ut.Indenter('[GetRowID-%s]' % (tablename,),
                         enabled=_debug):
            # TODO: Get nonself rowids first,
            # then get self rowids for debugging ease
            try:
                if False:
                    recompute_ = recompute or recompute_all
                    parent_rowids = depc._get_parent_input(
                        tablename, root_rowids, config, ensure=True, _debug=None,
                        recompute=False, recompute_all=False, eager=True,
                        nInput=None)
                    config_ = depc._ensure_config(tablename, config)
                    #if onthefly:
                    #    pass
                    table = depc[tablename]
                    rowid_list = table.get_rowid(
                        parent_rowids, config=config_, eager=eager, nInput=nInput,
                        ensure=ensure, recompute=recompute_)
                else:
                    # Compute everything from the root to the requested table
                    rowid_dict = depc.get_all_descendant_rowids(
                        tablename, root_rowids, config=config, ensure=ensure,
                        eager=eager, nInput=nInput, recompute=recompute,
                        recompute_all=recompute_all, _debug=ut.countdown_flag(_debug))
                    rowid_list = rowid_dict[tablename]
            except depcache_table.ExternalStorageException:
                print('EXTERNAL EXCEPTION: one retry in get_rowids')
                rowid_dict = depc.get_all_descendant_rowids(
                    tablename, root_rowids, config=config, ensure=ensure,
                    eager=eager, nInput=nInput, recompute=recompute,
                    recompute_all=recompute_all, _debug=ut.countdown_flag(_debug))
                rowid_list = rowid_dict[tablename]
    if _debug:
        print(' * return rowid_list = %s' % (ut.trunc_repr(rowid_list),))
    return rowid_list
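Inside handle_level, the parent rowid columns are broadcast against each other and zipped into one parent tuple per output row before the child table is asked for its rowids. Below is a small standalone sketch of just that broadcasting step, with made-up rowid columns.

import numpy as np

chip_rowids = [1, 2, 3]   # hypothetical parent column
probchip_rowids = [7]     # a length-1 column broadcasts against the other
parent_rowidsT = np.broadcast_arrays(chip_rowids, probchip_rowids)
parent_rowids = list(zip(*parent_rowidsT))
# parent_rowids now holds one tuple per row: (1, 7), (2, 7), (3, 7)
print(parent_rowids)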
Example #24
0
def get_all_descendant_rowids(depc, tablename, root_rowids, config=None,
                              ensure=True, eager=True, nInput=None,
                              recompute=False, recompute_all=False,
                              levels_up=None, _debug=False):
    r"""
    Connects `root_rowids` to rowids in `tablename`, and computes all
    values needed along the way. This is the main workhorse function for
    dependency computations.

    Args:
        tablename (str): table to compute dependencies to
        root_rowids (list): rowids for ``tablename``
        config (dict): config applicable for all tables (default = None)
        ensure (bool): eager evaluation if True (default = True)
        eager (bool): (default = True)
        nInput (None): (default = None)
        recompute (bool): (default = False)
        recompute_all (bool): (default = False)
        levels_up (int): only partially compute dependencies (default = None)
        _debug (bool): (default = False)

    CommandLine:
        python -m dtool.depcache_control --exec-get_all_descendant_rowids:0
        python -m dtool.depcache_control --exec-get_all_descendant_rowids:1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> tablename = 'spam'
        >>> root_rowids = [1, 2]
        >>> config1 = {'dim_size': 500}
        >>> config2 = {'dim_size': 100}
        >>> config3 = {'dim_size': 500, 'adapt_shape': False}
        >>> ensure, eager, nInput = True, True, None
        >>> _debug = True
        >>> rowid_dict1 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config1, ensure, eager, nInput, _debug=_debug)
        >>> rowid_dict2 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config2, ensure, eager, nInput, _debug=_debug)
        >>> rowid_dict3 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config3, ensure, eager, nInput, _debug=_debug)
        >>> result1 = 'rowid_dict1 = ' + ut.repr3(rowid_dict1, nl=1)
        >>> result2 = 'rowid_dict2 = ' + ut.repr3(rowid_dict2, nl=1)
        >>> result3 = 'rowid_dict3 = ' + ut.repr3(rowid_dict3, nl=1)
        >>> result = '\n'.join([result1, result2, result3])
        >>> print(result)
        rowid_dict1 = {
            'chip': [1, 2],
            'dummy_annot': [1, 2],
            'fgweight': [1, 2],
            'keypoint': [1, 2],
            'probchip': [1, 2],
            'spam': [1, 2],
        }
        rowid_dict2 = {
            'chip': [3, 4],
            'dummy_annot': [1, 2],
            'fgweight': [3, 4],
            'keypoint': [3, 4],
            'probchip': [1, 2],
            'spam': [3, 4],
        }
        rowid_dict3 = {
            'chip': [1, 2],
            'dummy_annot': [1, 2],
            'fgweight': [5, 6],
            'keypoint': [5, 6],
            'probchip': [1, 2],
            'spam': [5, 6],
        }


    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> _debug = True
        >>> tablename = 'vsmany'
        >>> config = depc.configclass_dict['vsmany']()
        >>> root_rowids = [1, 2, 3]
        >>> ensure, eager, nInput = False, True, None
        >>> # Get rowids of algo ( should be None )
        >>> rowid_dict = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config, ensure, eager, nInput,
        >>>     _debug=_debug)
        >>> result = ut.repr3(rowid_dict, nl=1)
        >>> print(result)
        {
            'dummy_annot': [1, 2, 3],
            'vsmany': [None, None, None],
        }

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> # Make sure algo config can correctly get properties
        >>> depc = testdata_depc()
        >>> tablename = 'chip'
        >>> recompute = False
        >>> recompute_all = False
        >>> _debug = True
        >>> root_rowids = [1, 2]
        >>> configclass = depc.configclass_dict['chip']
        >>> config_ = configclass()
        >>> config1 = depc.configclass_dict['vsmany'](dim_size=500)
        >>> config2 = depc.configclass_dict['vsmany'](dim_size=100)
        >>> config = config2
        >>> prop_dicts1 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config=config1, _debug=_debug)
        >>> prop_dicts2 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config=config2, _debug=_debug)
        >>> print(prop_dicts2)
        >>> print(prop_dicts1)
        >>> assert prop_dicts1 != prop_dicts2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> exec(ut.execstr_funckw(depc.get_all_descendant_rowids), globals())
        >>> _debug = True
        >>> qaids, daids = [1, 2, 4], [2, 3, 4]
        >>> root_rowids = list(zip(*ut.product(qaids, daids)))
        >>> request = depc.new_request('vsone', qaids, daids)
        >>> results = request.execute()
        >>> tablename = 'vsone'
        >>> rowid_dict = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config=None, _debug=_debug)
    """
    # TODO: Need to have a nice way of ensuring configs dont overlap
    # via namespaces.
    _debug = depc._debug if _debug is None else _debug
    indenter = ut.Indenter('[Descend-to-%s]' % (tablename,), enabled=_debug)
    if _debug:
        indenter.start()
        print(' * GET DESCENDANT ROWIDS %s ' % (tablename,))
        print(' * config = %r' % (config,))
    dependency_levels = depc.get_dependencies(tablename)
    if levels_up is not None:
        dependency_levels = dependency_levels[:-levels_up]

    configclass_levels = [
        [depc.configclass_dict.get(tablekey, None)
         for tablekey in keys]
        for keys in dependency_levels
    ]
    if _debug:
        print('[depc] dependency_levels = %s' %
              ut.repr3(dependency_levels, nl=1))
        print('[depc] config_levels = %s' %
              ut.repr3(configclass_levels, nl=1))

    # TODO: better support for multi-edges
    if (len(root_rowids) > 0 and ut.isiterable(root_rowids[0]) and
         not depc[tablename].ismulti):
        rowid_dict = {}
        for colx, col in enumerate(root_rowids):
            rowid_dict[depc.root + '%d' % (colx + 1,)] = col
        rowid_dict[depc.root] = ut.unique_ordered(ut.flatten(root_rowids))
    else:
        rowid_dict = {depc.root: root_rowids}

    # Ensure that each level ``tablename``'s dependencies have been computed
    for level_keys in dependency_levels[1:]:
        if _debug:
            print(' * level_keys %s ' % (level_keys,))
        # For each table in the level
        for tablekey in level_keys:
            try:
                child_rowids = depc._expand_level_rowids(
                    tablename, tablekey, rowid_dict, ensure, eager, nInput,
                    config, recompute, recompute_all, _debug)
            except Exception as ex:
                table = depc[tablekey]  # NOQA
                keys = ['tablename', 'tablekey', 'rowid_dict', 'config',
                        'table', 'dependency_levels']
                ut.printex(ex, 'error expanding rowids', keys=keys)
                raise
            rowid_dict[tablekey] = child_rowids
    if _debug:
        print(' GOT DESCENDANT ROWIDS')
        indenter.stop()
    return rowid_dict
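At its core, get_all_descendant_rowids seeds rowid_dict with the root rowids and then fills in each dependency level from the rowids already computed for its parents. Below is a simplified sketch of that traversal, assuming a linear single-parent chain; the real function resolves each table's actual parents via _expand_level_rowids, and all names in the usage example are toy values.

def expand_levels(dependency_levels, root, root_rowids, compute_fn):
    """compute_fn(tablekey, parent_rowids) -> child rowids (hypothetical)."""
    rowid_dict = {root: root_rowids}
    parent = root
    for level_keys in dependency_levels[1:]:
        for tablekey in level_keys:
            # Each child table is computed from its parent's rowids.
            rowid_dict[tablekey] = compute_fn(tablekey, rowid_dict[parent])
            parent = tablekey  # simplification: assumes a single-parent chain
    return rowid_dict

# Toy chain dummy_annot -> chip -> keypoint with a fake compute function:
levels = [['dummy_annot'], ['chip'], ['keypoint']]
print(expand_levels(levels, 'dummy_annot', [1, 2],
                    lambda key, parents: [r * 10 for r in parents]))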