Example #1
def execute_and_save(qreq_miss):
    # Iterate over vsone queries in chunks.
    total_chunks = ut.get_num_chunks(len(qreq_miss.qaids), qreq_miss.chunksize)
    qaid_chunk_iter = ut.ichunks(qreq_miss.qaids, qreq_miss.chunksize)
    _prog = ut.ProgPartial(
        length=total_chunks,
        freq=1,
        label='[mc5] query chunk: ',
        prog_hook=qreq_miss.prog_hook,
        bs=False,
    )
    qaid_chunk_iter = iter(_prog(qaid_chunk_iter))

    qaid_to_cm = {}
    for qaids in qaid_chunk_iter:
        sub_qreq = qreq_miss.shallowcopy(qaids=qaids)
        cm_batch = sub_qreq.execute_pipeline()
        assert len(cm_batch) == len(qaids), 'bad alignment'
        assert all([qaid == cm.qaid for qaid, cm in zip(qaids, cm_batch)])

        # TODO: we already computed the fpaths
        # should be able to pass them in
        fpath_list = sub_qreq.get_chipmatch_fpaths(qaids)
        _prog = ut.ProgPartial(
            length=len(cm_batch),
            adjust=True,
            freq=1,
            label='saving chip matches',
            bs=True,
        )
        for cm, fpath in _prog(zip(cm_batch, fpath_list)):
            cm.save_to_fpath(fpath, verbose=False)
        qaid_to_cm.update({cm.qaid: cm for cm in cm_batch})

    return qaid_to_cm
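
Every example on this page revolves around ut.ichunks from the utool library. As a point of reference, here is a minimal self-contained sketch of the behavior these snippets assume: split an iterable into fixed-size lists, with a possibly shorter final chunk.

from itertools import islice

def ichunks(iterable, chunksize):
    # Pure-Python stand-in for the assumed behavior of ut.ichunks:
    # yield successive lists of up to `chunksize` items each.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, chunksize))
        if not chunk:
            return
        yield chunk

print(list(ichunks(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]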
Example #2
def print_priors(model, ignore_ttypes=[], title='Priors', color='blue'):
    ut.colorprint('\n --- %s ---' % (title, ), color=color)
    for ttype, cpds in model.ttype2_cpds.items():
        if ttype not in ignore_ttypes:
            for fs_ in ut.ichunks(cpds, 4):
                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
                              color)
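
The ut.hz_str call here (and in several later examples) joins multi-line table strings side by side, so each chunk of four CPDs prints as one row. A rough self-contained approximation of that helper, written purely for illustration (the real ut.hz_str may differ):

def hz_str(blocks):
    # Hypothetical stand-in for ut.hz_str: pad each multi-line block to
    # its own width and to the common height, then join row by row.
    split = [b.split('\n') for b in blocks]
    height = max(len(lines) for lines in split)
    widths = [max(len(ln) for ln in lines) for lines in split]
    rows = []
    for i in range(height):
        parts = [(lines[i] if i < len(lines) else '').ljust(w)
                 for lines, w in zip(split, widths)]
        rows.append(' '.join(parts))
    return '\n'.join(rows)

print(hz_str(['a\nbb', 'ccc\nd\ne']))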
Example #3
def write_dirty_aids(ibs, dirty_probchip_fpath_list, dirty_aids, config2_,
                     species):
    if config2_ is None:
        fw_detector = ibs.cfg.featweight_cfg.fw_detector
    else:
        fw_detector = config2_.get('fw_detector')

    if fw_detector == 'rf':
        (
            extramargin_fpath_list,
            probchip_extramargin_fpath_list,
            halfoffset_cs_list,
        ) = compute_extramargin_detectchip(ibs,
                                           dirty_aids,
                                           config2_=config2_,
                                           species=species,
                                           FACTOR=4)
        #dirty_cfpath_list  = ibs.get_annot_chip_fpath(dirty_aids, ensure=True, config2_=config2_)

        config = {
            'scale_list': [1.0],
            'output_gpath_list': probchip_extramargin_fpath_list,
            'mode': 1,
        }
        probchip_generator = randomforest.detect_gpath_list_with_species(
            ibs, extramargin_fpath_list, species, **config)
        # Evaluate generator until completion
        ut.evaluate_generator(probchip_generator)
        extramargin_mask_gen = (vt.imread(fpath, grayscale=True)
                                for fpath in probchip_extramargin_fpath_list)
        # Crop the extra margin off of the new probchips
        _iter = zip(dirty_probchip_fpath_list, extramargin_mask_gen,
                    halfoffset_cs_list)
        for (probchip_fpath, extramargin_probchip, halfmargin) in _iter:
            half_w, half_h = halfmargin
            probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
            vt.imwrite(probchip_fpath, probchip)
    elif fw_detector == 'cnn':
        # don't use extra margin here (for now)
        chip_fpath_list = ibs.get_annot_chip_fpath(dirty_aids,
                                                   config2_=config2_)
        mask_gen = ibs.generate_species_background_mask(
            chip_fpath_list, species)
        _iter = zip(dirty_probchip_fpath_list, mask_gen)
        for chunk in ut.ichunks(_iter, 64):
            for probchip_fpath, probchip in ut.ProgressIter(
                    chunk,
                    lbl='write probchip chunk',
                    adjust=True,
                    time_thresh=30.0):
                probchip = postprocess_mask(probchip)
                vt.imwrite(probchip_fpath, probchip)
    else:
        raise NotImplementedError('bad fw_detector=%r' % (fw_detector, ))
Example #4
def print_factors(model, factor_list):
    if hasattr(model, 'var2_cpd'):
        semtypes = [model.var2_cpd[f.variables[0]].ttype for f in factor_list]
    else:
        semtypes = [0] * len(factor_list)
    for type_, factors in ut.group_items(factor_list, semtypes).items():
        logger.info('Result Factors (%r)' % (type_, ))
        factors = ut.sortedby(factors, [f.variables[0] for f in factors])
        for fs_ in ut.ichunks(factors, 4):
            ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                          'yellow')
Example #5
def print_factors(model, factor_list):
    if hasattr(model, 'var2_cpd'):
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
    else:
        semtypes = [0] * len(factor_list)
    for type_, factors in ut.group_items(factor_list, semtypes).items():
        print('Result Factors (%r)' % (type_,))
        factors = ut.sortedby(factors, [f.variables[0] for f in factors])
        for fs_ in ut.ichunks(factors, 4):
            ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                          'yellow')
Example #6
def execute_query2(ibs, qreq_, verbose, save_qcache, batch_size=None):
    """
    Breaks up query request into several subrequests
    to process "more efficiently" and safer as well.
    """
    qreq_.lazy_preload(verbose=verbose and ut.NOT_QUIET)
    all_qaids = qreq_.qaids
    print('len(missed_qaids) = %r' % (len(all_qaids), ))
    qaid2_cm = {}
    # vsone must have a chunksize of 1
    if batch_size is None:
        if HOTS_BATCH_SIZE is None:
            hots_batch_size = ibs.cfg.other_cfg.hots_batch_size
        else:
            hots_batch_size = HOTS_BATCH_SIZE
    else:
        hots_batch_size = batch_size
    chunksize = 1 if qreq_.qparams.vsone else hots_batch_size
    # Iterate over vsone queries in chunks.
    # This minimizes lost computation if a qreq_ crashes
    nTotalChunks = ut.get_nTotalChunks(len(all_qaids), chunksize)
    qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
    _qreq_iter = (qreq_.shallowcopy(qaids=qaids) for qaids in qaid_chunk_iter)
    sub_qreq_iter = ut.ProgressIter(_qreq_iter,
                                    nTotal=nTotalChunks,
                                    freq=1,
                                    lbl='[mc4] query chunk: ',
                                    prog_hook=qreq_.prog_hook)
    for sub_qreq_ in sub_qreq_iter:
        if ut.VERBOSE:
            print('Generating vsmany chunk')
        sub_cm_list = pipeline.request_ibeis_query_L0(ibs,
                                                      sub_qreq_,
                                                      verbose=verbose)
        assert len(sub_qreq_.qaids) == len(sub_cm_list), 'not aligned'
        assert all([
            qaid == cm.qaid for qaid, cm in zip(sub_qreq_.qaids, sub_cm_list)
        ]), 'not corresponding'
        if save_qcache:
            fpath_list = qreq_.get_chipmatch_fpaths(sub_qreq_.qaids)
            _iter = zip(sub_cm_list, fpath_list)
            _iter = ut.ProgressIter(_iter,
                                    nTotal=len(sub_cm_list),
                                    lbl='saving chip matches',
                                    adjust=True,
                                    freq=1)
            for cm, fpath in _iter:
                cm.save_to_fpath(fpath, verbose=False)
        else:
            if ut.VERBOSE:
                print('[mc4] not saving vsmany chunk')
        qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
    return qaid2_cm
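
Stripped of the ibeis-specific objects, execute_query2 is a chunk-and-checkpoint loop: process one chunk, persist its results, move on, so a crash loses at most one chunk of work. A minimal sketch of that pattern with made-up helper names (none of these are part of the ibeis API):

def run_in_chunks(items, chunksize, process_chunk, save_result):
    # Process items chunk by chunk, checkpointing each chunk's results
    # before moving on to the next one.
    results = {}
    for start in range(0, len(items), chunksize):
        chunk = items[start:start + chunksize]
        chunk_results = process_chunk(chunk)
        assert len(chunk_results) == len(chunk), 'results must align with inputs'
        for item, result in zip(chunk, chunk_results):
            save_result(item, result)
            results[item] = result
    return results

store = {}
out = run_in_chunks(list(range(7)), 3, lambda c: [x * x for x in c], store.__setitem__)
assert out == store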
Example #7
def execute_query2(ibs, qreq_, verbose, save_qcache, batch_size=None):
    """
    Breaks up query request into several subrequests
    to process "more efficiently" and safer as well.
    """
    qreq_.lazy_preload(verbose=verbose and ut.NOT_QUIET)
    all_qaids = qreq_.get_external_qaids()
    print('len(missed_qaids) = %r' % (len(all_qaids),))
    qaid2_cm = {}
    # vsone must have a chunksize of 1
    if batch_size is None:
        if HOTS_BATCH_SIZE is None:
            hots_batch_size = ibs.cfg.other_cfg.hots_batch_size
        else:
            hots_batch_size = HOTS_BATCH_SIZE
    else:
        hots_batch_size = batch_size
    chunksize = 1 if qreq_.qparams.vsone else hots_batch_size
    # Iterate over vsone queries in chunks. This ensures that we dont lose
    # too much time if a qreq_ crashes after the 2000th nn index.
    nTotalChunks    = ut.get_nTotalChunks(len(all_qaids), chunksize)
    qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
    _qreq_iter = (
        qreq_.shallowcopy(qaids=qaids)
        for qaids in qaid_chunk_iter
    )
    sub_qreq_iter = ut.ProgressIter(
        _qreq_iter, nTotal=nTotalChunks, freq=1,
        lbl='[mc4] query chunk: ',
        prog_hook=qreq_.prog_hook)
    for sub_qreq_ in sub_qreq_iter:
        if ut.VERBOSE:
            print('Generating vsmany chunk')
        sub_cm_list = pipeline.request_ibeis_query_L0(
            ibs, sub_qreq_, verbose=verbose)
        assert len(sub_qreq_.get_external_qaids()) == len(sub_cm_list)
        assert all([qaid == cm.qaid for qaid, cm in zip(sub_qreq_.get_external_qaids(), sub_cm_list)])
        if save_qcache:
            fpath_list = qreq_.get_chipmatch_fpaths(sub_qreq_.get_external_qaids())
            _iter = zip(sub_cm_list, fpath_list)
            _iter = ut.ProgressIter(_iter, nTotal=len(sub_cm_list),
                                    lbl='saving chip matches', adjust=True, freq=1)
            for cm, fpath in _iter:
                cm.save_to_fpath(fpath, verbose=False)
        else:
            if ut.VERBOSE:
                print('[mc4] not saving vsmany chunk')
        qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
    return qaid2_cm
Example #8
def write_dirty_aids(ibs, dirty_probchip_fpath_list, dirty_aids, config2_, species):
    if config2_ is None:
        featweight_detector = ibs.cfg.featweight_cfg.featweight_detector
    else:
        featweight_detector = config2_.get('featweight_detector')

    if featweight_detector == 'rf':
        (extramargin_fpath_list,
         probchip_extramargin_fpath_list,
         halfoffset_cs_list,
         ) = compute_extramargin_detectchip(
             ibs, dirty_aids, config2_=config2_, species=species, FACTOR=4)
        #dirty_cfpath_list  = ibs.get_annot_chip_fpath(dirty_aids, ensure=True, config2_=config2_)

        config = {
            'scale_list': [1.0],
            'output_gpath_list': probchip_extramargin_fpath_list,
            'mode': 1,
        }
        probchip_generator = randomforest.detect_gpath_list_with_species(
            ibs, extramargin_fpath_list, species, **config)
        # Evaluate generator until completion
        ut.evaluate_generator(probchip_generator)
        extramargin_mask_gen = (
            vt.imread(fpath, grayscale=True) for fpath in probchip_extramargin_fpath_list
        )
        # Crop the extra margin off of the new probchips
        _iter = zip(dirty_probchip_fpath_list,
                    extramargin_mask_gen,
                    halfoffset_cs_list)
        for (probchip_fpath, extramargin_probchip, halfmargin) in _iter:
            half_w, half_h = halfmargin
            probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
            vt.imwrite(probchip_fpath, probchip)
    elif featweight_detector == 'cnn':
        # don't use extra margin here (for now)
        chip_fpath_list = ibs.get_annot_chip_fpath(dirty_aids, config2_=config2_)
        mask_gen = ibs.generate_species_background_mask(chip_fpath_list, species)
        _iter = zip(dirty_probchip_fpath_list, mask_gen)
        for chunk in ut.ichunks(_iter, 64):
            for probchip_fpath, probchip in ut.ProgressIter(chunk, lbl='write probchip chunk', adjust=True, time_thresh=30.0):
                probchip = postprocess_mask(probchip)
                vt.imwrite(probchip_fpath, probchip)
    else:
        raise NotImplementedError('bad featweight_detector=%r' % (featweight_detector,))
Example #9
def testshow_colors(rgb_list, gray=ut.get_argflag('--gray')):
    """

    colors = ['r', 'b', 'purple', 'orange', 'deeppink', 'g']

    colors = list(mcolors.CSS4_COLORS.keys())

    CommandLine:
        python -m wbia.plottool.color_funcs testshow_colors --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.plottool.color_funcs import *  # NOQA
        >>> colors = ut.get_argval('--colors', type_=list, default=['k', 'r'])
        >>> ut.quit_if_noshow()
        >>> rgb_list = ut.emap(ensure_base01, colors)
        >>> testshow_colors(rgb_list)
        >>> import wbia.plottool as pt
        >>> pt.show_if_requested()
    """
    import wbia.plottool as pt
    import vtool as vt

    block = np.zeros((5, 5, 3))
    block_list = [block + color[0:3] for color in rgb_list]
    # print(ut.repr2(block_list))
    # print(ut.repr2(rgb_list))
    chunks = ut.ichunks(block_list, 10)
    stacked_chunk = []
    for chunk in chunks:
        stacked_chunk.append(vt.stack_image_list(chunk, vert=False))
    stacked_block = vt.stack_image_list(stacked_chunk, vert=True)
    # convert to bgr
    stacked_block = stacked_block[:, :, ::-1]
    uint8_img = (255 * stacked_block).astype(np.uint8)
    if gray:
        import cv2

        uint8_img = cv2.cvtColor(uint8_img, cv2.COLOR_RGB2GRAY)
    pt.imshow(uint8_img)
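
The chunk-then-stack step above lays the color tiles out in rows of ten and stacks the rows vertically. A numpy-only sketch of the same idea, assuming equally sized H x W x 3 tiles (vt.stack_image_list also handles mismatched sizes, which is glossed over here by padding the last row):

import numpy as np

def stack_in_grid(blocks, per_row=10):
    # Stack blocks horizontally in rows of `per_row`, pad the shorter
    # last row to the widest row, then stack the rows vertically.
    rows = [np.hstack(blocks[i:i + per_row])
            for i in range(0, len(blocks), per_row)]
    width = max(r.shape[1] for r in rows)
    rows = [np.pad(r, ((0, 0), (0, width - r.shape[1]), (0, 0))) for r in rows]
    return np.vstack(rows)

blocks = [np.full((5, 5, 3), c / 12.0) for c in range(12)]
print(stack_in_grid(blocks).shape)  # (10, 50, 3): two rows of tiles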
Example #10
def execute_query2(qreq_,
                   verbose,
                   save_qcache,
                   batch_size=None,
                   use_supercache=False):
    """
    Breaks up query request into several subrequests
    to process "more efficiently" and safer as well.
    """
    if qreq_.prog_hook is not None:
        preload_hook, query_hook = qreq_.prog_hook.subdivide(
            spacing=[0, 0.15, 0.8])
        preload_hook(0, lbl='preloading')
        qreq_.prog_hook = query_hook
    else:
        preload_hook = None
    # Load features / weights for all annotations
    qreq_.lazy_preload(prog_hook=preload_hook,
                       verbose=verbose and ut.NOT_QUIET)

    all_qaids = qreq_.qaids
    logger.info('len(missed_qaids) = %r' % (len(all_qaids), ))
    qaid2_cm = {}
    # vsone must have a chunksize of 1
    if batch_size is None:
        if HOTS_BATCH_SIZE is None:
            hots_batch_size = qreq_.ibs.cfg.other_cfg.hots_batch_size
            # hots_batch_size = 256
        else:
            hots_batch_size = HOTS_BATCH_SIZE
    else:
        hots_batch_size = batch_size
    chunksize = 1 if qreq_.qparams.vsone else hots_batch_size

    # Iterate over vsone queries in chunks.
    n_total_chunks = ut.get_num_chunks(len(all_qaids), chunksize)
    qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
    _qreq_iter = (qreq_.shallowcopy(qaids=qaids) for qaids in qaid_chunk_iter)
    sub_qreq_iter = ut.ProgIter(
        _qreq_iter,
        length=n_total_chunks,
        freq=1,
        label='[mc4] query chunk: ',
        prog_hook=qreq_.prog_hook,
    )
    for sub_qreq_ in sub_qreq_iter:
        if ut.VERBOSE:
            logger.info('Generating vsmany chunk')
        sub_cm_list = pipeline.request_wbia_query_L0(qreq_.ibs,
                                                     sub_qreq_,
                                                     verbose=verbose)
        assert len(sub_qreq_.qaids) == len(sub_cm_list), 'not aligned'
        assert all([
            qaid == cm.qaid for qaid, cm in zip(sub_qreq_.qaids, sub_cm_list)
        ]), 'not corresponding'
        if save_qcache:
            fpath_list = list(
                qreq_.get_chipmatch_fpaths(sub_qreq_.qaids,
                                           super_qres_cache=use_supercache))
            _iter = zip(sub_cm_list, fpath_list)
            _iter = ut.ProgIter(
                _iter,
                length=len(sub_cm_list),
                label='saving chip matches',
                adjust=True,
                freq=1,
            )
            for cm, fpath in _iter:
                cm.save_to_fpath(fpath, verbose=False)
        else:
            if ut.VERBOSE:
                logger.info('[mc4] not saving vsmany chunk')
        qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
    return qaid2_cm
Example #11
def draw_twoday_count(ibs, visit_info_list_):
    import copy
    visit_info_list = copy.deepcopy(visit_info_list_)

    aids_day1, aids_day2 = ut.take_column(visit_info_list_, 'aids')
    nids_day1, nids_day2 = ut.take_column(visit_info_list_, 'unique_nids')
    resight_nids = ut.isect(nids_day1, nids_day2)

    if False:
        # HACK REMOVE DATA TO MAKE THIS FASTER
        num = 20
        for info in visit_info_list:
            non_resight_nids = list(set(info['unique_nids']) - set(resight_nids))
            sample_nids2 = non_resight_nids[0:num] + resight_nids[:num]
            info['grouped_aids'] = ut.dict_subset(info['grouped_aids'], sample_nids2)
            info['unique_nids'] = sample_nids2

    # Build a graph of matches
    if False:

        debug = False

        for info in visit_info_list:
            edges = []
            grouped_aids = info['grouped_aids']

            aids_list = list(grouped_aids.values())
            ams_list = ibs.get_annotmatch_rowids_in_cliques(aids_list)
            aids1_list = ibs.unflat_map(ibs.get_annotmatch_aid1, ams_list)
            aids2_list = ibs.unflat_map(ibs.get_annotmatch_aid2, ams_list)
            for ams, aids, aids1, aids2 in zip(ams_list, aids_list, aids1_list, aids2_list):
                edge_nodes = set(aids1 + aids2)
                ##if len(edge_nodes) != len(set(aids)):
                #    #print('--')
                #    #print('aids = %r' % (aids,))
                #    #print('edge_nodes = %r' % (edge_nodes,))
                bad_aids = edge_nodes - set(aids)
                if len(bad_aids) > 0:
                    print('bad_aids = %r' % (bad_aids,))
                unlinked_aids = set(aids) - edge_nodes
                mst_links = list(ut.itertwo(list(unlinked_aids) + list(edge_nodes)[:1]))
                bad_aids.add(None)
                user_links = [(u, v) for (u, v) in zip(aids1, aids2) if u not in bad_aids and v not in bad_aids]
                new_edges = mst_links + user_links
                new_edges = [(int(u), int(v)) for u, v in new_edges if u not in bad_aids and v not in bad_aids]
                edges += new_edges
            info['edges'] = edges

        # Add edges between days
        grouped_aids1, grouped_aids2 = ut.take_column(visit_info_list, 'grouped_aids')
        nids_day1, nids_day2 = ut.take_column(visit_info_list, 'unique_nids')
        resight_nids = ut.isect(nids_day1, nids_day2)

        resight_aids1 = ut.take(grouped_aids1, resight_nids)
        resight_aids2 = ut.take(grouped_aids2, resight_nids)
        #resight_aids3 = [list(aids1) + list(aids2) for aids1, aids2 in zip(resight_aids1, resight_aids2)]

        ams_list = ibs.get_annotmatch_rowids_between_groups(resight_aids1, resight_aids2)
        aids1_list = ibs.unflat_map(ibs.get_annotmatch_aid1, ams_list)
        aids2_list = ibs.unflat_map(ibs.get_annotmatch_aid2, ams_list)

        between_edges = []
        for ams, aids1, aids2, rawaids1, rawaids2 in zip(ams_list, aids1_list, aids2_list, resight_aids1, resight_aids2):
            link_aids = aids1 + aids2
            rawaids3 = rawaids1 + rawaids2
            badaids = ut.setdiff(link_aids, rawaids3)
            assert not badaids
            user_links = [(int(u), int(v)) for (u, v) in zip(aids1, aids2)
                          if u is not None and v is not None]
            # HACK THIS OFF
            user_links = []
            if len(user_links) == 0:
                # Hack in an edge
                between_edges += [(rawaids1[0], rawaids2[0])]
            else:
                between_edges += user_links

        assert np.all(0 == np.diff(np.array(ibs.unflat_map(ibs.get_annot_nids, between_edges)), axis=1))

        import plottool_ibeis as pt
        import networkx as nx
        #pt.qt4ensure()
        #len(list(nx.connected_components(graph1)))
        #print(ut.graph_info(graph1))

        # Layout graph
        layoutkw = dict(
            prog='neato',
            draw_implicit=False, splines='line',
            #splines='curved',
            #splines='spline',
            #sep=10 / 72,
            #prog='dot', rankdir='TB',
        )

        def translate_graph_to_origin(graph):
            x, y, w, h = ut.get_graph_bounding_box(graph)
            ut.translate_graph(graph, (-x, -y))

        def stack_graphs(graph_list, vert=False, pad=None):
            graph_list_ = [g.copy() for g in graph_list]
            for g in graph_list_:
                translate_graph_to_origin(g)
            bbox_list = [ut.get_graph_bounding_box(g) for g in graph_list_]
            if vert:
                dim1 = 3
                dim2 = 2
            else:
                dim1 = 2
                dim2 = 3
            dim1_list = np.array([bbox[dim1] for bbox in bbox_list])
            dim2_list = np.array([bbox[dim2] for bbox in bbox_list])
            if pad is None:
                pad = np.mean(dim1_list) / 2
            offset1_list = ut.cumsum([0] + [d + pad for d in dim1_list[:-1]])
            max_dim2 = max(dim2_list)
            offset2_list = [(max_dim2 - d2) / 2 for d2 in dim2_list]
            if vert:
                t_xy_list = [(d2, d1) for d1, d2 in zip(offset1_list, offset2_list)]
            else:
                t_xy_list = [(d1, d2) for d1, d2 in zip(offset1_list, offset2_list)]

            for g, t_xy in zip(graph_list_, t_xy_list):
                ut.translate_graph(g, t_xy)
                nx.set_node_attributes(g, name='pin', values='true')

            new_graph = nx.compose_all(graph_list_)
            #pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False)  # NOQA
            return new_graph

        # Construct graph
        for count, info in enumerate(visit_info_list):
            graph = nx.Graph()
            edges = [(int(u), int(v)) for u, v in info['edges']
                     if u is not None and v is not None]
            graph.add_edges_from(edges, attr_dict={'zorder': 10})
            nx.set_node_attributes(graph, name='zorder', values=20)

            # Layout in neato
            _ = pt.nx_agraph_layout(graph, inplace=True, **layoutkw)  # NOQA

            # Extract components and then flatten in nid ordering
            ccs = list(nx.connected_components(graph))
            root_aids = []
            cc_graphs = []
            for cc_nodes in ccs:
                cc = graph.subgraph(cc_nodes)
                try:
                    root_aids.append(list(ut.nx_source_nodes(cc.to_directed()))[0])
                except nx.NetworkXUnfeasible:
                    root_aids.append(list(cc.nodes())[0])
                cc_graphs.append(cc)

            root_nids = ibs.get_annot_nids(root_aids)
            nid2_graph = dict(zip(root_nids, cc_graphs))

            resight_nids_ = set(resight_nids).intersection(set(root_nids))
            noresight_nids_ = set(root_nids) - resight_nids_

            n_graph_list = ut.take(nid2_graph, sorted(noresight_nids_))
            r_graph_list = ut.take(nid2_graph, sorted(resight_nids_))

            if len(n_graph_list) > 0:
                n_graph = nx.compose_all(n_graph_list)
                _ = pt.nx_agraph_layout(n_graph, inplace=True, **layoutkw)  # NOQA
                n_graphs = [n_graph]
            else:
                n_graphs = []

            r_graphs = [stack_graphs(chunk) for chunk in ut.ichunks(r_graph_list, 100)]
            if count == 0:
                new_graph = stack_graphs(n_graphs + r_graphs, vert=True)
            else:
                new_graph = stack_graphs(r_graphs[::-1] + n_graphs, vert=True)

            #pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False)  # NOQA
            info['graph'] = new_graph

        graph1_, graph2_ = ut.take_column(visit_info_list, 'graph')
        if False:
            _ = pt.show_nx(graph1_, layout='custom', node_labels=False, as_directed=False)  # NOQA
            _ = pt.show_nx(graph2_, layout='custom', node_labels=False, as_directed=False)  # NOQA

        graph_list = [graph1_, graph2_]
        twoday_graph = stack_graphs(graph_list, vert=True, pad=None)
        nx.set_node_attributes(twoday_graph, name='pin', values='true')

        if debug:
            ut.nx_delete_None_edge_attr(twoday_graph)
            ut.nx_delete_None_node_attr(twoday_graph)
            print('twoday_graph(pre) info' + ut.repr3(ut.graph_info(twoday_graph), nl=2))

        # Hack, no idea why there are nodes that don't exist here
        between_edges_ = [edge for edge in between_edges
                          if twoday_graph.has_node(edge[0]) and twoday_graph.has_node(edge[1])]

        twoday_graph.add_edges_from(between_edges_, attr_dict={'alpha': .2, 'zorder': 0})
        ut.nx_ensure_agraph_color(twoday_graph)

        layoutkw['splines'] = 'line'
        layoutkw['prog'] = 'neato'
        agraph = pt.nx_agraph_layout(twoday_graph, inplace=True, return_agraph=True, **layoutkw)[-1]  # NOQA
        if False:
            fpath = ut.truepath('~/ggr_graph.png')
            agraph.draw(fpath)
            ut.startfile(fpath)

        if debug:
            print('twoday_graph(post) info' + ut.repr3(ut.graph_info(twoday_graph)))

        _ = pt.show_nx(twoday_graph, layout='custom', node_labels=False, as_directed=False)  # NOQA
Example #12
def detect_species_bboxes(src_gpath_list, species, quick=True, use_chunks=False, **detectkw):
    """
    Generates bounding boxes for each source image
    For each image yields a list of bounding boxes
    """
    nImgs = len(src_gpath_list)
    print('[detect.rf] Beginning %s detection' % (species,))
    detect_lbl = 'detect %s ' % species
    mark_prog, end_prog = utool.progress_func(nImgs, detect_lbl, flush_after=1)

    detect_config = _get_detect_config(**detectkw)
    detector, forest = _get_detector(species, quick=quick)
    detector.set_detect_params(**detect_config)

    dst_gpath_list = [splitext(gpath)[0] for gpath in src_gpath_list]
    # FIXME: Doing this in a generator may cause unnecessary page-faults
    # Maybe there is a better way of doing this, or generating results
    # in batch. It could be a utool batch serial process

    chunksize = 8
    use_chunks_ = use_chunks and nImgs >= chunksize

    if use_chunks_:
        print('[rf] detect in chunks')
        pathtup_iter = zip(src_gpath_list, dst_gpath_list)
        for ic, chunk in enumerate(utool.ichunks(pathtup_iter, chunksize)):
            src_gpath_list = [tup[0] for tup in chunk]
            dst_gpath_list = [tup[1] for tup in chunk]
            mark_prog(ic * chunksize)
            results_list = detector.detect_many(forest, src_gpath_list, dst_gpath_list)

            for results in results_list:
                bboxes = [(minx, miny, (maxx - minx), (maxy - miny))
                          for (centx, centy, minx, miny, maxx, maxy, confidence, supressed)
                          in results if supressed == 0]

                #x_arr = results[:, 2]
                #y_arr = results[:, 3]
                #w_arr = results[:, 4] - results[:, 2]
                #h_arr = results[:, 5] - results[:, 3]
                #bboxes = np.hstack((x_arr, y_arr, w_arr, h_arr))
                # Unpack unsupressed bounding boxes

                confidences = [confidence
                               for (centx, centy, minx, miny, maxx, maxy, confidence, supressed)
                               in results if supressed == 0]

                if len(results) > 0:
                    image_confidence = max([float(result[6]) for result in results])
                else:
                    image_confidence = 0.0

                yield bboxes, confidences, image_confidence
    else:
        print('[rf] detect one image at a time')
        pathtup_iter = zip(src_gpath_list, dst_gpath_list)
        for ix, (src_gpath, dst_gpath) in enumerate(pathtup_iter):
            mark_prog(ix)
            results = detector.detect(forest, src_gpath, dst_gpath)
            bboxes = [(minx, miny, (maxx - minx), (maxy - miny))
                      for (centx, centy, minx, miny, maxx, maxy, confidence, supressed)
                      in results if supressed == 0]

            confidences = [confidence
                           for (centx, centy, minx, miny, maxx, maxy, confidence, supressed)
                           in results if supressed == 0]

            if len(results) > 0:
                image_confidence = max([float(result[6]) for result in results])
            else:
                image_confidence = 0.0

            yield bboxes, confidences, image_confidence
    end_prog()
Example #13
def detect(
    gpath_list,
    config_filepath=None,
    weight_filepath=None,
    classes_filepath=None,
    sensitivity=0.0,
    verbose=VERBOSE_LN,
    flip=False,
    batch_size=192,
    **kwargs,
):
    """Detect image filepaths with lightnet.

    Args:
        gpath_list (list of str): the list of image paths that need proposal candidates

    Kwargs (optional): refer to the Lightnet documentation for configuration settings

    Returns:
        iter
    """
    # Get correct weight if specified with shorthand
    config_url = None
    if config_filepath in CONFIG_URL_DICT:
        config_url = CONFIG_URL_DICT[config_filepath]
        config_filepath = ut.grab_file_url(config_url,
                                           appname='lightnet',
                                           check_hash=True)

    # Get correct weights if specified with shorthand
    if weight_filepath in CONFIG_URL_DICT:
        if weight_filepath is None and config_url is not None:
            config_url_ = config_url
        else:
            config_url_ = CONFIG_URL_DICT[weight_filepath]
        weight_url = _parse_weights_from_cfg(config_url_)
        weight_filepath = ut.grab_file_url(weight_url,
                                           appname='lightnet',
                                           check_hash=True)

    assert exists(config_filepath)
    config_filepath = ut.truepath(config_filepath)
    assert exists(weight_filepath)
    weight_filepath = ut.truepath(weight_filepath)

    conf_thresh = sensitivity
    nms_thresh = 1.0  # Turn off NMS

    params = _create_network(config_filepath, weight_filepath, conf_thresh,
                             nms_thresh)

    # Execute detector for each image
    results_list_ = []
    for gpath_batch_list in tqdm(list(ut.ichunks(gpath_list, batch_size))):
        try:
            result_list, img_sizes = _detect(params,
                                             gpath_batch_list,
                                             flip=flip)
        except cv2.error:
            result_list, img_sizes = [], []

        for result, img_size in zip(result_list, img_sizes):
            img_w, img_h = img_size

            result_list_ = []
            for output in list(result):
                xtl = int(np.around(float(output.x_top_left)))
                ytl = int(np.around(float(output.y_top_left)))
                xbr = int(np.around(float(output.x_top_left + output.width)))
                ybr = int(np.around(float(output.y_top_left + output.height)))
                width = xbr - xtl
                height = ybr - ytl
                class_ = output.class_label
                conf = float(output.confidence)
                if flip:
                    xtl = img_w - xbr
                result_dict = {
                    'xtl': xtl,
                    'ytl': ytl,
                    'width': width,
                    'height': height,
                    'class': class_,
                    'confidence': conf,
                }
                result_list_.append(result_dict)
            results_list_.append(result_list_)

    if len(results_list_) != len(gpath_list):
        raise ValueError('Lightnet did not return valid data')

    results_list = zip(gpath_list, results_list_)
    return results_list
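
One subtlety in the loop above: when _detect raises cv2.error, the empty fallback appends nothing for that batch, so the later length check against gpath_list will raise. A hedged sketch of a variant that preserves per-input alignment by substituting an empty result for each image in a failed batch (detect_batch is a hypothetical stand-in for _detect):

def detect_in_batches(paths, batch_size, detect_batch):
    # Batch the inputs; on a whole-batch failure, emit one empty result
    # per input instead of dropping the batch, keeping outputs aligned.
    results = []
    for start in range(0, len(paths), batch_size):
        batch = paths[start:start + batch_size]
        try:
            batch_results = detect_batch(batch)
        except RuntimeError:
            batch_results = [[] for _ in batch]
        results.extend(batch_results)
    assert len(results) == len(paths)
    return list(zip(paths, results))

print(detect_in_batches(['a.jpg', 'b.jpg'], 2,
                        lambda b: [[{'confidence': 0.9}] for _ in b]))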
Example #14
def ensure_pz_mtest_batchworkflow_test():
    r"""
    CommandLine:
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test --reset
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test --reset

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest_batchworkflow_test()
    """
    import ibeis
    ibeis.ensure_pz_mtest()
    workdir = ibeis.sysres.get_workdir()
    mtest_dbpath = join(workdir, 'PZ_MTEST')

    source_dbdir = mtest_dbpath
    dest_dbdir = join(workdir, 'PZ_BATCH_WORKFLOW_MTEST')

    if ut.get_argflag('--reset'):
        ut.delete(dest_dbdir)

    if ut.checkpath(dest_dbdir):
        return
    else:
        copy_ibeisdb(source_dbdir, dest_dbdir)

    ibs = ibeis.opendb('PZ_BATCH_WORKFLOW_MTEST')
    assert len(ibs.get_valid_aids()) == 119
    assert len(ibs.get_valid_nids()) == 41

    ibs.delete_all_encounters()

    aid_list = ibs.get_valid_aids()

    unixtime_list = ibs.get_annot_image_unixtimes(aid_list)
    untimed_aids = ut.compress(aid_list, [t == -1 for t in unixtime_list])

    ibs.get_annot_groundtruth(untimed_aids, aid_list)

    aids_list, nid_list = ibs.group_annots_by_name(aid_list)

    hourdiffs_list = ibs.get_name_hourdiffs(nid_list)

    encounter_aids_list = [[] for _ in range(4)]

    encounter_idx = 0

    for hourdiffs, aids in zip(hourdiffs_list, aids_list):
        #import scipy.spatial.distance as spdist
        if len(aids) == 1:
            encounter_aids_list[encounter_idx].extend(aids)
            encounter_idx = (encounter_idx + 1) % len(encounter_aids_list)
        else:
            for chunk in list(ut.ichunks(aids, 2)):
                encounter_aids_list[encounter_idx].extend(chunk)
                encounter_idx = (encounter_idx + 1) % len(encounter_aids_list)

            #import vtool as vt
            #import networkx as netx
            #nodes = list(range(len(aids)))
            #edges_pairs = vt.pdist_argsort(hourdiffs)
            #edge_weights = -hourdiffs[hourdiffs.argsort()]
            #netx_graph = make_netx_graph(edges_pairs, nodes, edge_weights)
            #cut_edges = netx.minimum_edge_cut(netx_graph)
            #netx_graph.remove_edges_from(cut_edges)
            #components = list(netx.connected_components(netx_graph))
            #components = ut.sortedby(components, list(map(len, components)), reverse=True)
            #print(components)
            #encounter_aids_list[0].extend(components[0])
            #for component in components:

            # TODO do max-nway cut
        #day_diffs = spdist.squareform(hourdiffs) / 24.0
        #print(ut.numpy_str(day_diffs, precision=2, suppress_small=True))
        #import itertools
        #compare_idxs = [(r, c) for r, c in itertools.product(range(len(aids)), range(len(aids))) if (c > r)]
        #print(len(aids))
    #def make_netx_graph(edges_pairs, nodes=None, edge_weights=None):
    #    import networkx as netx
    #    node_lbls = [('id_', 'int')]

    #    edge_lbls = [('weight', 'float')]
    #    edges = [(pair[0], pair[1], weight) for pair, weight in zip(edges_pairs, edge_weights)]

    #    print('make_netx_graph')
    #    # Make a graph between the chips
    #    netx_nodes = [(ntup[0], {key[0]: val for (key, val) in zip(node_lbls, ntup[1:])})
    #                  for ntup in iter(zip(nodes))]

    #    netx_edges = [(etup[0], etup[1], {key[0]: val for (key, val) in zip(edge_lbls, etup[2:])})
    #                  for etup in iter(edges)]
    #    netx_graph = netx.Graph()
    #    netx_graph.add_nodes_from(netx_nodes)
    #    netx_graph.add_edges_from(netx_edges)
    #    return netx_graph

    # Group into encounters based on old names
    gids_list = ibs.unflat_map(ibs.get_annot_image_rowids, encounter_aids_list)
    eid_list = ibs.new_encounters_from_images(gids_list)  # NOQA

    # Remove all names
    ibs.delete_annot_nids(aid_list)
Example #15
def temp_model(num_annots,
               num_names,
               score_evidence=[],
               name_evidence=[],
               other_evidence={},
               noquery=False,
               verbose=None,
               **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE],
                           title='Soft Evidence',
                           color='green')

    # if verbose:
    #    ut.colorprint('\n --- Soft Evidence ---', 'white')
    #    for ttype, cpds in model.ttype2_cpds.items():
    #        if ttype != MATCH_TTYPE:
    #            for fs_ in ut.ichunks(cpds, 4):
    #                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                              'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds[NAME_TTYPE],
                                      'variable')
        # query_vars += ut.list_getattr(model.ttype2_cpds[MATCH_TTYPE], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        # query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # query_results may be empty (e.g. noquery); avoid a KeyError
    factor_list = query_results.get('factor_list', [])

    if verbose:
        if verbose:
            logger.info('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            logger.info('Result Factors (%r)' % (type_, ))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        logger.info('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        logger.info(ut.align('\n'.join(tmp), ' :'))
        logger.info('L_____\n')

    showkw = dict(evidence=evidence,
                  soft_evidence=soft_evidence,
                  **query_results)

    from wbia.algo.hots import pgm_viz

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
Example #16
def ensure_pz_mtest_batchworkflow_test():
    r"""
    CommandLine:
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test --reset
        python -m ibeis.init.sysres --test-ensure_pz_mtest_batchworkflow_test --reset

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest_batchworkflow_test()
    """
    import ibeis
    ibeis.ensure_pz_mtest()
    workdir = ibeis.sysres.get_workdir()
    mtest_dbpath = join(workdir, 'PZ_MTEST')

    source_dbdir = mtest_dbpath
    dest_dbdir = join(workdir, 'PZ_BATCH_WORKFLOW_MTEST')

    if ut.get_argflag('--reset'):
        ut.delete(dest_dbdir)

    if ut.checkpath(dest_dbdir):
        return
    else:
        copy_ibeisdb(source_dbdir, dest_dbdir)

    ibs = ibeis.opendb('PZ_BATCH_WORKFLOW_MTEST')
    assert len(ibs.get_valid_aids()) == 119
    assert len(ibs.get_valid_nids()) == 41

    ibs.delete_all_imagesets()

    aid_list = ibs.get_valid_aids()

    unixtime_list = ibs.get_annot_image_unixtimes(aid_list)
    untimed_aids = ut.compress(aid_list, [t == -1 for t in unixtime_list])

    ibs.get_annot_groundtruth(untimed_aids, aid_list)

    aids_list, nid_list = ibs.group_annots_by_name(aid_list)

    hourdiffs_list = ibs.get_name_hourdiffs(nid_list)

    imageset_aids_list = [[] for _ in range(4)]

    imageset_idx = 0

    for hourdiffs, aids in zip(hourdiffs_list, aids_list):
        #import scipy.spatial.distance as spdist
        if len(aids) == 1:
            imageset_aids_list[imageset_idx].extend(aids)
            imageset_idx = (imageset_idx + 1) % len(imageset_aids_list)
        else:
            for chunk in list(ut.ichunks(aids, 2)):
                imageset_aids_list[imageset_idx].extend(chunk)
                imageset_idx = (imageset_idx + 1) % len(imageset_aids_list)

            #import vtool as vt
            #import networkx as netx
            #nodes = list(range(len(aids)))
            #edges_pairs = vt.pdist_argsort(hourdiffs)
            #edge_weights = -hourdiffs[hourdiffs.argsort()]
            #netx_graph = make_netx_graph(edges_pairs, nodes, edge_weights)
            #cut_edges = netx.minimum_edge_cut(netx_graph)
            #netx_graph.remove_edges_from(cut_edges)
            #components = list(netx.connected_components(netx_graph))
            #components = ut.sortedby(components, list(map(len, components)), reverse=True)
            #print(components)
            #imageset_aids_list[0].extend(components[0])
            #for component in components:

            # TODO do max-nway cut
        #day_diffs = spdist.squareform(hourdiffs) / 24.0
        #print(ut.numpy_str(day_diffs, precision=2, suppress_small=True))
        #import itertools
        #compare_idxs = [(r, c) for r, c in itertools.product(range(len(aids)), range(len(aids))) if (c > r)]
        #print(len(aids))
    #def make_netx_graph(edges_pairs, nodes=None, edge_weights=None):
    #    import networkx as netx
    #    node_lbls = [('id_', 'int')]

    #    edge_lbls = [('weight', 'float')]
    #    edges = [(pair[0], pair[1], weight) for pair, weight in zip(edges_pairs, edge_weights)]

    #    print('make_netx_graph')
    #    # Make a graph between the chips
    #    netx_nodes = [(ntup[0], {key[0]: val for (key, val) in zip(node_lbls, ntup[1:])})
    #                  for ntup in iter(zip(nodes))]

    #    netx_edges = [(etup[0], etup[1], {key[0]: val for (key, val) in zip(edge_lbls, etup[2:])})
    #                  for etup in iter(edges)]
    #    netx_graph = netx.Graph()
    #    netx_graph.add_nodes_from(netx_nodes)
    #    netx_graph.add_edges_from(netx_edges)
    #    return netx_graph

    # Group into imagesets based on old names
    gids_list = ibs.unflat_map(ibs.get_annot_image_rowids, imageset_aids_list)
    imgsetid_list = ibs.new_imagesets_from_images(gids_list)  # NOQA

    # Remove all names
    ibs.delete_annot_nids(aid_list)
Example #17
def add_chip_feat(ibs, chip_rowid_list, config2_=None, verbose=not ut.QUIET, return_num_dirty=False):
    """ chip.feat.add(chip_rowid_list)

    CRITICAL FUNCTION MUST EXIST FOR ALL DEPENDANTS
    Adds / ensures / computes a dependant property
    Args:
         chip_rowid_list

    Returns:
        returns feat_rowid_list of added (or already existing feats)

    TemplateInfo:
        python -m ibeis.templates.template_generator --key feat --funcname-filter "\<add_chip_feat\>" --modfname=manual_feat_funcs
        Tadder_pl_dependant
        parent = chip
        leaf = feat

    CommandLine:
        python -m ibeis.control.manual_feat_funcs --test-add_chip_feat

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_feat_funcs import *  # NOQA
        >>> ibs, config2_ = testdata_ibs()
        >>> ibs.get_annot_chip_rowids(ibs.get_valid_aids())  # Ensure chips are computed
        >>> chip_rowid_list = ibs._get_all_chip_rowids()[::3]
        >>> feat_rowid_list = ibs.add_chip_feat(chip_rowid_list, config2_=config2_)
        >>> assert len(feat_rowid_list) == len(chip_rowid_list), 'bad length'
        >>> ut.assert_all_not_None(feat_rowid_list)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_feat_funcs import *  # NOQA
        >>> ibs, config2_ = testdata_ibs()
        >>> ibs.get_annot_chip_rowids(ibs.get_valid_aids())  # Ensure chips are computed
        >>> chip_rowid_list = ibs._get_all_chip_rowids()[0:10]
        >>> assert len(chip_rowid_list) == 10, 'chips not computed'
        >>> sub_chip_rowid_list1 = chip_rowid_list[0:6]
        >>> sub_chip_rowid_list2 = chip_rowid_list[5:7]
        >>> sub_chip_rowid_list3 = chip_rowid_list[0:7]
        >>> sub_feat_rowid_list1 = ibs.get_chip_feat_rowid(sub_chip_rowid_list1, config2_=config2_, ensure=True)
        >>> ibs.get_chip_feat_rowid(sub_chip_rowid_list1, config2_=config2_, ensure=True)
        >>> sub_feat_rowid_list1, num_dirty0 = ibs.add_chip_feat(sub_chip_rowid_list1, config2_=config2_, return_num_dirty=True)
        >>> assert num_dirty0 == 0, 'num_dirty0=%r' % (num_dirty0,)
        >>> ut.assert_all_not_None(sub_feat_rowid_list1)
        >>> ibs.delete_chip_feats(sub_chip_rowid_list2)
        >>> #ibs.delete_chip_feat(sub_chip_rowid_list2)?
        >>> sub_feat_rowid_list3 = ibs.get_chip_feat_rowid(sub_chip_rowid_list3, config2_=config2_, ensure=False)
        >>> # Only the last two should be None
        >>> ut.assert_all_not_None(sub_feat_rowid_list3[0:5], 'sub_feat_rowid_list3[0:5])')
        >>> assert sub_feat_rowid_list3[5:7] == [None, None], 'sub_feat_rowid_list3=%r' % (sub_feat_rowid_list3,)
        >>> sub_feat_rowid_list3_ensured, num_dirty1 = ibs.add_chip_feat(sub_chip_rowid_list3, config2_=config2_,  return_num_dirty=True)
        >>> assert num_dirty1 == 2, 'Only two params should have been computed here'
        >>> ut.assert_all_not_None(sub_feat_rowid_list3_ensured)
    """
    from ibeis.algo.preproc import preproc_feat
    ut.assert_all_not_None(chip_rowid_list, ' chip_rowid_list')
    # Get requested configuration id
    config_rowid = ibs.get_feat_config_rowid(config2_=config2_)
    # Find leaf rowids that need to be computed
    initial_feat_rowid_list = get_chip_feat_rowids_(
        ibs, chip_rowid_list, config2_=config2_)
    # Get corresponding "dirty" parent rowids
    isdirty_list = ut.flag_None_items(initial_feat_rowid_list)
    dirty_chip_rowid_list = ut.compress(chip_rowid_list, isdirty_list)
    num_dirty = len(dirty_chip_rowid_list)
    num_total = len(chip_rowid_list)
    if num_dirty > 0:
        if verbose:
            fmtstr = '[add_chip_feat] adding %d / %d new feat for config_rowid=%r'
            print(fmtstr % (num_dirty, num_total, config_rowid))
        # Dependant columns do not need true from_superkey getters.
        # We can use the Tgetter_pl_dependant_rowids_ instead
        get_rowid_from_superkey = functools.partial(
            ibs.get_chip_feat_rowids_, config2_=config2_)
        # CALL EXTERNAL PREPROCESSING / GENERATION FUNCTION
        proptup_gen = preproc_feat.generate_feat_properties(
            ibs, dirty_chip_rowid_list, config2_=config2_)
        dirty_params_iter = (
            (chip_rowid, config_rowid, feature_nFeat,
             feature_kpt_arr, feature_vec_arr)
            for chip_rowid, (feature_nFeat, feature_kpt_arr, feature_vec_arr,) in
            zip(dirty_chip_rowid_list, proptup_gen)
        )
        colnames = ['chip_rowid', 'config_rowid',
                    'feature_num_feats', 'feature_keypoints', 'feature_vecs']
        #feat_rowid_list = ibs.dbcache.add_cleanly(const.FEATURE_TABLE, colnames, dirty_params_iter, get_rowid_from_superkey)
        CHUNKED_ADD = True
        if CHUNKED_ADD:
            chunksize = 128
            print('[add_chip_feat] adding to sql in chunks with chunksize=%r' % (chunksize,))
            for dirty_params_chunk in ut.ichunks(dirty_params_iter, chunksize=chunksize):
                print('[add_chip_feat] adding feature chunk to sql')
                nInput = len(dirty_params_chunk)
                ibs.dbcache._add(
                    const.FEATURE_TABLE, colnames, dirty_params_chunk, nInput=nInput)
        else:
            nInput = num_dirty
            ibs.dbcache._add(
                const.FEATURE_TABLE, colnames, dirty_params_iter, nInput=nInput)

        #ibs.dbcache._add(const.FEATURE_TABLE, colnames, dirty_params_iter)
        # Now that the dirty params are added get the correct order of rowids
        feat_rowid_list = get_rowid_from_superkey(chip_rowid_list)
    else:
        feat_rowid_list = initial_feat_rowid_list
    if return_num_dirty:
        return feat_rowid_list, num_dirty
    return feat_rowid_list
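
The CHUNKED_ADD branch bounds the size of each SQL insert. A self-contained sketch of the same idea with the standard-library sqlite3 module (the table and column names are invented for the demo):

import sqlite3
from itertools import islice

def add_in_chunks(conn, rows, chunksize=128):
    # Insert an iterator of parameter tuples in fixed-size chunks so
    # each transaction stays small.
    rows = iter(rows)
    while True:
        chunk = list(islice(rows, chunksize))
        if not chunk:
            break
        with conn:  # one transaction per chunk
            conn.executemany(
                'INSERT INTO feats(chip_rowid, num_feats) VALUES (?, ?)', chunk)

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE feats(chip_rowid INTEGER, num_feats INTEGER)')
add_in_chunks(conn, ((i, i * 10) for i in range(300)))
print(conn.execute('SELECT COUNT(*) FROM feats').fetchone())  # (300,)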
Example #18
def test_model(num_annots, num_names, score_evidence=[], name_evidence=[],
               other_evidence={}, noquery=False, verbose=None,
               **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=['match', 'score'])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=['match', 'score'],
                           title='Soft Evidence', color='green')

    #if verbose:
    #    ut.colorprint('\n --- Soft Evidence ---', 'white')
    #    for ttype, cpds in model.ttype2_cpds.items():
    #        if ttype != 'match':
    #            for fs_ in ut.ichunks(cpds, 4):
    #                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                              'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        #query_vars += ut.list_getattr(model.ttype2_cpds['match'], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        #query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # query_results may be empty (e.g. noquery); avoid a KeyError
    factor_list = query_results.get('factor_list', [])

    if verbose:
        if verbose:
            print('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            print('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        print('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        print(ut.align('\n'.join(tmp), ' :'))
        print('L_____\n')

    showkw = dict(evidence=evidence,
                  soft_evidence=soft_evidence,
                  **query_results)

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
Example #19
def print_priors(model, ignore_ttypes=[], title='Priors', color='darkblue'):
    ut.colorprint('\n --- %s ---' % (title,), color=color)
    for ttype, cpds in model.ttype2_cpds.items():
        if ttype not in ignore_ttypes:
            for fs_ in ut.ichunks(cpds, 4):
                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]), color)
Example #20
def check_database_overlap(ibs1, ibs2):
    """
    CommandLine:
        python -m wbia.other.dbinfo --test-get_dbinfo:1 --db PZ_MTEST
        dev.py -t listdbs
        python -m wbia.dbio.export_subset check_database_overlap
        --db PZ_MTEST --db2 PZ_MOTHERS

    CommandLine:
        python -m wbia.dbio.export_subset check_database_overlap

        python -m wbia.dbio.export_subset check_database_overlap --db1=PZ_MTEST --db2=PZ_Master0  # NOQA
        python -m wbia.dbio.export_subset check_database_overlap --db1=NNP_Master3 --db2=PZ_Master0  # NOQA

        python -m wbia.dbio.export_subset check_database_overlap --db1=GZ_Master0 --db2=GZ_ALL
        python -m wbia.dbio.export_subset check_database_overlap --db1=GZ_ALL --db2=lewa_grevys

        python -m wbia.dbio.export_subset check_database_overlap --db1=PZ_FlankHack --db2=PZ_Master1
        python -m wbia.dbio.export_subset check_database_overlap --db1=PZ_PB_RF_TRAIN --db2=PZ_Master1


    Example:
        >>> # SCRIPT
        >>> from wbia.dbio.export_subset import *  # NOQA
        >>> import wbia
        >>> import utool as ut
        >>> #ibs1 = wbia.opendb(db='PZ_Master0')
        >>> #ibs2 = wbia.opendb(dbdir='/raid/work2/Turk/PZ_Master')
        >>> db1 = ut.get_argval('--db1', str, default='PZ_MTEST')
        >>> db2 = ut.get_argval('--db2', str, default='testdb1')
        >>> dbdir1 = ut.get_argval('--dbdir1', str, default=None)
        >>> dbdir2 = ut.get_argval('--dbdir2', str, default=None)
        >>> ibs1 = wbia.opendb(db=db1, dbdir=dbdir1)
        >>> ibs2 = wbia.opendb(db=db2, dbdir=dbdir2)
        >>> check_database_overlap(ibs1, ibs2)
    """
    import numpy as np

    def print_isect(items1, items2, lbl=''):
        set1_ = set(items1)
        set2_ = set(items2)
        items_isect = set1_.intersection(set2_)
        fmtkw1 = dict(
            part=1,
            lbl=lbl,
            num=len(set1_),
            num_isect=len(items_isect),
            percent=100 * len(items_isect) / len(set1_),
        )
        fmtkw2 = dict(
            part=2,
            lbl=lbl,
            num=len(set2_),
            num_isect=len(items_isect),
            percent=100 * len(items_isect) / len(set2_),
        )
        fmt_a = '  * Num {lbl} {part}: {num_isect} / {num} = {percent:.2f}%'
        # fmt_b = '  * Num {lbl} isect: {num}'
        logger.info('Checking {lbl} intersection'.format(lbl=lbl))
        logger.info(fmt_a.format(**fmtkw1))
        logger.info(fmt_a.format(**fmtkw2))
        # logger.info(fmt_b.format(lbl=lbl, num=len(items_isect)))
        # items = items_isect
        # list_ = items1
        x_list1 = ut.find_list_indexes(items1, items_isect)
        x_list2 = ut.find_list_indexes(items2, items_isect)
        return x_list1, x_list2

    gids1 = ibs1.images()
    gids2 = ibs2.images()

    # Find common images
    # items1, items2, lbl, = gids1.uuids, gids2.uuids, 'images'
    gx_list1, gx_list2 = print_isect(gids1.uuids, gids2.uuids, 'images')
    gids_isect1 = gids1.take(gx_list1)
    gids_isect2 = gids2.take(gx_list2)
    assert gids_isect2.uuids == gids_isect1.uuids, 'sequence must be aligned'

    SHOW_ISECT_GIDS = False
    if SHOW_ISECT_GIDS:
        if len(gx_list1) > 0:
            logger.info('gids_isect1 = %r' % (gids_isect1, ))
            logger.info('gids_isect2 = %r' % (gids_isect2, ))
            if False:
                # Debug code
                import wbia.viz
                import wbia.plottool as pt

                gid_pairs = list(zip(gids_isect1, gids_isect2))
                pairs_iter = ut.ichunks(gid_pairs, chunksize=8)
                for fnum, pairs in enumerate(pairs_iter, start=1):
                    pnum_ = pt.make_pnum_nextgen(nRows=len(pairs), nCols=2)
                    for gid1, gid2 in pairs:
                        wbia.viz.show_image(ibs1,
                                            gid1,
                                            pnum=pnum_(),
                                            fnum=fnum)
                        wbia.viz.show_image(ibs2,
                                            gid2,
                                            pnum=pnum_(),
                                            fnum=fnum)

    # if False:
    #     aids1 = ibs1.get_valid_aids()
    #     aids2 = ibs2.get_valid_aids()
    #     ibs1.update_annot_visual_uuids(aids1)
    #     ibs2.update_annot_visual_uuids(aids2)
    #     ibs1.update_annot_semantic_uuids(aids1)
    #     ibs2.update_annot_semantic_uuids(aids2)

    # Check to see which intersecting images have different annotations
    image_aids_isect1 = gids_isect1.aids
    image_aids_isect2 = gids_isect2.aids
    image_avuuids_isect1 = np.array(
        ibs1.unflat_map(ibs1.get_annot_visual_uuids, image_aids_isect1))
    image_avuuids_isect2 = np.array(
        ibs2.unflat_map(ibs2.get_annot_visual_uuids, image_aids_isect2))
    changed_image_xs = np.nonzero(
        image_avuuids_isect1 != image_avuuids_isect2)[0]
    if len(changed_image_xs) > 0:
        logger.info(
            'There are %d images with changes in annotation visual information'
            % (len(changed_image_xs), ))
        changed_gids1 = ut.take(gids_isect1, changed_image_xs)
        changed_gids2 = ut.take(gids_isect2, changed_image_xs)

        SHOW_CHANGED_GIDS = False
        if SHOW_CHANGED_GIDS:
            logger.info('changed_gids1 = %r' % (changed_gids1, ))
            logger.info('changed_gids2 = %r' % (changed_gids2, ))
            # if False:
            #     # Debug code
            #     import wbia.viz
            #     import wbia.plottool as pt
            #     gid_pairs = list(zip(changed_gids1, changed_gids2))
            #     pairs_iter = ut.ichunks(gid_pairs, chunksize=8)
            #     for fnum, pairs in enumerate(pairs_iter, start=1):
            #         pnum_ = pt.make_pnum_nextgen(nRows=len(pairs), nCols=2)
            #         for gid1, gid2 in pairs:
            #             wbia.viz.show_image(
            #                 ibs1, gid1, pnum=pnum_(), fnum=fnum)
            #             wbia.viz.show_image(
            #                 ibs2, gid2, pnum=pnum_(), fnum=fnum)

    # Check for overlapping annotations (visual info only) in general
    aids1 = ibs1.annots()
    aids2 = ibs2.annots()

    # Check for overlapping annotations (visual + semantic info) in general
    aux_list1, aux_list2 = print_isect(aids1.uuids, aids2.uuids, 'uuids')
    avx_list1, avx_list2 = print_isect(aids1.visual_uuids, aids2.visual_uuids,
                                       'vuuids')
    asx_list1, asx_list2 = print_isect(aids1.semantic_uuids,
                                       aids2.semantic_uuids, 'suuids')

    # Check which annotations with the same visual uuids have different
    # semantic uuids
    changed_ax_list1 = ut.setdiff_ordered(avx_list1, asx_list1)
    changed_ax_list2 = ut.setdiff_ordered(avx_list2, asx_list2)
    assert len(changed_ax_list1) == len(changed_ax_list2)
    assert ut.take(aids1.visual_uuids,
                   changed_ax_list1) == ut.take(aids2.visual_uuids,
                                                changed_ax_list2)

    changed_aids1 = np.array(ut.take(aids1, changed_ax_list1))
    changed_aids2 = np.array(ut.take(aids2, changed_ax_list2))

    changed_sinfo1 = ibs1.get_annot_semantic_uuid_info(changed_aids1)
    changed_sinfo2 = ibs2.get_annot_semantic_uuid_info(changed_aids2)
    sinfo1_arr = np.array(changed_sinfo1)
    sinfo2_arr = np.array(changed_sinfo2)
    is_semantic_diff = sinfo2_arr != sinfo1_arr
    # Inspect semantic differences
    if np.any(is_semantic_diff):
        colxs, rowxs = np.nonzero(is_semantic_diff)
        colx2_rowids = ut.group_items(rowxs, colxs)
        prop2_rowids = ut.map_dict_keys(changed_sinfo1._fields.__getitem__,
                                        colx2_rowids)
        logger.info('changed_value_counts = ' +
                    ut.repr2(ut.map_dict_vals(len, prop2_rowids)))
        yawx = changed_sinfo1._fields.index('yaw')

        # Show change in viewpoints
        if len(colx2_rowids[yawx]) > 0:
            vp_category_diff = ibsfuncs.viewpoint_diff(
                sinfo1_arr[yawx], sinfo2_arr[yawx]).astype(np.float64)
            # Look for category changes
            # any_diff = np.floor(vp_category_diff) > 0
            # _xs    = np.nonzero(any_diff)[0]
            # _aids1 = changed_aids1.take(_xs)
            # _aids2 = changed_aids2.take(_xs)
            # Look for significant changes
            is_significant_diff = np.floor(vp_category_diff) > 1
            significant_xs = np.nonzero(is_significant_diff)[0]
            significant_aids1 = changed_aids1.take(significant_xs)
            significant_aids2 = changed_aids2.take(significant_xs)
            logger.info('There are %d significant viewpoint changes' %
                        (len(significant_aids2), ))
            # vt.ori_distance(sinfo1_arr[yawx], sinfo2_arr[yawx])
            # zip(ibs1.get_annot_viewpoint_code(significant_aids1),
            # ibs2.get_annot_viewpoint_code(significant_aids2))
            # logger.info('yawdiff = %r' % )
            # if False:
            # Hack: Apply fixes
            # good_yaws = ibs2.get_annot_yaws(significant_aids2)
            # ibs1.set_annot_yaws(significant_aids1, good_yaws)
            #    pass
            if False:
                # Debug code
                import wbia.viz
                import wbia.plottool as pt

                # aid_pairs = list(zip(_aids1, _aids2))
                aid_pairs = list(zip(significant_aids1, significant_aids2))
                pairs_iter = ut.ichunks(aid_pairs, chunksize=8)
                for fnum, pairs in enumerate(pairs_iter, start=1):
                    pnum_ = pt.make_pnum_nextgen(nRows=len(pairs), nCols=2)
                    for aid1, aid2 in pairs:
                        wbia.viz.show_chip(
                            ibs1,
                            aid1,
                            pnum=pnum_(),
                            fnum=fnum,
                            show_viewcode=True,
                            nokpts=True,
                        )
                        wbia.viz.show_chip(
                            ibs2,
                            aid2,
                            pnum=pnum_(),
                            fnum=fnum,
                            show_viewcode=True,
                            nokpts=True,
                        )

    #
    nAnnots_per_image1 = np.array(ibs1.get_image_num_annotations(gids1))
    nAnnots_per_image2 = np.array(ibs2.get_image_num_annotations(gids2))
    #
    images_without_annots1 = sum(nAnnots_per_image1 == 0)
    images_without_annots2 = sum(nAnnots_per_image2 == 0)
    logger.info('images_without_annots1 = %r' % (images_without_annots1, ))
    logger.info('images_without_annots2 = %r' % (images_without_annots2, ))

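All of these snippets lean on `ut.ichunks`. As a point of reference, here is a minimal pure-Python sketch of the chunking contract they assume (successive lists of at most `chunksize` items, with a short final chunk); this is an illustration, not utool's implementation:

def ichunks_sketch(iterable, chunksize):
    # Yield successive lists of at most `chunksize` items.
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == chunksize:
            yield chunk
            chunk = []
    if chunk:
        yield chunk

assert list(ichunks_sketch(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]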
Example #21
0
def get_sift_collection(sift,
                        aff=None,
                        bin_color=BLACK,
                        arm1_color=RED,
                        arm2_color=BLACK,
                        arm_alpha=1.0,
                        arm1_lw=1.0,
                        arm2_lw=2.0,
                        circ_alpha=.5,
                        **kwargs):
    """
    Creates a collection of SIFT matplotlib patches

    get_sift_collection

    Args:
        sift (?):
        aff (None):
        bin_color (ndarray):
        arm1_color (ndarray):
        arm2_color (ndarray):
        arm_alpha (float):
        arm1_lw (float):
        arm2_lw (float):
        circ_alpha (float):

    Returns:
        ?: coll_tup

    CommandLine:
        python -m plottool.mpl_sift --test-get_sift_collection

    Example:
        >>> from plottool.mpl_sift import *  # NOQA
        >>> sift = testdata_sifts()[0]
        >>> aff = None
        >>> bin_color = array([ 0.,  0.,  0.,  1.])
        >>> arm1_color = array([ 1.,  0.,  0.,  1.])
        >>> arm2_color = array([ 0.,  0.,  0.,  1.])
        >>> arm_alpha = 1.0
        >>> arm1_lw = 0.5
        >>> arm2_lw = 1.0
        >>> circ_alpha = 0.5
        >>> coll_tup = get_sift_collection(sift, aff, bin_color, arm1_color, arm2_color, arm_alpha, arm1_lw, arm2_lw, circ_alpha)
        >>> print(coll_tup)
    """
    # global offset scale adjustments
    if aff is None:
        aff = mpl.transforms.Affine2D()
    MULTI_COLORED_ARMS = kwargs.pop('multicolored_arms', False)
    _kwarm = kwargs.copy()
    _kwarm.update(
        dict(head_width=1e-10,
             length_includes_head=False,
             transform=aff,
             color=[1, 1, 0]))
    _kwcirc = dict(transform=aff)
    arm_patches = []
    DSCALE = 0.25  # Descriptor scale factor
    ARMSCALE = 1.5  # Arm length scale factor
    XYSCALE = 0.5  # Position scale factor
    XYOFFST = -0.75  # Position offset
    NORI, NX, NY = 8, 4, 4  # SIFT BIN CONSTANTS
    NBINS = NX * NY
    discrete_ori = (np.arange(0, NORI) * (TAU / NORI))
    # Arm magnitude and orientations
    arm_mag = sift / 255.0
    arm_ori = np.tile(discrete_ori, (NBINS, 1)).flatten()
    # Arm orientation in dxdy format
    arm_dxy = np.array(list(zip(*_cirlce_rad2xy(arm_ori, arm_mag))))
    # Arm locations and dxdy index
    yxt_gen = iprod(range(NY), range(NX), range(NORI))
    # Circle x,y locations
    yx_gen = iprod(range(NY), range(NX))
    # Draw 8 directional arms in each of the 4x4 grid cells
    arm_args_list = []

    for y, x, t in yxt_gen:
        #print('y=%r, x=%r, t=%r' % (y, x, t))
        index = (y * NX * NORI) + (x * NORI) + (t)
        (dx, dy) = arm_dxy[index]
        arm_x = (x * XYSCALE) + XYOFFST  # MULTIPLY BY -1 to invert X axis
        arm_y = (y * XYSCALE) + XYOFFST
        arm_dy = (dy * DSCALE) * ARMSCALE
        arm_dx = (dx * DSCALE) * ARMSCALE
        _args = [arm_x, arm_y, arm_dx, arm_dy]
        arm_args_list.append(_args)

    for _args in arm_args_list:
        arm_patch = mpl.patches.FancyArrow(*_args, **_kwarm)
        arm_patches.append(arm_patch)

    #print('len(arm_patches) = %r' % (len(arm_patches),))
    # Draw circles around each of the 4x4 grid cells
    circle_patches = []
    for y, x in yx_gen:
        circ_xy = (x * XYSCALE + XYOFFST, y * XYSCALE + XYOFFST)
        circ_radius = DSCALE
        circle_patches += [mpl.patches.Circle(circ_xy, circ_radius, **_kwcirc)]

    circ_coll = _circl_collection(circle_patches, bin_color, circ_alpha)
    arm2_coll = _arm_collection(arm_patches, arm2_color, arm_alpha, arm2_lw)

    if MULTI_COLORED_ARMS:
        # Hack in same colorscheme for arms as the sift bars
        ori_colors = color_fns.distinct_colors(16)
        coll_tup = [circ_coll, arm2_coll]
        coll_tup += [
            _arm_collection(_, color, arm_alpha, arm1_lw)
            for _, color in zip(ut.ichunks(arm_patches, 8), ori_colors)
        ]
        coll_tup = tuple(coll_tup)
    else:
        # Just use a single color for all the arms
        arm1_coll = _arm_collection(arm_patches, arm1_color, arm_alpha,
                                    arm1_lw)
        coll_tup = (circ_coll, arm2_coll, arm1_coll)
    return coll_tup
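The arm layout above flattens a (y, x, orientation) grid into a single index. A standalone sanity check of that arithmetic (same NORI, NX, NY constants as the example; the loop order mirrors `iprod`):

import itertools

NORI, NX, NY = 8, 4, 4
for index, (y, x, t) in enumerate(itertools.product(range(NY), range(NX), range(NORI))):
    # row-major flattening used by get_sift_collection
    assert index == (y * NX * NORI) + (x * NORI) + t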
Example #22
0
def test_siamese_performance(model, data, labels, flat_metadata, dataname=''):
    r"""
    CommandLine:
        utprof.py -m ibeis_cnn --tf pz_patchmatch --db liberty --test --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test  --ensure
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test  --ensure --weights=new
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --train --weights=new
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 --test  # NOQA
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128
    """
    import vtool as vt
    import plottool as pt

    # TODO: save in model.trainind_dpath/diagnostics/figures
    ut.colorprint('\n[siam_perf] Testing Siamese Performance', 'white')
    #epoch_dpath = model.get_epoch_diagnostic_dpath()
    epoch_dpath = model.arch_dpath
    ut.vd(epoch_dpath)

    dataname += ' ' + model.get_history_hashid() + '\n'

    history_text = ut.list_str(model.era_history, newlines=True)

    ut.write_to(ut.unixjoin(epoch_dpath, 'era_history.txt'), history_text)

    #if True:
    #    import matplotlib as mpl
    #    mpl.rcParams['agg.path.chunksize'] = 100000

    #data   = data[::50]
    #labels = labels[::50]
    #from ibeis_cnn import utils
    #data, labels = utils.random_xy_sample(data, labels, 10000, model.data_per_label_input)

    FULL = not ut.get_argflag('--quick')

    fnum_gen = pt.make_fnum_nextgen()

    ut.colorprint('[siam_perf] Show era history', 'white')
    fig = model.show_era_loss(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)

    # hack
    ut.colorprint('[siam_perf] Show weights image', 'white')
    fig = model.show_weights_image(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
    #model.draw_all_conv_layer_weights(fnum=fnum_gen())
    #model.imwrite_weights(1)
    #model.imwrite_weights(2)

    # Compute each type of score
    ut.colorprint('[siam_perf] Building Scores', 'white')
    test_outputs = model.predict2(model, data)
    network_output = test_outputs['network_output_determ']
    # hack converting network output to distances for non-descriptor networks
    if len(network_output.shape) == 2 and network_output.shape[1] == 1:
        cnn_scores = network_output.T[0]
    elif len(network_output.shape) == 1:
        cnn_scores = network_output
    elif len(network_output.shape) == 2 and network_output.shape[1] > 1:
        assert model.data_per_label_output == 2
        vecs1 = network_output[0::2]
        vecs2 = network_output[1::2]
        cnn_scores = vt.L2(vecs1, vecs2)
    else:
        assert False
    cnn_scores = cnn_scores.astype(np.float64)

    # Segfaults when the data passed in is large (AND MEMMAPPED, apparently)
    # Fixed in the hesaff implementation
    SIFT = FULL
    if SIFT:
        sift_scores, sift_list = test_sift_patchmatch_scores(data, labels)
        sift_scores = sift_scores.astype(np.float64)

    ut.colorprint('[siam_perf] Learning Encoders', 'white')
    # Learn encoders
    encoder_kw = {
        #'monotonize': False,
        'monotonize': True,
    }
    cnn_encoder = vt.ScoreNormalizer(**encoder_kw)
    cnn_encoder.fit(cnn_scores, labels)

    if SIFT:
        sift_encoder = vt.ScoreNormalizer(**encoder_kw)
        sift_encoder.fit(sift_scores, labels)

    # Visualize
    ut.colorprint('[siam_perf] Visualize Encoders', 'white')
    viz_kw = dict(
        with_scores=False,
        with_postbayes=False,
        with_prebayes=False,
        target_tpr=.95,
    )
    inter_cnn = cnn_encoder.visualize(
        figtitle=dataname + ' CNN scores. #data=' + str(len(data)),
        fnum=fnum_gen(), **viz_kw)
    if SIFT:
        inter_sift = sift_encoder.visualize(
            figtitle=dataname + ' SIFT scores. #data=' + str(len(data)),
            fnum=fnum_gen(), **viz_kw)

    # Save
    pt.save_figure(fig=inter_cnn.fig, dpath=epoch_dpath)
    if SIFT:
        pt.save_figure(fig=inter_sift.fig, dpath=epoch_dpath)

    # Save out examples of hard errors
    #cnn_fp_label_indicies, cnn_fn_label_indicies =
    #cnn_encoder.get_error_indicies(cnn_scores, labels)
    #sift_fp_label_indicies, sift_fn_label_indicies =
    #sift_encoder.get_error_indicies(sift_scores, labels)

    with_patch_examples = FULL
    if with_patch_examples:
        ut.colorprint('[siam_perf] Visualize Confusion Examples', 'white')
        cnn_indicies = cnn_encoder.get_confusion_indicies(cnn_scores, labels)
        if SIFT:
            sift_indicies = sift_encoder.get_confusion_indicies(sift_scores, labels)

        warped_patch1_list, warped_patch2_list = list(zip(*ut.ichunks(data, 2)))
        samp_args = (warped_patch1_list, warped_patch2_list, labels)
        _sample = functools.partial(draw_results.get_patch_sample_img, *samp_args)

        cnn_fp_img = _sample({'fs': cnn_scores}, cnn_indicies.fp)[0]
        cnn_fn_img = _sample({'fs': cnn_scores}, cnn_indicies.fn)[0]
        cnn_tp_img = _sample({'fs': cnn_scores}, cnn_indicies.tp)[0]
        cnn_tn_img = _sample({'fs': cnn_scores}, cnn_indicies.tn)[0]

        if SIFT:
            sift_fp_img = _sample({'fs': sift_scores}, sift_indicies.fp)[0]
            sift_fn_img = _sample({'fs': sift_scores}, sift_indicies.fn)[0]
            sift_tp_img = _sample({'fs': sift_scores}, sift_indicies.tp)[0]
            sift_tn_img = _sample({'fs': sift_scores}, sift_indicies.tn)[0]

        #if ut.show_was_requested():
        #def rectify(arr):
        #    return np.flipud(arr)
        SINGLE_FIG = False
        if SINGLE_FIG:
            def dump_img(img_, lbl, fnum):
                fig, ax = pt.imshow(img_, figtitle=dataname + ' ' + lbl, fnum=fnum)
                pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
            dump_img(cnn_fp_img, 'cnn_fp_img', fnum_gen())
            dump_img(cnn_fn_img, 'cnn_fn_img', fnum_gen())
            dump_img(cnn_tp_img, 'cnn_tp_img', fnum_gen())
            dump_img(cnn_tn_img, 'cnn_tn_img', fnum_gen())

            dump_img(sift_fp_img, 'sift_fp_img', fnum_gen())
            dump_img(sift_fn_img, 'sift_fn_img', fnum_gen())
            dump_img(sift_tp_img, 'sift_tp_img', fnum_gen())
            dump_img(sift_tn_img, 'sift_tn_img', fnum_gen())
            #vt.imwrite(dataname + '_' + 'cnn_fp_img.png', (cnn_fp_img))
            #vt.imwrite(dataname + '_' + 'cnn_fn_img.png', (cnn_fn_img))
            #vt.imwrite(dataname + '_' + 'sift_fp_img.png', (sift_fp_img))
            #vt.imwrite(dataname + '_' + 'sift_fn_img.png', (sift_fn_img))
        else:
            print('Drawing TP FP TN FN')
            fnum = fnum_gen()
            pnum_gen = pt.make_pnum_nextgen(4, 2)
            fig = pt.figure(fnum)
            pt.imshow(cnn_fp_img,  title='CNN FP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fp_img, title='SIFT FP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_fn_img,  title='CNN FN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fn_img, title='SIFT FN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tp_img,  title='CNN TP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tp_img, title='SIFT TP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tn_img,  title='CNN TN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tn_img, title='SIFT TN', fnum=fnum, pnum=pnum_gen())
            pt.set_figtitle(dataname + ' confusions')
            pt.adjust_subplots(left=0, right=1.0, bottom=0., wspace=.01, hspace=.05)
            pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))

    with_patch_desc = FULL
    if with_patch_desc:
        ut.colorprint('[siam_perf] Visualize Patch Descriptors', 'white')
        fnum = fnum_gen()
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1))
        num_rows = 7
        pnum_gen = pt.make_pnum_nextgen(num_rows, 3)
        # Compare actual output descriptors
        for index in ut.random_indexes(len(sift_list), num_rows):
            vec_sift = sift_list[index]
            vec_cnn = network_output[index]
            patch = data[index]
            pt.imshow(patch, fnum=fnum, pnum=pnum_gen())
            pt.plot_descriptor_signature(vec_cnn, 'cnn vec',  fnum=fnum, pnum=pnum_gen())
            pt.plot_sift_signature(vec_sift, 'sift vec',  fnum=fnum, pnum=pnum_gen())
        pt.set_figtitle('Patch Descriptors')
        pt.adjust_subplots(left=0, right=0.95, bottom=0., wspace=.1, hspace=.15)
        pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))
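Note how `ut.ichunks(data, 2)` above splits the interleaved patch array back into aligned (patch1, patch2) sequences. A toy illustration of the same unpacking with only the stdlib:

# Interleaved pairs: [p1_a, p2_a, p1_b, p2_b, ...]
data = ['a1', 'a2', 'b1', 'b2', 'c1', 'c2']
pairs = list(zip(*[iter(data)] * 2))   # [('a1', 'a2'), ('b1', 'b2'), ('c1', 'c2')]
patches1, patches2 = zip(*pairs)
assert patches1 == ('a1', 'b1', 'c1')
assert patches2 == ('a2', 'b2', 'c2')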
Example #23
0
    def add_rows_from_parent(table, parent_rowids, config=None, verbose=True,
                             return_num_dirty=False):
        """
        Lazy addition
        """
        try:
            # Get requested configuration id
            config_rowid = table.get_config_rowid(config)
            # Find leaf rowids that need to be computed
            initial_rowid_list = table._get_rowid_from_superkey(parent_rowids,
                                                                config=config)
            # Get corresponding "dirty" parent rowids
            isdirty_list = ut.flag_None_items(initial_rowid_list)
            dirty_parent_rowids = ut.compress(parent_rowids, isdirty_list)
            num_dirty = len(dirty_parent_rowids)
            num_total = len(parent_rowids)
            if num_dirty > 0:
                if verbose:
                    fmtstr = 'adding %d / %d new props to %r for config_rowid=%r'
                    print(fmtstr % (num_dirty, num_total, table.tablename,
                                    config_rowid))
                args = zip(*dirty_parent_rowids)
                if table._asobject:
                    # Convenience
                    args = [table.depc.get_obj(parent, rowids)
                            for parent, rowids in zip(table.parents, args)]
                # CALL EXTERNAL PREPROCESSING / GENERATION FUNCTION
                proptup_gen = table.preproc_func(table.depc, *args, config=config)

                #proptup_gen = list(proptup_gen)

                if len(table._nested_idxs) > 0:
                    # TODO: rewrite
                    nested_nCols = len(table.data_colnames)
                    idxs1 = table._nested_idxs
                    mask1 = ut.index_to_boolmask(idxs1, nested_nCols)
                    mask2 = ut.not_list(mask1)
                    idxs2 = ut.where(mask2)
                    def unnest_data(data):
                        unnested_cols = list(zip(ut.take(data, idxs2)))
                        nested_cols = ut.take(data, idxs1)
                        grouped_items = [nested_cols, unnested_cols]
                        groupxs = [idxs1, idxs2]
                        unflat = ut.ungroup(grouped_items, groupxs, nested_nCols - 1)
                        return tuple(ut.flatten(unflat))
                    # Hack when a sql schema has tuples defined in it
                    proptup_gen = (unnest_data(data) for data in proptup_gen)

                #proptup_gen = list(proptup_gen)

                dirty_params_iter = (
                    parent_rowids + (config_rowid,) + data_cols
                    for parent_rowids, data_cols in zip(dirty_parent_rowids, proptup_gen))
                #dirty_params_iter = list(dirty_params_iter)
                #print('dirty_params_iter = %s' % (ut.repr2(dirty_params_iter, nl=1),))
                CHUNKED_ADD = table.chunksize is not None
                if CHUNKED_ADD:
                    for dirty_params_chunk in ut.ichunks(dirty_params_iter,
                                                         chunksize=table.chunksize):
                        table.db._add(table.tablename, table._table_colnames,
                                      dirty_params_chunk,
                                      nInput=len(dirty_params_chunk))
                else:
                    nInput = num_dirty
                    table.db._add(table.tablename, table._table_colnames,
                                  dirty_params_iter, nInput=nInput)
                # Now that the dirty params are added get the correct order of rowids
                rowid_list = table._get_rowid_from_superkey(parent_rowids,
                                                            config=config)
            else:
                rowid_list = initial_rowid_list
            if return_num_dirty:
                return rowid_list, num_dirty
            else:
                return rowid_list
        except Exception as ex:
            ut.printex(ex, 'error in add_rowids', keys=[
                'table', 'parent_rowids', 'config', 'args',
                'dirty_parent_rowids', 'table.preproc_func'])
            raise
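The chunked-add branch above streams a lazy parameter generator into the database in fixed-size batches, so it never has to be materialized all at once. A self-contained sketch of that pattern against plain sqlite3 (the table name and columns here are hypothetical):

import itertools
import sqlite3

def chunked_add(conn, rows, chunksize=128):
    # Consume the row generator in batches instead of all at once.
    rows = iter(rows)
    while True:
        chunk = list(itertools.islice(rows, chunksize))
        if not chunk:
            break
        conn.executemany('INSERT INTO props VALUES (?, ?, ?)', chunk)

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE props (parent_rowid INTEGER, config_rowid INTEGER, value TEXT)')
chunked_add(conn, ((i, 0, 'v%d' % i) for i in range(300)))
assert conn.execute('SELECT COUNT(*) FROM props').fetchone()[0] == 300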
Example #25
0
def clean_tags():
    zotero = get_libzotero()
    # dict of all zotero items
    # items = zotero.index
    # get sql cursor
    cur = zotero.cur
    if False:
        sorted(ut.util_sqlite.get_tablenames(cur))
        ut.print_database_structure(cur)
        # Debug info about tags table in sql

        # The `tags` table stores all tags
        # The itemTags table stores the association between items and tags
        ut.get_table_columninfo_list(cur, 'fields')
        # ut.get_table_columninfo_list(cur, 'relations')
        ut.get_table_columninfo_list(cur, 'fieldsCombined')

        ut.get_table_columninfo_list(cur, 'itemData')
        ut.get_table_columninfo_list(cur, 'itemDataValues')

        ut.get_table_columninfo_list(cur, 'tags')
        ut.get_table_columninfo_list(cur, 'itemTags')

    import pandas as pd
    pd.options.display.max_colwidth = 40
    pd.options.display.max_rows = 20
    def pandas_sql(table, columns):
        return pd.DataFrame(ut.get_table_rows(cur, table, columns),
                            columns=columns)

    item_df = pandas_sql('items', ('itemID', 'itemTypeID', 'libraryID', 'key')).set_index('itemID', drop=False)
    tags_df = pandas_sql('tags', ('tagID', 'name', 'type', 'libraryID', 'key')).set_index('tagID', drop=False)
    itemData_df = pandas_sql('itemData', ('itemID', 'fieldID', 'valueID'))

    itemTag_df = pandas_sql('itemTags', ('itemID', 'tagID'))

    itemDataValues_df = pandas_sql('itemDataValues', ('valueID', 'value')).set_index('valueID')
    field_df = pandas_sql('fields', ('fieldID', 'fieldName', 'fieldFormatID')).set_index('fieldID')

    itemData_df['value'] = itemDataValues_df['value'].loc[itemData_df['valueID'].values].values
    itemData_df['fieldName'] = field_df['fieldName'].loc[itemData_df['fieldID'].values].values

    titles = itemData_df[itemData_df['fieldName'] == 'title']
    assert len(ut.unique(ut.map_vals(len, titles.groupby('itemID').indices).values())) == 1

    # itemTag_df.groupby('itemID').count()
    # Find how often each tag is used
    tagid_to_count = itemTag_df.groupby('tagID').count()
    tagid_to_count = tagid_to_count.rename(columns={'itemID': 'nItems'})
    tagid_to_count['name'] = tags_df.loc[tagid_to_count.index]['name']
    tagid_to_count = tagid_to_count.sort_values('nItems')

    bad_tags = tagid_to_count[tagid_to_count['nItems'] == 1]

    tagid_to_count['tag_ncharsize'] = tagid_to_count['name'].apply(len)
    tagid_to_count = tagid_to_count.sort_values('tag_ncharsize')
    bad_tags = tagid_to_count[tagid_to_count['tag_ncharsize'] > 25]['name'].values.tolist()

    def clean_tags2():
        api_key = 'fBDBqRPwW9O3mYyNLiksBKZy'
        base_url = 'https://api.zotero.org'
        library_id = '1279414'
        library_type = 'user'
        from pyzotero import zotero
        zot = zotero.Zotero(library_id, library_type, api_key)

        for chunk in ut.ProgChunks(bad_tags, 50):
            zot.delete_tags(*chunk)

    if False:
        api_key = 'fBDBqRPwW9O3mYyNLiksBKZy'
        base_url = 'https://api.zotero.org'
        user_id = '1279414'
        userOrGroupPrefix = '/users/' + user_id
        params = {'v': 3, 'key': api_key}

        import requests
        items_resp = requests.get(base_url + userOrGroupPrefix + '/items', params=params)
        print(items_resp.content)
        print(items_resp)

        json_tags = []
        get_url = base_url + userOrGroupPrefix + '/tags'
        while True:
            print('get_url = %r' % (get_url,))
            tag_resp = requests.get(get_url, params=params)
            if tag_resp.status_code != 200:
                break
            json_tags.extend(tag_resp.json())
            if 'next' in tag_resp.links:
                get_url = tag_resp.links['next']['url']
            else:
                break

        version_to_tags = ut.ddict(list)
        bad_tags = []
        for tag in ut.ProgIter(json_tags, label='parsing tags'):
            # x = requests.get(tag['links']['self']['href'], params=params)
            if tag['meta']['numItems'] == 1:
                from urllib.parse import quote
                try:
                    bad_tags.append(quote(tag['tag']))
                except Exception:
                    print('cannot encode tag=%r' % (tag,))

        for chunk in ut.ProgIter(ut.ichunks(bad_tags, 50), length=(len(bad_tags) + 49) // 50):
            search_url = base_url + userOrGroupPrefix + '/items?tag=' + ' || '.join(chunk)
            r = requests.get(search_url, params=params)
            matching_items = r.json()
            # assert len(matching_items) == 1
            for item in matching_items:
                # record which tags matched each item version
                version_to_tags[item['version']].extend(chunk)

        # DELETE MULTIPLE TAGS
        for chunk in ut.ichunks(bad_tags, 50):
            encoded_chunk = []
            for t in chunk:
                try:
                    encoded_chunk.append(quote(t))
                except Exception:
                    print(t)
            suffix = ' || '.join(encoded_chunk)
            delete_url = base_url + userOrGroupPrefix + '/tags?' + suffix
            print('delete_url = %r' % (delete_url,))
            resp = requests.delete(delete_url, params=params)

        bad_tags = tagid_to_count[tagid_to_count['nItems'] == 1]
        bad_tags['tagID'] = bad_tags.index
        # Drop the itemTags associations for all automatic tags (type=1)
        cur.execute('DELETE FROM itemTags WHERE tagID IN '
                    '(SELECT tagID FROM tags WHERE type=1)')
    item_df['title'] = titles.set_index('itemID')['value']
    for idx, item in zotero.index.items():
        sql_title = item_df.loc[item.id]['title']
        if item.title != sql_title:
            if pd.isnull(sql_title) and item.title is not None:
                print(item.__dict__)
                print(item_df.loc[item.id])
                print('item.title = %r' % (item.title,))
                print('sql_title = %r' % (sql_title,))
                assert False

    duplicate_tags = [
        (name, idxs) for name, idxs in tags_df.groupby('name', sort=True).indices.items() if len(idxs) > 2
    ]
    tagname_to_tagid = tags_df.groupby('name', sort=True).first()
    new_to_oldtags = {}
    # Determine which tagi to use for each name
    for tagname, idxs in duplicate_tags:
        tags_subdf = tags_df.iloc[idxs]
        mapping = itemTag_df[itemTag_df['tagID'].isin(tags_subdf['tagID'])]
        tag_hist = mapping.groupby('tagID').count()
        best_tagid = tag_hist['itemID'].idxmax()

        new_to_oldtags[best_tagid] = set(tag_hist.index.values) - {best_tagid}

        tagname_to_tagid.loc[tagname] = tags_df.loc[best_tagid]
        # for col in tagname_to_tagid.columns:
        #     tagname_to_tagid.loc[tagname][col] = tags_df.loc[best_tagid][col]
        # tags_df.loc[best_tagid]

    if False:
        # Update tagIds
        for newid, oldids in new_to_oldtags.items():
            for oldid in oldids:
                # cur.execute('SELECT itemID, tagID FROM itemTags WHERE tagID=?', (oldid,))
                import sqlite3
                try:
                    cmd = 'UPDATE itemTags SET tagID=? WHERE tagID=?'
                    args = (newid, oldid)
                    print('(%s) args = %r' % (cmd, args,))
                    cur.execute(cmd, args)
                    print(cur.fetchall())
                except sqlite3.IntegrityError:
                    print('error')
                    pass

    # tags_df.groupby('name', sort=True)

    # itemTag_df.groupby('itemID')
    # duptags = tags_df.iloc[tags_df.groupby('name', sort=True).indices['animals']]
    # duptags['tagID']
    # flags = itemTag_df['tagID'].isin(duptags['tagID'])
    # dup_rel = itemTag_df[flags]
    # item_df['title'].loc[dup_rel['itemID']].values
    # tags_df.iloc[tags_df.groupby('name', sort=True).indices['animals']]

    # tags_df[tags_df['type'] == 1]
    # tags_df[tags_df['type'] == 0]
    # tags_df['libraryID'].unique()
    # tags_df['type'].unique()

    # Example query (for reference):
    #   SELECT * FROM itemTags WHERE tagID IN
    #       (SELECT tagID FROM tags WHERE name = 'animals')

    item_tag_pairs = ut.get_table_rows(cur, 'itemTags', ('itemID', 'tagID'))
    # Group tags by item
    itemid_to_tagids = ut.group_pairs(item_tag_pairs)
    # Group items by tags
    tagid_to_itemids = ut.group_pairs(map(tuple, map(reversed, item_tag_pairs)))

    # mapping from tagid to name
    tagid_to_name = dict(ut.get_table_rows(cur, 'tags', ('tagID', 'name')))

    tagid_freq = list(ut.sort_dict(ut.map_vals(len, tagid_to_itemids), 'vals').items())
    ut.sort_dict(ut.map_vals(sum, ut.group_pairs([(freq, tagid_to_name.get(tagid, tagid)) for tagid, freq in tagid_freq])), 'vals')
    tagname_freq = ut.map_keys(lambda k: tagid_to_name.get(k, k), tagid_freq)
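The grouping calls near the end assume `ut.group_pairs` maps each first element of the (itemID, tagID) pairs to the list of its paired second elements. A stdlib sketch of that contract:

from collections import defaultdict

def group_pairs_sketch(pairs):
    # Group second elements by first element, preserving pair order.
    groups = defaultdict(list)
    for key, value in pairs:
        groups[key].append(value)
    return dict(groups)

assert group_pairs_sketch([(1, 'a'), (1, 'b'), (2, 'c')]) == {1: ['a', 'b'], 2: ['c']}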
Example #26
0
def chunks(self, chunksize):
    # Chunk the index range (not the items) so `take` returns aligned slices
    for idxs in ut.ichunks(range(len(self)), chunksize):
        yield self.take(idxs)
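For intuition, a hypothetical list-backed container using the corrected method (assuming utool is importable); each yielded chunk is an aligned slice of the underlying items:

import utool as ut

class Takeable(list):
    def take(self, idxs):
        return [self[i] for i in idxs]

    def chunks(self, chunksize):
        for idxs in ut.ichunks(range(len(self)), chunksize):
            yield self.take(idxs)

assert list(Takeable('abcdefg').chunks(3)) == [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]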
Example #27
0
def add_annot_chips(ibs, aid_list, config2_=None, verbose=not ut.QUIET, return_num_dirty=False):
    r"""
    annot.chip.add(aid_list)

    CRITICAL FUNCTION MUST EXIST FOR ALL DEPENDENTS
    Adds / ensures / computes a dependent property

    Args:
         aid_list

    Returns:
        returns chip_rowid_list of added (or already existing chips)

    TemplateInfo:
        python -m ibeis.templates.template_generator --key chip --funcname-filter "\<add_annot_chips\>" --modfname=manual_chip_funcs
        python -m ibeis.templates.template_generator --key chip --modfname=manual_chip_funcs --funcname-filter "\<add_annot_chip"

        Tadder_pl_dependant
        parent = annot
        leaf = chip

    CommandLine:
        python -m ibeis.control.manual_chip_funcs --test-add_annot_chips

    RESTful:
        Method: POST
        URL:    /api/annot_chip/

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_chip_funcs import *  # NOQA
        >>> ibs, config2_ = testdata_ibs()
        >>> aid_list = ibs._get_all_aids()[::3]
        >>> chip_rowid_list = ibs.add_annot_chips(aid_list, config2_=config2_)
        >>> assert len(chip_rowid_list) == len(aid_list)
        >>> ut.assert_all_not_None(chip_rowid_list)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_chip_funcs import *  # NOQA
        >>> ibs, config2_ = testdata_ibs()
        >>> aid_list = ibs._get_all_aids()[0:10]
        >>> sub_aid_list1 = aid_list[0:6]
        >>> sub_aid_list2 = aid_list[5:7]
        >>> sub_aid_list3 = aid_list[0:7]
        >>> sub_chip_rowid_list1 = ibs.get_annot_chip_rowids(sub_aid_list1, config2_=config2_, ensure=True)
        >>> ibs.get_annot_chip_rowids(sub_aid_list1, config2_=config2_, ensure=True)
        >>> sub_chip_rowid_list1, num_dirty0 = ibs.add_annot_chips(sub_aid_list1, config2_=config2_, return_num_dirty=True)
        >>> assert num_dirty0 == 0
        >>> ut.assert_all_not_None(sub_chip_rowid_list1)
        >>> ibs.delete_annot_chips(sub_aid_list2)
        >>> #ibs.delete_annot_chip(sub_aid_list2)?
        >>> sub_chip_rowid_list3 = ibs.get_annot_chip_rowids(sub_aid_list3, config2_=config2_, ensure=False)
        >>> # Only the last two should be None
        >>> ut.assert_all_not_None(sub_chip_rowid_list3[0:5], 'sub_chip_rowid_list3[0:5])')
        >>> assert sub_chip_rowid_list3[5:7] == [None, None]
        >>> sub_chip_rowid_list3_ensured, num_dirty1 = ibs.add_annot_chips(sub_aid_list3, config2_=config2_, return_num_dirty=True)
        >>> assert num_dirty1 == 2, 'Only two params should have been computed here'
        >>> ut.assert_all_not_None(sub_chip_rowid_list3_ensured)
    """
    from ibeis.algo.preproc import preproc_chip
    ut.assert_all_not_None(aid_list, ' annot_rowid_list')
    # Get requested configuration id
    config_rowid = ibs.get_chip_config_rowid(config2_=config2_)
    # Find leaf rowids that need to be computed
    initial_chip_rowid_list = get_annot_chip_rowids_(ibs, aid_list, config2_=config2_)
    # Get corresponding "dirty" parent rowids
    isdirty_list = ut.flag_None_items(initial_chip_rowid_list)
    dirty_aid_list = ut.compress(aid_list, isdirty_list)
    num_dirty = len(dirty_aid_list)
    num_total = len(aid_list)
    if num_dirty > 0:
        if verbose:
            fmtstr = '[add_annot_chips] adding %d / %d new chip for config_rowid=%r'
            print(fmtstr % (num_dirty, num_total, config_rowid))
        # Dependent columns do not need true from_superkey getters.
        # We can use the Tgetter_pl_dependant_rowids_ instead
        get_rowid_from_superkey = functools.partial(
            ibs.get_annot_chip_rowids_, config2_=config2_)
        proptup_gen = preproc_chip.generate_chip_properties(ibs, dirty_aid_list, config2_=config2_)
        dirty_params_iter = (
            (aid, config_rowid, chip_uri, chip_width, chip_height)
            for aid, (chip_uri, chip_width, chip_height,) in
            zip(dirty_aid_list, proptup_gen)
        )
        colnames = ['annot_rowid', 'config_rowid',
                    'chip_uri', 'chip_width', 'chip_height']
        #chip_rowid_list = ibs.dbcache.add_cleanly(const.CHIP_TABLE, colnames, dirty_params_iter, get_rowid_from_superkey)
        CHUNKED_ADD = True
        if CHUNKED_ADD:
            chunksize = 32 if ut.WIN32 else 128
            for dirty_params_chunk in ut.ichunks(dirty_params_iter, chunksize=chunksize):
                nInput = len(dirty_params_chunk)
                ibs.dbcache._add(
                    const.CHIP_TABLE, colnames, dirty_params_chunk, nInput=nInput)
        else:
            nInput = num_dirty
            ibs.dbcache._add(
                const.CHIP_TABLE, colnames, dirty_params_iter, nInput=nInput)
        # Now that the dirty params are added get the correct order of rowids
        chip_rowid_list = get_rowid_from_superkey(aid_list)
    else:
        chip_rowid_list = initial_chip_rowid_list
    if return_num_dirty:
        return chip_rowid_list, num_dirty
    return chip_rowid_list
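The dirty/clean bookkeeping shared by Examples #23 and #27 reduces to flagging missing rowids and recomputing only those. A stdlib sketch of the two helpers (names mirror the utool calls and are assumptions about their behavior):

def flag_none_items(items):
    # ut.flag_None_items analogue: True where the rowid is missing.
    return [item is None for item in items]

def compress(items, flags):
    # ut.compress analogue: keep items whose flag is True.
    return [item for item, flag in zip(items, flags) if flag]

initial_rowids = [10, None, 12, None]
aid_list = ['a', 'b', 'c', 'd']
dirty_aids = compress(aid_list, flag_none_items(initial_rowids))
assert dirty_aids == ['b', 'd']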