Example #1
        def get_match_thumbtup(ibs, qaid2_cm, qaids, daids, index, qreq_=None,
                               thumbsize=(128, 128), match_thumbtup_cache={}):
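            # NOTE: the mutable default dict appears intentional here: it acts
            # as a cache shared across calls (replacing it with None would
            # change that behavior).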
            daid = daids[index]
            qaid = qaids[index]
            cm = qaid2_cm[qaid]
            assert cm.qaid == qaid, 'aids do not agree'

            OLD = False
            if OLD:
                fpath = ensure_match_img(ibs, cm, daid, qreq_=qreq_,
                                         match_thumbtup_cache=match_thumbtup_cache)
                if isinstance(thumbsize, int):
                    thumbsize = (thumbsize, thumbsize)
                thumbtup = (ut.augpath(fpath, 'thumb_%d,%d' % thumbsize), fpath, thumbsize,
                            [], [])
                return thumbtup
            else:
                # Hacky new way of drawing
                fpath, func, func2 = make_ensure_match_img_nosql_func(qreq_, cm, daid)
                #match_thumbdir = ibs.get_match_thumbdir()
                #match_thumb_fname = get_match_thumb_fname(cm, daid, qreq_)
                #fpath = ut.unixjoin(match_thumbdir, match_thumb_fname)
                thumbdat = {
                    'fpath': fpath,
                    'thread_func': func,
                    'main_func': func2,
                    #'args': (ibs, cm, daid),
                    #'kwargs': dict(qreq_=qreq_,
                    #               match_thumbtup_cache=match_thumbtup_cache)
                }
                return thumbdat
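
For reference, a minimal sketch of what the augpath call in the OLD branch builds, assuming utool's augpath inserts the suffix between the filename stem and its extension (the cache path here is hypothetical):

    import utool as ut
    fpath = '/cache/match_qaid1_daid2.jpg'  # hypothetical cached match image
    thumbsize = (128, 128)
    thumb_fpath = ut.augpath(fpath, 'thumb_%d,%d' % thumbsize)
    # -> '/cache/match_qaid1_daid2thumb_128,128.jpg'
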
Example #2
    def outline():
        """
        ./texfix.py --fpaths chapter4-application.tex --outline --asmarkdown --numlines=999 -w --ignoreinputstartswith=def,Crall,header,colordef,figdef
        """
        fpaths = testdata_fpaths()
        print('fpaths = %r' % (fpaths, ))

        for fpath in fpaths:
            text = ut.readfrom(fpath)
            root = latex_parser.LatexDocPart.parse_text(text, debug=None)

            # HACK
            new_text = '\n'.join(root.reformat_blocks(debug=None))
            # remove trailing spaces
            new_text = re.sub(' *$', '', new_text, flags=re.MULTILINE)
            # remove double newlines
            new_text = re.sub('(\n *)+\n+',
                              '\n\n',
                              new_text,
                              flags=re.MULTILINE)

            document = root.find_descendant_type('document')
            #document = root.find_descendant_type('section', pat='Identification')
            print('document = %r' % (document, ))
            if document is not None:
                root = document

            sectionpat = ut.get_argval('--section', default=None)
            if sectionpat is not None:
                root = root.find_descendant_type('section', pat=sectionpat)
                print('root = %r' % (root, ))
                if root is None:
                    # import utool
                    # utool.embed()
                    raise Exception('section %r does not exist' % (sectionpat,))
            #print(root.get_debug_tree_text())

            #ut.colorprint(root.summary_str(outline=True), 'yellow')
            print('---outline---')
            outline = True
            # outline = False
            outline_text = root.summary_str(outline=outline, highlight=False)
            summary = root.summary_str(outline=outline, highlight=True)
            if not ut.get_argflag('-w'):
                print(summary)
            print('---/outline---')
            if root._config['asmarkdown']:
                codetype = 'markdown'
                newext = '.md'
            else:
                codetype = 'latex'
                newext = None

            ut.dump_autogen_code(ut.augpath(fpath,
                                            augpref='outline_',
                                            newext=newext),
                                 outline_text,
                                 codetype=codetype,
                                 fullprint=False)
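
A quick sketch of how the outline's output path is derived from the input path, assuming augpath's augpref prepends to the filename and newext replaces the extension (consistent with the call above):

    import utool as ut
    fpath = 'chapter4-application.tex'
    out_fpath = ut.augpath(fpath, augpref='outline_', newext='.md')
    # -> 'outline_chapter4-application.md'
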
Example #3
def compress_pdf(pdf_fpath, output_fname=None):
    """ uses ghostscript to write a pdf """
    import utool as ut
    ut.assertpath(pdf_fpath)
    suffix = '_' + ut.get_datestamp(False) + '_compressed'
    print('pdf_fpath = %r' % (pdf_fpath, ))
    output_pdf_fpath = ut.augpath(pdf_fpath, suffix, newfname=output_fname)
    print('output_pdf_fpath = %r' % (output_pdf_fpath, ))
    gs_exe = find_ghostscript_exe()
    cmd_list = (gs_exe, '-sDEVICE=pdfwrite', '-dCompatibilityLevel=1.4',
                '-dNOPAUSE', '-dQUIET', '-dBATCH',
                '-sOutputFile=' + output_pdf_fpath, pdf_fpath)
    ut.cmd(*cmd_list)
    return output_pdf_fpath
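
Hypothetical usage, assuming Ghostscript is installed and on the PATH ('draft.pdf' is a placeholder for any existing PDF):

    out_fpath = compress_pdf('draft.pdf')
    # writes 'draft_<datestamp>_compressed.pdf' next to the input
    # and returns that path
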
Example #4
def resize_imagelist_to_sqrtarea(gpath_list, new_gpath_list=None,
                                 sqrt_area=800, output_dir=None,
                                 checkexists=True,
                                 **kwargs):
    """ Resizes images and yeilds results asynchronously  """
    import vtool as vt
    target_area = sqrt_area ** 2
    # Read image sizes
    gsize_list = [vt.open_image_size(gpath) for gpath in gpath_list]
    # Compute new sizes which preserve aspect ratio
    newsize_list = [vt.ScaleStrat.area(target_area, wh) for wh in gsize_list]
    if new_gpath_list is None:
        # Compute names for the new images if not given
        if output_dir is None:
            # Create an output directory if not specified
            output_dir = 'resized_sqrtarea%r' % sqrt_area
        ut.ensuredir(output_dir)
        size_suffixs = ['_' + repr(newsize).replace(' ', '') for newsize in newsize_list]
        from os.path import basename
        old_gnames = [basename(p) for p in gpath_list]
        new_gname_list = [ut.augpath(p, suffix=s)
                          for p, s in zip(old_gnames, size_suffixs)]
        new_gpath_list = [join(output_dir, gname) for gname in new_gname_list]
        new_gpath_list = list(map(ut.unixpath, new_gpath_list))
    assert len(new_gpath_list) == len(gpath_list), 'unequal len'
    assert len(newsize_list) == len(gpath_list), 'unequal len'
    # Evaluate generator
    if checkexists:
        exists_list = list(map(exists, new_gpath_list))
        gpath_list_ = ut.filterfalse_items(gpath_list, exists_list)
        new_gpath_list_ = ut.filterfalse_items(new_gpath_list, exists_list)
        newsize_list_ = ut.filterfalse_items(newsize_list, exists_list)
    else:
        gpath_list_ = gpath_list
        new_gpath_list_ = new_gpath_list
        newsize_list_ = newsize_list
    generator = resize_imagelist_generator(gpath_list_, new_gpath_list_,
                                           newsize_list_, **kwargs)
    for res in generator:
        pass
    #return [res for res in generator]
    return new_gpath_list
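
The sqrt_area parameter fixes the total pixel area of each output (sqrt_area ** 2 pixels) while preserving aspect ratio. A plain-Python sketch of what vt.ScaleStrat.area is assumed to compute:

    import math

    def scaled_size(target_area, wh):
        # Scale (w, h) so that w * h is approximately target_area,
        # keeping the aspect ratio.
        w, h = wh
        scale = math.sqrt(target_area / (w * h))
        return int(round(w * scale)), int(round(h * scale))

    scaled_size(800 ** 2, (4000, 3000))  # -> (924, 693); 924 * 693 ~= 640000
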
Example #5
def save_pretrained_weights_slice(pretrained_weights,
                                  weights_path,
                                  slice_=slice(None)):
    """
    Used to save a slice of pretrained weights. The doctest will publish a new set of weights

    CommandLine:
        python -m ibeis_cnn.utils --test-save_pretrained_weights_slice --net='vggnet_full' --slice='slice(0,6)'
        python -m ibeis_cnn.utils --test-save_pretrained_weights_slice --net='vggnet_full' --slice='slice(0,30)'
        python -m ibeis_cnn.utils --test-save_pretrained_weights_slice --net='caffenet_full' --slice='slice(0,6)'
        python -m ibeis_cnn.utils --test-save_pretrained_weights_slice --net='caffenet_full' --slice='slice(0,?)'

    Example:
        >>> # DISABLE_DOCTEST
        >>> # Build a new subset of an existing model
        >>> from ibeis_cnn.models import *  # NOQA
        >>> from ibeis_cnn._plugin_grabmodels import ensure_model
        >>> # Get base model weights
        >>> modelname = ut.get_argval('--net', type_=str, default='vggnet_full')
        >>> weights_path = ensure_model(modelname)
        >>> pretrained_weights = ut.load_cPkl(weights_path)
        >>> # Get the slice you want
        >>> slice_str = ut.get_argval('--slice', type_=str, default='slice(0, 6)')
        >>> slice_ = eval(slice_str, globals(), locals())
        >>> # execute function
        >>> sliced_weights_path = save_pretrained_weights_slice(pretrained_weights, weights_path, slice_)
        >>> # PUT YOUR PUBLISH PATH HERE
        >>> publish_fpath = ut.truepath('~/Dropbox/IBEIS')
        >>> ut.copy(sliced_weights_path, publish_fpath)
    """
    # slice and save
    suffix = '.slice_%r_%r_%r' % (slice_.start, slice_.stop, slice_.step)
    sliced_weights_path = ut.augpath(weights_path, suffix)
    sliced_pretrained_weights = pretrained_weights[slice_]
    ut.save_cPkl(sliced_weights_path, sliced_pretrained_weights)
    # print info
    net_strs.print_pretrained_weights(pretrained_weights, weights_path)
    net_strs.print_pretrained_weights(sliced_pretrained_weights,
                                      sliced_weights_path)
    return sliced_weights_path
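
The slice suffix is built from the slice bounds with %r, so the published filename records exactly which layers were kept. For example:

    slice_ = slice(0, 6)
    suffix = '.slice_%r_%r_%r' % (slice_.start, slice_.stop, slice_.step)
    # suffix == '.slice_0_6_None'; augpath then inserts it before the
    # weights file's extension (assuming the stem + suffix + ext behavior
    # used throughout these examples)
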
Example #6
def get_extramargin_detectchip_info(ibs, aid_list, config2_=None, species=None, FACTOR=4):
    r"""
    Computes a detection chip with a bit of spatial context so the detection algorithm doesn't clip boundaries

    CommandLine:
        python -m ibeis.algo.preproc.preproc_probchip --test-get_extramargin_detectchip_info --show
        python -m ibeis.algo.preproc.preproc_probchip --test-get_extramargin_detectchip_info --show --qaid 27
        python -m ibeis.algo.preproc.preproc_probchip --test-get_extramargin_detectchip_info --show --qaid 2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_probchip import *  # NOQA
        >>> import ibeis
        >>> from ibeis.init import main_helpers
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> aid_list = main_helpers.get_test_qaids(ibs)
        >>> arg_list, newsize_list, halfoffset_cs_list = get_extramargin_detectchip_info(ibs, aid_list)
        >>> ut.quit_if_noshow()
        >>> testshow_extramargin_info(ibs, aid_list, arg_list, newsize_list, halfoffset_cs_list)
    """
    from vtool import chip as ctool
    target_width = 128 * FACTOR
    gfpath_list = ibs.get_annot_image_paths(aid_list)
    bbox_list   = ibs.get_annot_bboxes(aid_list)
    theta_list  = ibs.get_annot_thetas(aid_list)
    bbox_size_list = ut.get_list_column(bbox_list, [2, 3])
    newsize_list = list(map(
        lambda size: ctool.get_scaled_size_with_width(target_width, *size),
        bbox_size_list))
    invalid_aids = [aid for aid, (w, h) in zip(aid_list, bbox_size_list) if w == 0 or h == 0]
    if len(invalid_aids) > 0:
        msg = ("REMOVE INVALID (BAD WIDTH AND/OR HEIGHT) AIDS TO COMPUTE AND WRITE CHIPS")
        msg += ("INVALID AIDS: %r" % (invalid_aids, ))
        print(msg)
        raise Exception(msg)
    # There are two spaces we are working in here:
    # probchip space _pcs (the space of the margined chip computed for probchip) and
    # image space _gs (the space used in bbox specification)

    # Compute the offset we would like in chip space for margin expansion
    halfoffset_cs_list = [
        # TODO: Find correct offsets
        (16 * FACTOR, 16 * FACTOR)  # (w / 16, h / 16)
        for (w, h) in newsize_list
    ]

    # Compute expanded newsize list to include the extra margin offset
    expanded_newsize_list = [
        (w_pcs + (2 * xo_pcs), h_pcs + (2 * yo_pcs))
        for (w_pcs, h_pcs), (xo_pcs, yo_pcs) in zip(newsize_list, halfoffset_cs_list)
    ]

    # Get the conversion from chip to image space
    to_imgspace_scale_factors = [
        (w_gs / w_pcs, h_gs / h_pcs)
        for ((w_pcs, h_pcs), (w_gs, h_gs)) in zip(newsize_list, bbox_size_list)
    ]

    # Convert the chip offsets to image space
    halfoffset_gs_list = [
        ((sx * xo), (sy * yo))
        for (sx, sy), (xo, yo) in zip(to_imgspace_scale_factors, halfoffset_cs_list)
    ]

    # Find the size of the expanded margin bbox in image space
    expanded_bbox_gs_list = [
        (x_gs - xo_gs, y_gs - yo_gs, w_gs + (2 * xo_gs), h_gs + (2 * yo_gs))
        for (x_gs, y_gs, w_gs, h_gs), (xo_gs, yo_gs) in zip(bbox_list, halfoffset_gs_list)
    ]

    # TODO: make this work
    probchip_fpath_list = get_annot_probchip_fpath_list(ibs, aid_list,
                                                        config2_=config2_,
                                                        species=species)
    #probchip_extramargin_fpath_list = [ut.augpath(fpath, '_extramargin') for
    #fpath in probchip_fpath_list]
    extramargin_fpath_list = [ut.augpath(fpath, '_extramargin').replace('probchip', 'detectchip')
                              for fpath in probchip_fpath_list]
    # # filter by species and add a suffix for the probchip_input
    # # also compute a probchip fpath with an expanded suffix for the detector
    #probchip_fpath_list = get_annot_probchip_fpath_list(ibs, aids, config2_=None, species=species)
    # Then crop the output and write that as the real probchip

    filtlist_iter = ([] for _ in range(len(aid_list)))
    arg_iter = zip(extramargin_fpath_list, gfpath_list,
                   expanded_bbox_gs_list, theta_list, expanded_newsize_list,
                   filtlist_iter)
    arg_list = list(arg_iter)
    return arg_list, newsize_list, halfoffset_cs_list
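
A worked example of the margin arithmetic above, with FACTOR = 4 (target_width = 512) and a hypothetical annotation bbox (x, y, w, h) = (100, 50, 400, 300):

    w_pcs, h_pcs = 512, 384                    # scaled to target width, aspect kept
    xo_pcs, yo_pcs = 16 * 4, 16 * 4            # chip-space half offsets: (64, 64)
    expanded_newsize = (w_pcs + 2 * xo_pcs, h_pcs + 2 * yo_pcs)   # (640, 512)
    sx, sy = 400 / w_pcs, 300 / h_pcs          # chip -> image scale: (0.78125, 0.78125)
    xo_gs, yo_gs = sx * xo_pcs, sy * yo_pcs    # (50.0, 50.0)
    expanded_bbox_gs = (100 - xo_gs, 50 - yo_gs, 400 + 2 * xo_gs, 300 + 2 * yo_gs)
    # -> (50.0, 0.0, 500.0, 400.0)
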
Example #7
def update_bindings():
    r"""
    Returns:
        dict: matchtups

    CommandLine:
        python ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings
        utprof.py ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings

    Example:
        >>> # DISABLE_DOCTEST
        >>> from autogen_bindings import *  # NOQA
        >>> import sys
        >>> import utool as ut
        >>> sys.path.append(ut.truepath('~/local/build_scripts/flannscripts'))
        >>> matchtups = update_bindings()
        >>> result = ('matchtups = %s' % (ut.repr2(matchtups),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    from os.path import basename
    import difflib
    import numpy as np
    import re
    binding_names = [
        'build_index',
        'used_memory',
        'add_points',
        'remove_point',
        'compute_cluster_centers',
        'load_index',
        'save_index',
        'find_nearest_neighbors',
        'radius_search',
        'remove_points',
        'free_index',
        'find_nearest_neighbors_index',

        # 'size',
        # 'veclen',
        # 'get_point',
        # 'flann_get_distance_order',
        # 'flann_get_distance_type',
        # 'flann_log_verbosity',

        # 'clean_removed_points',
    ]

    _places = [
        '~/code/flann/src/cpp/flann/flann.cpp',
        '~/code/flann/src/cpp/flann/flann.h',
        '~/code/flann/src/python/pyflann/flann_ctypes.py',
        '~/code/flann/src/python/pyflann/index.py',
    ]

    eof_sentinals = {
        # 'flann_ctypes.py': '# END DEFINE BINDINGS',
        'flann_ctypes.py': 'def ensure_2d_array(arr',
        # 'flann.h': '// END DEFINE BINDINGS',
        'flann.h': '#ifdef __cplusplus',
        'flann.cpp': None,
        'index.py': None,
    }
    block_sentinals = {
        'flann.h': re.escape('/**'),
        'flann.cpp': 'template *<typename Distance>',
        # 'flann_ctypes.py': '\n',
        'flann_ctypes.py': r'flann\.[a-z_.]* =',
        # 'index.py': '    def .*',
        'index.py': '    [^ ].*',
    }
    places = {
        basename(fpath): fpath
        for fpath in ut.lmap(ut.truepath, _places)
    }
    text_dict = ut.map_dict_vals(ut.readfrom, places)
    lines_dict = {key: val.split('\n') for key, val in text_dict.items()}
    orig_texts = text_dict.copy()  # NOQA
    binding_defs = {}
    named_blocks = {}

    print('binding_names = %r' % (binding_names, ))
    for binding_name in binding_names:
        blocks, defs = autogen_parts(binding_name)
        binding_defs[binding_name] = defs
        named_blocks[binding_name] = blocks

    for binding_name in ut.ProgIter(binding_names):
        ut.colorprint('+--- GENERATE BINDING %s -----' % (binding_name, ),
                      'yellow')
        blocks_dict = named_blocks[binding_name]
        for key in places.keys():
            ut.colorprint(
                '---- generating %s for %s -----' % (
                    binding_name,
                    key,
                ), 'yellow')
            # key = 'flann_ctypes.py'
            # print(text_dict[key])
            old_text = text_dict[key]
            line_list = lines_dict[key]
            #text = old_text
            block = blocks_dict[key]

            debug = ut.get_argflag('--debug')
            # debug = True
            # if debug:
            #     print(ut.highlight_code(block, splitext(key)[1]))

            # Find a place in the code that already exists

            searchblock = block
            if key.endswith('.cpp') or key.endswith('.h'):
                searchblock = re.sub(ut.REGEX_C_COMMENT,
                                     '',
                                     searchblock,
                                     flags=re.MULTILINE | re.DOTALL)
            searchblock = '\n'.join(searchblock.splitlines()[0:3])

            # @ut.cached_func(verbose=False)
            def cached_match(old_text, searchblock):
                def isjunk(x):
                    return False
                    return x in ' \t,*()'

                def isjunk2(x):
                    return x in ' \t,*()'

                # Not sure why the first one just doesn't find it
                # isjunk = None
                sm = difflib.SequenceMatcher(isjunk,
                                             old_text,
                                             searchblock,
                                             autojunk=False)
                sm0 = difflib.SequenceMatcher(isjunk,
                                              old_text,
                                              searchblock,
                                              autojunk=True)
                sm1 = difflib.SequenceMatcher(isjunk2,
                                              old_text,
                                              searchblock,
                                              autojunk=False)
                sm2 = difflib.SequenceMatcher(isjunk2,
                                              old_text,
                                              searchblock,
                                              autojunk=True)
                matchtups = (sm.get_matching_blocks() +
                             sm0.get_matching_blocks() +
                             sm1.get_matching_blocks() +
                             sm2.get_matching_blocks())
                return matchtups

            matchtups = cached_match(old_text, searchblock)
            # Find a reasonable match in matchtups

            found = False
            if debug:
                # print('searchblock =\n%s' % (searchblock,))
                print('searchblock = %r' % (searchblock, ))
            for (a, b, size) in matchtups:
                matchtext = old_text[a:a + size]
                pybind = binding_defs[binding_name]['py_binding_name']
                if re.search(binding_name + '\\b', matchtext) or re.search(
                        pybind + '\\b', matchtext):
                    found = True
                    pos = a + size
                    if debug:
                        print('MATCHING TEXT')
                        print(matchtext)
                    break
                else:
                    if debug and 0:
                        print('Not matching')
                        print('matchtext = %r' % (matchtext, ))
                        matchtext2 = old_text[a - 10:a + size + 20]
                        print('matchtext2 = %r' % (matchtext2, ))

            if found:
                linelens = np.array(ut.lmap(len, line_list)) + 1
                sumlen = np.cumsum(linelens)
                row = np.where(sumlen < pos)[0][-1] + 1
                #print(line_list[row])
                # Search for extents of the block to overwrite
                block_sentinal = block_sentinals[key]
                row1 = ut.find_block_end(row, line_list, block_sentinal,
                                         -1) - 1
                row2 = ut.find_block_end(row + 1, line_list, block_sentinal,
                                         +1)
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is not None:
                    print('eof_sentinal = %r' % (eof_sentinal, ))
                    row2 = min([
                        count for count, line in enumerate(line_list)
                        if line.startswith(eof_sentinal)
                    ][-1], row2)
                nr = len((block + '\n\n').splitlines())
                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                rtext1 = '\n'.join(line_list[row1:row2])
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])
                if debug:
                    print('-----')
                    ut.colorprint('FOUND AND REPLACING %s' % (binding_name, ),
                                  'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint(
                        'FOUND AND REPLACED WITH %s' % (binding_name, ),
                        'yellow')
                    print(ut.highlight_code(rtext2))
                if not ut.get_argflag('--diff') and not debug:
                    print(
                        ut.color_diff_text(
                            ut.difftext(rtext1,
                                        rtext2,
                                        num_context_lines=7,
                                        ignore_whitespace=True)))
            else:
                # Append to end of the file
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is None:
                    row2 = len(line_list) - 1
                else:
                    row2_choice = [
                        count for count, line in enumerate(line_list)
                        if line.startswith(eof_sentinal)
                    ]
                    if len(row2_choice) == 0:
                        row2 = len(line_list) - 1
                        assert False
                    else:
                        row2 = row2_choice[-1] - 1

                # row1 = row2 - 1
                # row2 = row2 - 1
                row1 = row2

                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                # block + '\n\n\n', row1, row2, line_list)

                rtext1 = '\n'.join(line_list[row1:row2])
                nr = len((block + '\n\n').splitlines())
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])

                if debug:
                    print('-----')
                    ut.colorprint(
                        'NOT FOUND AND REPLACING %s' % (binding_name, ),
                        'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint(
                        'NOT FOUND AND REPLACED WITH %s' % (binding_name, ),
                        'yellow')
                    print(ut.highlight_code(rtext2))

                if not ut.get_argflag('--diff') and not debug:
                    print(
                        ut.color_diff_text(
                            ut.difftext(rtext1,
                                        rtext2,
                                        num_context_lines=7,
                                        ignore_whitespace=True)))
            text_dict[key] = '\n'.join(new_line_list)
            lines_dict[key] = new_line_list
        ut.colorprint('L___  GENERATED BINDING %s ___' % (binding_name, ),
                      'yellow')

    for key in places:
        new_text = '\n'.join(lines_dict[key])
        #ut.writeto(ut.augpath(places[key], '.new'), new_text)
        ut.writeto(ut.augpath(places[key]), new_text)

    for key in places:
        if ut.get_argflag('--diff'):
            new_text = '\n'.join(lines_dict[key])
            difftext = ut.get_textdiff(orig_texts[key], new_text,
                                       num_context_lines=7,
                                       ignore_whitespace=True)
            difftext = ut.color_diff_text(difftext)
            print(difftext)
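
The match-location step above leans on difflib's matching blocks. A self-contained sketch of that mechanism (toy strings, not the real FLANN sources):

    import difflib

    old_text = 'int build_index(float* data);\nint free_index(void);'
    searchblock = 'int free_index(void);'
    sm = difflib.SequenceMatcher(None, old_text, searchblock, autojunk=False)
    for a, b, size in sm.get_matching_blocks():
        # a: offset into old_text, b: offset into searchblock, size: match length
        print(a, b, size, repr(old_text[a:a + size]))
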
Example #8
def on_save(nnindexer, depc, fpath):
    # Save the FLANN index alongside the main cache file
    flann_fpath = ut.augpath(fpath, '_flann', newext='.flann')
    nnindexer.save(fpath=flann_fpath)
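
Assuming the same stem + suffix + new-extension behavior of augpath seen above, the sidecar path works out as (cache path hypothetical):

    ut.augpath('/cache/indexer_abc.cPkl', '_flann', newext='.flann')
    # -> '/cache/indexer_abc_flann.flann'
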
Example #9
def sort_module_functions():
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]

    tmp = [
        'def' if isfunc else indent
        for isfunc, indent in zip(isfunc_list, indent_list)
    ]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)

    #for block in block_list:
    #    print('#====')
    #    print(block)

    isfunc_list = [
        re.search('^def ', block, re.MULTILINE) is not None
        for block in block_list
    ]

    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [
        findfuncname(block) if isfunc else None
        for isfunc, block in zip(isfunc_list, block_list)
    ]

    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)

    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)
    ismain_list = [
        re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
        for nonfunc in nonfunc_list
    ]

    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)

    newtext_list = []

    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')

    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')

    for mainblock in mainblock_list:
        newtext_list.append(mainblock)

    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext, ))
    print('len(newtext) = %r' % (len(newtext), ))
    print('len(text) = %r' % (len(text), ))

    backup_fpath = ut.augpath(fpath,
                              augext='.bak',
                              augdir='_backup',
                              ensure=True)

    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
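
ut.sortedby orders the function blocks by their extracted names. A plain-Python equivalent of that sorting step:

    funcblock_list = ['def zeta(): pass', 'def alpha(): pass']
    funcname_list = ['zeta', 'alpha']
    sorted_blocks = [b for _, b in sorted(zip(funcname_list, funcblock_list))]
    # -> ['def alpha(): pass', 'def zeta(): pass']
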