Example #1
def show_function_usage(fname, funcname_list, dpath_list):
    # Check for usage of each function
    funcname_list = [r'\b%s\b' % (funcname.strip(),) for funcname in funcname_list if len(funcname) > 0]
    flagged_funcnames = []
    for funcname in funcname_list:
        found_filestr_list, found_lines_list, found_lxs_list = ut.grep([funcname], dpath_list=dpath_list)
        total = 0
        for lines in found_lines_list:
            total += len(lines)
        funcname_ = funcname.replace('\\b', '')
        print(funcname_ + ' ' + str(total))
        if total == 1:
            flagged_funcnames.append(funcname_)
        # Determine where the function is used outside its defining file
        isexternal_list = [fname != fname_ for fname_ in found_filestr_list]
        external_filestr_list = ut.compress(found_filestr_list, isexternal_list)
        external_lines_list = ut.compress(found_lines_list, isexternal_list)
        #external_lxs_list = ut.compress(found_lxs_list, isexternal_list)
        if len(external_filestr_list) == 0:
            print(' no external usage')
        else:
            for filename, lines in zip(external_filestr_list, external_lines_list):
                print(' * filename=%r' % (filename,))
                print(ut.list_str(lines))
            #print(ut.list_str(list(zip(external_filestr_list, external_lines_list))))
    print('----------')
    print('flagged:')
    print('\n'.join(flagged_funcnames))
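
For reference, ut.grep as used throughout these examples returns a three-tuple: the matching file paths, the matching lines per file, and the matching line indices per file. A minimal usage sketch, assuming the utool library is installed (the pattern and path below are placeholders):

# Minimal sketch of the ut.grep call pattern used above.
import utool as ut

dpath_list = [ut.truepath('~/code/utool/utool')]
fpaths, lines_list, lxs_list = ut.grep([r'\bgrep\b'], dpath_list=dpath_list)
for fpath, lines in zip(fpaths, lines_list):
    print('%s: %d match(es)' % (fpath, len(lines)))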
Example #2
def change_doctestcommand_to_use_dashm_flag():
    r"""
    VimRegex: # not sure how to execute the replace command in vim in one line
    %s/python\s*\([A-Za-z_]+[\\/]\S*\)\.py\(.*\)/python -m \1 \2

    """
    # http://stackoverflow.com/questions/18737863/passing-a-function-to-re-sub-in-python
    # Cannot use [^ ] here for some reason; use \S instead
    regex_list = [r'python [A-Za-z_]+[\\/]\S* --allexamples']
    dpath_list = [
        ut.ensure_crossplat_path(ut.truepath('~/code/utool/utool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/ibeis/ibeis')),
        ut.ensure_crossplat_path(ut.truepath('~/code/vtool/vtool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/plottool/plottool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/guitool/guitool')),
    ]
    #ut.named_field_repl(['python ', ('modrelpath',),])
    #['python ', ('modrelpath', 'utool[\\/].*'), '--allexamples'])
    res = ut.grep(regex_list,
                  recursive=True,
                  dpath_list=dpath_list,
                  verbose=True)
    found_filestr_list, found_lines_list, found_lxs_list = res
    fpath = res[0][0]

    import re
    keypat_list = [
        ('prefix', r'python\s*'),
        ('modrelpath', r'[A-Za-z_]+[\\/]\S*'),
        ('suffix', '.*'),
    ]
    namedregex = ut.named_field_regex(keypat_list)

    # Define function to pass to re.sub
    def replmodpath(matchobj):
        groupdict_ = matchobj.groupdict()
        relpath = groupdict_['modrelpath']
        prefix = groupdict_['prefix']
        suffix = groupdict_['suffix']
        modname = relpath
        modname = modname.replace('\\', '.')
        modname = modname.replace('/', '.')
        modname = modname.replace('.py', '')
        return prefix + '-m ' + modname + suffix

    for fpath in found_filestr_list:
        text = ut.read_from(fpath)
        #matchobj = re.search(namedregex, text, flags=re.MULTILINE)
        #print(text)
        #for matchobj in re.finditer(namedregex, text):
        #    print(ut.get_match_text(matchobj))
        #    print('--')
        newtext = re.sub(namedregex, replmodpath, text)
        # Perform replacement
        ut.write_to(fpath, newtext)
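
To make the rewrite concrete, here is a hedged sketch using only the standard re module. The pattern below is an assumed expansion of what ut.named_field_regex builds from keypat_list, and _replmodpath mirrors the replmodpath logic above; the sample line is made up.

# Hedged sketch with plain re (no utool); pattern and sample line are illustrative.
import re

namedregex = r'(?P<prefix>python\s*)(?P<modrelpath>[A-Za-z_]+[\\/]\S*)(?P<suffix>.*)'

def _replmodpath(match):
    # Same idea as replmodpath above: turn a relative path into a dotted module name.
    modname = match.group('modrelpath')
    modname = modname.replace('\\', '.').replace('/', '.').replace('.py', '')
    return match.group('prefix') + '-m ' + modname + match.group('suffix')

line = 'python ibeis/algo/hots/foo.py --allexamples'
print(re.sub(namedregex, _replmodpath, line))
# -> python -m ibeis.algo.hots.foo --allexamples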
Example #3
def glossterms():
    re_glossterm = ut.named_field('glossterm', '.' + ut.REGEX_NONGREEDY)
    pat = r'\\glossterm{' + re_glossterm + '}'
    tup = ut.grep(pat, fpath_list=testdata_fpaths(), verbose=True)
    found_fpath_list, found_lines_list, found_lxs_list = tup
    glossterm_list = []
    for line in ut.flatten(found_lines_list):
        match = re.search(pat, line)
        glossterm = match.groupdict()['glossterm']
        glossterm_list.append(glossterm)
    print('Glossary Terms: ')
    print(ut.repr2(ut.dict_hist(glossterm_list), nl=True, strvals=True))
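
A hedged sketch of the pattern this builds, assuming ut.named_field expands to a standard named group and ut.REGEX_NONGREEDY is the non-greedy star (both are assumptions; the sample line is made up):

# Assumed expansion of the utool helpers above.
import re
pat = r'\\glossterm{(?P<glossterm>.*?)}'
line = r'We trade \glossterm{precision} against \glossterm{recall}.'
print([m.groupdict()['glossterm'] for m in re.finditer(pat, line)])
# -> ['precision', 'recall']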
Example #4
    def find_module_callers():
        """
        TODO:
        attempt to build a call graph between module functions to make it easy to see
        what can be removed and what cannot.
        """
        import utool as ut
        from os.path import normpath
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
        module = ut.import_module_from_fpath(mod_fpath)
        user_profile = ut.ensure_user_profile()
        doctestables = list(
            ut.iter_module_doctestable(module, include_builtin=False))
        grepkw = {}
        grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
        grepkw['dpath_list'] = user_profile.project_dpaths
        grepkw['verbose'] = True

        usage_map = {}
        for funcname, func in doctestables:
            print('Searching for funcname = %r' % (funcname, ))
            found_fpath_list, found_lines_list, found_lxs_list = ut.grep(
                [funcname], **grepkw)
            used_in = (found_fpath_list, found_lines_list, found_lxs_list)
            usage_map[funcname] = used_in

        external_usage_map = {}
        for funcname, used_in in usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in
            isexternal_flag = [
                normpath(fpath) != normpath(mod_fpath)
                for fpath in found_fpath_list
            ]
            ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                           ut.compress(found_lines_list, isexternal_flag),
                           ut.compress(found_lxs_list, isexternal_flag))
            external_usage_map[funcname] = ext_used_in

        for funcname, used_in in external_usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in

        print('Calling modules: \n' + ut.repr2(ut.unique_ordered(
            ut.flatten([used_in[0]
                        for used_in in external_usage_map.values()])),
                                               nl=True))
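
The external-usage filter above keeps only the entries whose flag is True. As used here, ut.compress behaves like the standard library's itertools.compress (treated as an assumption), so the filtering step amounts to:

# Hedged sketch of the flag-based filtering, using itertools.compress.
from itertools import compress
found_fpath_list = ['/repo/results_organizer.py', '/repo/other_module.py']
isexternal_flag = [False, True]  # True where the hit is outside the module itself
print(list(compress(found_fpath_list, isexternal_flag)))
# -> ['/repo/other_module.py']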
Example #5
def abstract_external_module_cv2():
    from os.path import join  # NOQA
    modname = 'cv2'
    repo_dirs = ut.get_project_repo_dirs()
    #exclude_dirs = [join(dpath, 'build') for dpath in repo_dirs]

    grepkw = dict(
        #exclude_dirs=exclude_dirs,
        dpath_list=repo_dirs,
        greater_exclude_dirs=ut.get_standard_exclude_dnames(),
        recursive=True,
    )
    modregex = r'\<' + modname + r'\>'
    tup = ut.grep(modregex, **grepkw)
    fpath_list, line_list, lineno_list = tup
    fpath_list = [fpath.replace('\\', '/') for fpath in fpath_list]
    print('\n'.join(fpath_list))
Example #6
def _grep_dpath(dpath):
    grep_tup = ut.grep([pat], dpath_list=[dpath],
                       exclude_patterns=['*.pyc'], verbose=False)
    reflags = 0
    (found_fpath_list, found_lines_list, found_lxs_list) = grep_tup
    regex_list = [pat]
    _exprs_flags = [ut.util_regex.extend_regex2(expr, reflags)
                    for expr in regex_list]
    extended_regex_list = ut.take_column(_exprs_flags, 0)
    grep_result = ut.GrepResult(found_fpath_list, found_lines_list,
                                found_lxs_list, extended_regex_list,
                                reflags=reflags)
    text = '\n'.join([
        'Grepping Directory "{}"'.format(dpath),
        'tofind_list={}'.format(ut.repr2(extended_regex_list)),
        grep_result.make_resultstr(colored=False),
        '=============',
        'found_fpath_list = {}'.format(ut.repr2(found_fpath_list, nl=1))
    ])
    return text
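
A hypothetical call: pat is a free variable that _grep_dpath reads from the surrounding scope, so it is set explicitly here, and the directory is a placeholder.

# Hypothetical usage; pat and the directory are placeholders.
import utool as ut
pat = r'\bget_argflag\b'
report = _grep_dpath(ut.truepath('~/code/utool/utool'))
print(report)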
Example #7
    def tozip():
        re_fpath = ut.named_field('fpath', 'figure.*?[jp][pn]g') + '}'
        patterns = [
            'chapter4-application.tex', 'figdef4*', 'main.tex', 'def.tex',
            'Crall*', 'thesis.cls', 'header*', 'colordef.tex', '*.bib'
        ]
        exclude_dirs = ['guts']
        fpaths = sorted(
            ut.glob('.', patterns, recursive=True, exclude_dirs=exclude_dirs))

        tup = ut.grep(re_fpath, fpath_list=fpaths, verbose=True)
        found_fpath_list, found_lines_list, found_lxs_list = tup
        fig_fpath_list = []
        for line in ut.flatten(found_lines_list):
            if not line.startswith('%'):
                for match in re.finditer(re_fpath, line):
                    fig_fpath = match.groupdict()['fpath']
                    if 'junc' not in fig_fpath and 'markov' not in fig_fpath and 'bayes' not in fig_fpath:
                        fig_fpath_list += [fig_fpath]

        fpath_list = fig_fpath_list + fpaths
        ut.archive_files('chap4.zip', fpath_list)
Example #8
def grep(r, *tofind_list):
    import utool as ut
    #include_patterns = ['*.py', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.vim']
    ut.grep(tofind_list, recursive=True, verbose=True)
Example #9
def grep_projects(tofind_list,
                  user_profile=None,
                  verbose=True,
                  new=False,
                  **kwargs):
    r"""
    Greps the projects defined in the current UserProfile

    Args:
        tofind_list (list):
        user_profile (None): (default = None)

    Kwargs:
        user_profile

    CommandLine:
        python -m utool --tf grep_projects grep_projects

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_project import *  # NOQA
        >>> import utool as ut
        >>> import sys
        >>> tofind_list = ut.get_argval('--find', type_=list,
        >>>                             default=[sys.argv[-1]])
        >>> grep_projects(tofind_list)
    """
    import utool as ut
    user_profile = ensure_user_profile(user_profile)

    kwargs = kwargs.copy()
    colored = kwargs.pop('colored', True)

    grepkw = {}
    grepkw['greater_exclude_dirs'] = user_profile.project_exclude_dirs
    grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
    grepkw['dpath_list'] = user_profile.project_dpaths
    grepkw['include_patterns'] = user_profile.project_include_patterns
    grepkw['exclude_patterns'] = user_profile.project_exclude_patterns
    grepkw.update(kwargs)

    msg_list1 = []
    msg_list2 = []

    print_ = msg_list1.append
    print_('Grepping Projects')
    print_('tofind_list = %s' % (ut.list_str(tofind_list, nl=True), ))
    #print_('grepkw = %s' % ut.dict_str(grepkw, nl=True))
    if verbose:
        print('\n'.join(msg_list1))
    #with ut.Timer('greping', verbose=True):
    grep_result = ut.grep(tofind_list, **grepkw)
    found_fpath_list, found_lines_list, found_lxs_list = grep_result

    # HACK, duplicate behavior. TODO: write grep print result function
    reflags = grepkw.get('reflags', 0)
    _exprs_flags = [ut.extend_regex2(expr, reflags) for expr in tofind_list]
    extended_regex_list = ut.take_column(_exprs_flags, 0)
    reflags_list = ut.take_column(_exprs_flags, 1)
    # HACK
    # pat = ut.util_regex.regex_or(extended_regex_list)
    reflags = reflags_list[0]

    # from utool import util_regex
    resultstr = ut.make_grep_resultstr(grep_result,
                                       extended_regex_list,
                                       reflags,
                                       colored=colored)
    msg_list2.append(resultstr)
    print_ = msg_list2.append
    #for fpath, lines, lxs in zip(found_fpath_list, found_lines_list,
    #                             found_lxs_list):
    #    print_('----------------------')
    #    print_('found %d line(s) in %r: ' % (len(lines), fpath))
    #    name = split(fpath)[1]
    #    max_line = len(lines)
    #    ndigits = str(len(str(max_line)))
    #    for (lx, line) in zip(lxs, lines):
    #        line = line.replace('\n', '')
    #        print_(('%s : %' + ndigits + 'd |%s') % (name, lx, line))
    # iter_ = zip(found_fpath_list, found_lines_list, found_lxs_list)
    # for fpath, lines, lxs in iter_:
    #     print_('----------------------')
    #     print_('found %d line(s) in %r: ' % (len(lines), fpath))
    #     name = split(fpath)[1]
    #     max_line = len(lines)
    #     ndigits = str(len(str(max_line)))
    #     for (lx, line) in zip(lxs, lines):
    #         line = line.replace('\n', '')
    #         colored_line = ut.highlight_regex(
    #             line.rstrip('\n'), pat, reflags=reflags)
    #         print_(('%s : %' + ndigits + 'd |%s') % (name, lx, colored_line))

    print_('====================')
    print_('found_fpath_list = ' + ut.list_str(found_fpath_list))
    print_('')
    #print_('gvim -o ' + ' '.join(found_fpath_list))
    if verbose:
        print('\n'.join(msg_list2))
    msg_list = msg_list1 + msg_list2

    if new:
        return GrepResult(found_fpath_list, found_lines_list, found_lxs_list,
                          extended_regex_list, reflags)
    else:
        return msg_list
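
A hedged usage sketch: this assumes a UserProfile with project_dpaths configured, and the search pattern is illustrative.

# Hypothetical call: grep every configured project directory for a symbol.
msg_list = grep_projects([r'\bensure_user_profile\b'], verbose=False)
print('\n'.join(msg_list))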
Example #10
def autogen_argparse2(dpath_list):
    r"""

    NOTE: this function is not fully implemented; currently it only returns
    the list of flags that the program silently takes.

    Example:
        >>> from utool.util_arg import *  # NOQA
        >>> import utool as ut
        >>> dpath_list = [
        ...     ut.truepath('~/code/utool/utool'),
        ...     ut.truepath('~/code/ibeis/ibeis'),
        ...     ut.truepath('~/code/guitool/guitool'),
        ...     ut.truepath('~/code/vtool/vtool'),
        ...     ut.truepath('~/code/plottool/plottool'),
        ... ]
        >>> flagtups_list = autogen_argparse2(dpath_list)
        >>> flagtup_list_ = [ut.regex_replace('[)(\']','',tupstr) for tupstr in ut.flatten(flagtups_list)]
        >>> flagtup_list = ut.flatten([tupstr.split(',') for tupstr in flagtup_list_])
        >>> flagtup_set = set([tupstr.strip() for tupstr in flagtup_list if tupstr.find('=') == -1])
        >>> print('\n'.join(flagtup_set))
    """
    import utool as ut
    import parse
    include_patterns = ['*.py']
    regex_list = ['get_argflag', 'get_argval']
    recursive = True
    result = ut.grep(regex_list, recursive, dpath_list, include_patterns, verbose=True)
    (found_filestr_list, found_lines_list, found_lxs_list) = result
    # TODO: Check to see if in a comment block
    flagtups_list = []
    for found_lines in found_lines_list:
        flagtups = []
        for line in found_lines:
            line_ = ut.regex_replace('#.*', '', line)

            argval_parse_list = [
                '\'{flag}\' in sys.argv',
                'get_argval({flagtup}, type={type}, default={default})',
                'get_argval({flagtup}, {type}, default={default})',
                'get_argval({flagtup}, {type}, {default})',
                'get_argval({flagtup})',
            ]
            argflag_parse_list = [
                'get_argflag({flagtup})',
            ]
            def parse_pattern_list(parse_list, line):
                # Try each parse template against the (comment-stripped) line
                #result_list = []
                result = None
                for pattern in parse_list:
                    result = parse.parse('{_prefix}' + pattern, line)
                    if result is not None:
                        break
                        #if len(result_list) > 1:
                        #    print('warning')
                        #result_list.append(result)
                return result
            val_result  = parse_pattern_list(argval_parse_list, line_)
            flag_result = parse_pattern_list(argflag_parse_list, line_)
            if flag_result is None and val_result is None:
                print('warning: unable to parse argument pattern: %r' % (line_,))
            elif flag_result is not None and val_result is not None:
                print('warning: line matched both argval and argflag patterns: %r' % (line_,))
            else:
                result = flag_result if val_result is None else val_result
                flagtups.append(result['flagtup'])
        flagtups_list.append(flagtups)
    return flagtups_list
Example #11
def main():
    print(ut.bubbletext('TeX FiX'))
    #dryrun = ut.get_argflag('--dryrun')
    dryrun = not ut.get_argflag('-w')
    if dryrun:
        print('dryrun=True, specify -w to save any changes')
    find_text = ut.get_argval('--grep')
    if find_text is not None:
        tup = ut.grep(find_text, fpath_list=testdata_fpaths(), verbose=True)
        found_fpath_list, found_lines_list, found_lxs_list = tup
    else:
        print('~~~ --grep [text] ~~~')

    findrepl = ut.get_argval('--sed', type_=list, default=None)
    if findrepl is not None:
        assert len(findrepl) == 2, 'must specify a search and replace'
        search, repl = findrepl
        tup = ut.sed(search,
                     repl,
                     fpath_list=testdata_fpaths(),
                     verbose=True,
                     force=not dryrun)

    @ut.argv_flag_dec(indent='    ')
    def fix_common_errors():
        for tex_fpath in testdata_fpaths():
            fix_section_common_errors(tex_fpath, dryrun)

    @ut.argv_flag_dec(indent='    ')
    def fixcap():
        for tex_fpath in testdata_fpaths():
            fix_section_title_capitalization(tex_fpath, dryrun)

    @ut.argv_flag_dec(indent='    ')
    def fixsent():
        fix_sentences()
        # for tex_fpath in testdata_fpaths():
        #     fix_section_title_capitalization(tex_fpath, dryrun)

    @ut.argv_flag_dec(indent='    ')
    def glossterms():
        re_glossterm = ut.named_field('glossterm', '.' + ut.REGEX_NONGREEDY)
        pat = r'\\glossterm{' + re_glossterm + '}'
        tup = ut.grep(pat, fpath_list=testdata_fpaths(), verbose=True)
        found_fpath_list, found_lines_list, found_lxs_list = tup
        glossterm_list = []
        for line in ut.flatten(found_lines_list):
            match = re.search(pat, line)
            glossterm = match.groupdict()['glossterm']
            glossterm_list.append(glossterm)
        print('Glossary Terms: ')
        print(ut.repr2(ut.dict_hist(glossterm_list), nl=True, strvals=True))

    @ut.argv_flag_dec(indent='    ')
    def fix_chktex():
        """
        ./texfix.py --fixcite --fix-chktex
        """
        import parse
        fpaths = testdata_fpaths()
        print('Running chktex')
        output_list = [
            ut.cmd('chktex', fpath, verbose=False)[0] for fpath in fpaths
        ]

        fixcite = ut.get_argflag('--fixcite')
        fixlbl = ut.get_argflag('--fixlbl')
        fixcmdterm = ut.get_argflag('--fixcmdterm')

        for fpath, output in zip(fpaths, output_list):
            text = ut.readfrom(fpath)
            buffer = text.split('\n')
            pat = '\n' + ut.positive_lookahead('Warning')
            warn_list = list(
                filter(lambda x: x.startswith('Warning'),
                       re.split(pat, output)))
            delete_linenos = []

            if not (fixcmdterm or fixlbl or fixcite):
                print(' CHOOSE A FIX ')

            modified_lines = []

            for warn in warn_list:
                warnlines = warn.split('\n')
                pres = parse.parse(
                    'Warning {num} in {fpath} line {lineno}: {warnmsg}',
                    warnlines[0])
                if pres is not None:
                    fpath_ = pres['fpath']
                    lineno = int(pres['lineno']) - 1
                    warnmsg = pres['warnmsg']
                    try:
                        assert fpath == fpath_, ('%r != %r' % (fpath, fpath_))
                    except AssertionError:
                        continue
                    if 'No errors printed' in warn:
                        #print('Cannot fix')
                        continue
                    if lineno in modified_lines:
                        print('Skipping modified line')
                        continue
                    if fixcmdterm and warnmsg == 'Command terminated with space.':
                        print('Fix command termination')
                        errorline = warnlines[1]  # NOQA
                        carrotline = warnlines[2]
                        pos = carrotline.find('^')
                        if 0:
                            print('pos = %r' % (pos, ))
                            print('lineno = %r' % (lineno, ))
                            print('errorline = %r' % (errorline, ))
                        modified_lines.append(lineno)
                        line = buffer[lineno]
                        pre_, post_ = line[:pos], line[pos + 1:]
                        newline = (pre_ + '{} ' + post_).rstrip(' ')
                        #print('newline   = %r' % (newline,))
                        buffer[lineno] = newline
                    elif fixlbl and warnmsg == 'Delete this space to maintain correct pagereferences.':
                        print('Fix label newline')
                        fpath_ = pres['fpath']
                        errorline = warnlines[1]  # NOQA
                        new_prevline = buffer[
                            lineno - 1].rstrip() + errorline.lstrip(' ')
                        buffer[lineno - 1] = new_prevline
                        modified_lines.append(lineno)
                        delete_linenos.append(lineno)
                    elif fixcite and re.match(
                            'Non-breaking space \\(.~.\\) should have been used',
                            warnmsg):
                        #print(warnmsg)
                        #print('\n'.join(warnlines))
                        print('Fix citation space')
                        carrotline = warnlines[2]
                        pos = carrotline.find('^')
                        modified_lines.append(lineno)
                        line = buffer[lineno]
                        if line[pos] == ' ':
                            pre_, post_ = line[:pos], line[pos + 1:]
                            newline = (pre_ + '~' + post_).rstrip(' ')
                        else:
                            pre_, post_ = line[:pos + 1], line[pos + 1:]
                            newline = (pre_ + '~' + post_).rstrip(' ')
                            print(warn)
                            print(line[pos])
                            assert False
                            #assert line[pos] == ' ', '%r' % line[pos]
                            break
                        if len(pre_.strip()) == 0:
                            new_prevline = buffer[
                                lineno - 1].rstrip() + newline.lstrip(' ')
                            buffer[lineno - 1] = new_prevline
                            delete_linenos.append(lineno)
                        else:
                            #print('newline   = %r' % (newline,))
                            buffer[lineno] = newline
                    #print(warn)

            if len(delete_linenos) > 0:
                mask = ut.index_to_boolmask(delete_linenos, len(buffer))
                buffer = ut.compress(buffer, ut.not_list(mask))
            newtext = '\n'.join(buffer)

            #ut.dump_autogen_code(fpath, newtext, 'tex', fullprint=False)
            ut.print_difftext(
                ut.get_textdiff(text, newtext, num_context_lines=4))
            if ut.get_argflag('-w'):
                ut.writeto(fpath, newtext)
            else:
                print('Specify -w to finalize changes')

    @ut.argv_flag_dec(indent='    ')
    def reformat():
        """
        ./texfix.py --reformat --fpaths NewParts.tex
        >>> from texfix import *  # NOQA
        """
        fpaths = testdata_fpaths()

        for fpath in fpaths:
            text = ut.readfrom(fpath)
            root = latex_parser.LatexDocPart.parse_text(text, debug=None)

            if ut.get_argflag('--fixcref'):
                root.find(' \\\\cref')
                continue

            #print(root.children)
            #root.children = root.children[0:5]
            #print('Parsed Str Short')
            new_text = '\n'.join(root.reformat_blocks(debug=None))
            # remove trailing spaces
            new_text = re.sub(' *$', '', new_text, flags=re.MULTILINE)
            # remove double newlines
            new_text = re.sub('(\n *)+\n+',
                              '\n\n',
                              new_text,
                              flags=re.MULTILINE)

            if ut.get_argflag('--summary'):
                print('---summary---')
                root.print_summary()
                print('---/summary---')
                # ut.colorprint(root.summary_str(), 'blue')

            numchars1 = len(text.replace(' ', '').replace('\n', ''))
            numchars2 = len(new_text.replace(' ', '').replace('\n', ''))

            print('numchars1 = %r' % (numchars1, ))
            print('numchars2 = %r' % (numchars2, ))
            #assert numchars1 == numchars2, '%r == %r' % (numchars1, numchars2)

            print('old newlines = %r' % (text.count('\n'), ))
            print('new newlines = %r' % (new_text.count('\n'), ))

            #import unicodedata
            #new_text = unicodedata.normalize('NFKD', new_text).encode('ascii','ignore')
            #print('new_text = %r' % (new_text,))

            ut.dump_autogen_code(fpath,
                                 new_text,
                                 codetype='latex',
                                 fullprint=False)

    @ut.argv_flag_dec(indent='    ')
    def outline():
        """
        ./texfix.py --fpaths chapter4-application.tex --outline --asmarkdown --numlines=999 -w --ignoreinputstartswith=def,Crall,header,colordef,figdef
        """
        fpaths = testdata_fpaths()
        print('fpaths = %r' % (fpaths, ))

        for fpath in fpaths:
            text = ut.readfrom(fpath)
            root = latex_parser.LatexDocPart.parse_text(text, debug=None)

            # HACK
            new_text = '\n'.join(root.reformat_blocks(debug=None))
            # remove trailing spaces
            new_text = re.sub(' *$', '', new_text, flags=re.MULTILINE)
            # remove double newlines
            new_text = re.sub('(\n *)+\n+',
                              '\n\n',
                              new_text,
                              flags=re.MULTILINE)

            document = root.find_descendant_type('document')
            #document = root.find_descendant_type('section', pat='Identification')
            print('document = %r' % (document, ))
            if document is not None:
                root = document

            sectionpat = ut.get_argval('--section', default=None)
            if sectionpat is not None:
                root = root.find_descendant_type('section', pat=sectionpat)
                print('root = %r' % (root, ))
                if root is None:
                    # import utool
                    # utool.embed()
                    raise Exception('section %r does not exist' % (sectionpat))
            #print(root.get_debug_tree_text())

            #ut.colorprint(root.summary_str(outline=True), 'yellow')
            print('---outline---')
            outline = True
            # outline = False
            outline_text = root.summary_str(outline=outline, highlight=False)
            summary = root.summary_str(outline=outline, highlight=True)
            if not ut.get_argflag('-w'):
                print(summary)
            print('---/outline---')
            if root._config['asmarkdown']:
                codetype = 'markdown'
                newext = '.md'
            else:
                codetype = 'latex'
                newext = None

            ut.dump_autogen_code(ut.augpath(fpath,
                                            augpref='outline_',
                                            newext=newext),
                                 outline_text,
                                 codetype=codetype,
                                 fullprint=False)

    @ut.argv_flag_dec(indent='    ')
    def tozip():
        re_fpath = ut.named_field('fpath', 'figure.*?[jp][pn]g') + '}'
        patterns = [
            'chapter4-application.tex', 'figdef4*', 'main.tex', 'def.tex',
            'Crall*', 'thesis.cls', 'header*', 'colordef.tex', '*.bib'
        ]
        exclude_dirs = ['guts']
        fpaths = sorted(
            ut.glob('.', patterns, recursive=True, exclude_dirs=exclude_dirs))

        tup = ut.grep(re_fpath, fpath_list=fpaths, verbose=True)
        found_fpath_list, found_lines_list, found_lxs_list = tup
        fig_fpath_list = []
        for line in ut.flatten(found_lines_list):
            if not line.startswith('%'):
                for match in re.finditer(re_fpath, line):
                    fig_fpath = match.groupdict()['fpath']
                    if 'junc' not in fig_fpath and 'markov' not in fig_fpath and 'bayes' not in fig_fpath:
                        fig_fpath_list += [fig_fpath]

        fpath_list = fig_fpath_list + fpaths
        ut.archive_files('chap4.zip', fpath_list)

    fix_common_errors()
    fixcap()
    glossterms()
    fix_chktex()
    reformat()
    outline()
    tozip()
    fixsent()

    ut.argv_flag_dec(check_doublewords, indent='    ')()
    ut.argv_flag_dec(findcite, indent='    ')()

    print('Use --fpaths to specify specific files')