Example 1
def test():
    print('enter test')
    log_fpath1 = utool.get_app_resource_dir('utool', 'test_logfile1.txt')
    log_fpath2 = utool.get_app_resource_dir('utool', 'test_logfile2.txt')

    utool.start_logging(log_fpath1, 'w')
    func1()
    func2()
    utool.stop_logging()

    print('\n\n')
    print('This line is NOT logged')
    print('\n\n')

    utool.start_logging(log_fpath2, 'w')
    print('This line is logged')
    utool.stop_logging()

    log1 = utool.read_from(log_fpath1, verbose=False)
    log2 = utool.read_from(log_fpath2, verbose=False)

    target1 = utool.unindent('''
    <__LOG_START__>
    logging to log_fpath=%r
    [test][func1]enter func1
    [test][func1]exit  func1
    [test][func2]enter func2
    [test][func2][func1]enter func1
    [test][func2][func1]exit  func1
    [test][func2]exit  func2
    <__LOG_STOP__>''' % log_fpath1).strip()

    target2 = utool.unindent('''
    <__LOG_START__>
    logging to log_fpath=%r
    [test]This line is logged
    <__LOG_STOP__>''' % log_fpath2).strip()

    output1 = remove_timestamp(log1).strip()
    output2 = remove_timestamp(log2).strip()

    try:
        assert target1 == output1, 'target1 failed'
        assert target2 == output2, 'target2 failed'
        builtins.print('TEST PASSED')
    except AssertionError:
        builtins.print('\n<!!! TEST FAILED !!!>')

        builtins.print('\ntarget1:')
        builtins.print(target1)
        builtins.print('\noutput1:')
        builtins.print(output1)

        builtins.print('\ntarget2:')
        builtins.print(target2)
        builtins.print('\noutput2:')
        builtins.print(output2)

        builtins.print('</!!! TEST FAILED !!!>\n')
        raise
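Example 1 exercises utool's print-capture logging. A minimal sketch of the capture pattern it relies on (the file name here is illustrative):

import utool
# demo_logfile.txt is a hypothetical name; any writable path works
log_fpath = utool.get_app_resource_dir('utool', 'demo_logfile.txt')
utool.start_logging(log_fpath, 'w')   # subsequent print() calls are mirrored to the file
print('this line is captured')
utool.stop_logging()                  # prints after this point are no longer captured
print(utool.read_from(log_fpath, verbose=False))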
Example 3
def parse_latex_comments_for_commmands():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> parse_latex_comments_for_commmands()
    """
    fname = ut.get_argval('--fname', type_=str, default='figdefexpt.tex')
    text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/' + fname))
    #text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/figdefindiv.tex'))
    lines = text.split('\n')
    cmd_list = ['']
    in_comment = True
    for line in lines:
        if line.startswith('% ---'):
            # Keep separators
            toadd = line.replace('%', '#')
            if not (len(cmd_list) > 1 and cmd_list[-1].startswith('# ---')):
                cmd_list[-1] += (toadd)
            else:
                cmd_list.append(toadd)
            cmd_list.append('')

        if line.strip().startswith(r'\begin{comment}'):
            in_comment = True
            continue
        if in_comment:
            line = line.strip()
            if line == '' or line.startswith('#') or line.startswith('%'):
                in_comment = False
            else:
                cmd_list[-1] = cmd_list[-1] + line
                if not line.strip().endswith('\\'):
                    cmd_list[-1] = cmd_list[-1] + ' $@'
                    #cmd_list.append('')
                    #cmd_list.append('#--')
                    cmd_list.append('')
                    in_comment = False
                else:
                    cmd_list[-1] = cmd_list[-1] + '\n'

    cmd_list = [cmd.replace('--render', '').replace('--diskshow', '') for cmd in cmd_list]

    # formatting
    cmd_list2 = []
    for cmd in cmd_list:
        #cmd = cmd.replace(' -t ', ' \\\n    -t ')
        #cmd = cmd.replace('--db', '\\\n    --db')
        #cmd = cmd.replace('python -m ibeis.dev', './dev.py')
        cmd = cmd.replace('python -m ibeis.dev -e', 'ibeis -e')
        cmd_list2.append(cmd)
    cmd_list = cmd_list2

    print('cmd_list = %s' % (ut.list_str(cmd_list),))
    from os.path import splitext
    script_fname = 'regen_' + splitext(fname)[0] + '.sh'
    fname, script, line_list = write_script_lines(cmd_list, script_fname)
Example 4
def load_gztest(ibs):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.special_query --test-load_gztest

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('GZ_ALL')
    """
    from os.path import join
    from ibeis.algo.hots import match_chips4 as mc4
    dir_ = ut.get_module_dir(mc4)
    eval_text = ut.read_from(join(dir_, 'GZ_TESTTUP.txt'))
    testcases = eval(eval_text)
    count_dict = ut.count_dict_vals(testcases)
    print(ut.dict_str(count_dict))

    testtup_list = ut.flatten(ut.dict_take_list(testcases, ['vsone_wins',
                                                            'vsmany_outperformed',
                                                            'vsmany_dominates',
                                                            'vsmany_wins']))
    qaid_list = [testtup.qaid_t for testtup in testtup_list]
    visual_uuids = ibs.get_annot_visual_uuids(qaid_list)
    visual_uuids
Example 5
def clean_lprof_file(input_fname, output_fname=None):
    """ Reads a .lprof file and cleans it """
    # Read the raw .lprof text dump
    text = ut.read_from(input_fname)
    # Sort and clean the text
    output_text = clean_line_profile_text(text)
    return output_text
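Note that output_fname is accepted but never used, so the caller must write the cleaned text itself. A hypothetical invocation (paths are illustrative):

import utool as ut
cleaned = clean_lprof_file('profile_output.lprof')   # hypothetical input path
ut.write_to('profile_output.clean.txt', cleaned)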
Example 6
def get_bibtex_dict():
    import utool as ut
    from os.path import exists
    # HACK: custom current bibtex file
    possible_bib_fpaths = [
        ut.truepath('./My_Library_clean.bib'),
        #ut.truepath('~/latex/crall-thesis-2017/My_Library_clean.bib'),
    ]

    bib_fpath = None
    for bib_fpath_ in possible_bib_fpaths:
        if exists(bib_fpath_):
            bib_fpath = bib_fpath_
            break

    if bib_fpath is None:
        raise Exception('cannot find bibtex file')

    # import bibtexparser
    from bibtexparser import bparser
    parser = bparser.BibTexParser()
    parser.ignore_nonstandard_types = True
    bib_text = ut.read_from(bib_fpath)
    bibtex_db = parser.parse(bib_text)
    bibtex_dict = bibtex_db.get_entry_dict()

    return bibtex_dict
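The returned value maps citation keys to entry dicts. A small sketch of consuming it, assuming bibtexparser's usual lowercase field names:

bibtex_dict = get_bibtex_dict()
for citekey, entry in bibtex_dict.items():
    # 'title' is an assumption about the entry fields
    print('%s: %s' % (citekey, entry.get('title', '<no title>')))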
Example 7
def ensure_explicit_namespace(fpath, namespace, varname_list):
    import re
    import utool as ut

    text = ut.read_from(fpath)
    orig_text = text
    new_text = text

    for varname in varname_list:
        regex = ''.join((
            ut.named_field('prefix', '[^.]'),
            ut.named_field('var', ut.whole_word(varname)),
        ))
        repl = ''.join((
            ut.bref_field('prefix'),
            namespace, '.',
            ut.bref_field('var')
        ))

        new_text = re.sub(regex, repl, new_text)

    textdiff = ut.get_textdiff(orig_text, new_text)
    print(textdiff)
    if ut.user_cmdline_prompt('Does the text look good?'):
        # if diff looks good write
        ut.write_to(fpath, new_text)
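A hypothetical call, qualifying bare references to cfg and params with a config. prefix throughout a module:

# mymodule.py, config, cfg, and params are all illustrative names
ensure_explicit_namespace('mymodule.py', 'config', ['cfg', 'params'])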
Example 9
def load_gztest(ibs):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.special_query --test-load_gztest

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('GZ_ALL')
    """
    from os.path import join
    from ibeis.algo.hots import match_chips4 as mc4
    dir_ = ut.get_module_dir(mc4)
    eval_text = ut.read_from(join(dir_, 'GZ_TESTTUP.txt'))
    testcases = eval(eval_text)
    count_dict = ut.count_dict_vals(testcases)
    print(ut.dict_str(count_dict))

    testtup_list = ut.flatten(
        ut.dict_take_list(testcases, [
            'vsone_wins', 'vsmany_outperformed', 'vsmany_dominates',
            'vsmany_wins'
        ]))
    qaid_list = [testtup.qaid_t for testtup in testtup_list]
    visual_uuids = ibs.get_annot_visual_uuids(qaid_list)
    visual_uuids
Example 10
def update_wildbook_config(ibs, wildbook_tomcat_path, dryrun=False):
    wildbook_properteis_dpath = join(wildbook_tomcat_path,
                                     'WEB-INF/classes/bundles/')
    print('[ibs.wildbook_signal_eid_list()] Wildbook properties=%r' % (
        wildbook_properteis_dpath, ))
    # The src file is non-standard. It should be removed here as well
    wildbook_config_fpath_dst = join(wildbook_properteis_dpath,
                                     'commonConfiguration.properties')
    ut.assert_exists(wildbook_properteis_dpath)
    # for some reason the .default file is not there, that should be ok though
    orig_content = ut.read_from(wildbook_config_fpath_dst)
    content = orig_content
    content = re.sub('IBEIS_DB_path = .*',
                     'IBEIS_DB_path = ' + ibs.get_db_core_path(), content)
    content = re.sub('IBEIS_image_path = .*',
                     'IBEIS_image_path = ' + ibs.get_imgdir(), content)

    # Write to the configuration if it is different
    if orig_content != content:
        need_sudo = not ut.is_file_writable(wildbook_config_fpath_dst)
        if need_sudo:
            quoted_content = '"%s"' % (content, )
            print('Attempting to gain sudo access to update wildbook config')
            command = ['sudo', 'sh', '-c', '\'', 'echo',
                       quoted_content, '>', wildbook_config_fpath_dst, '\'']
            # ut.cmd(command, sudo=True)
            command = ' '.join(command)
            if not dryrun:
                os.system(command)
        else:
            ut.write_to(wildbook_config_fpath_dst, content)
Example 11
def update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun=False):
    """
    #if use_config_file and wildbook_tomcat_path:
    #    # Update the Wildbook configuration to see *THIS* wbia database
    #    with lockfile.LockFile(lock_fpath):
    #        update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun)
    """
    wildbook_properteis_dpath = join(wildbook_tomcat_path,
                                     'WEB-INF/classes/bundles/')
    logger.info('[ibs.update_wildbook_ia_config()] Wildbook properties=%r' %
                (wildbook_properteis_dpath, ))
    # The src file is non-standard. It should be removed here as well
    wildbook_config_fpath_dst = join(wildbook_properteis_dpath,
                                     'commonConfiguration.properties')
    ut.assert_exists(wildbook_properteis_dpath)
    # for some reason the .default file is not there, that should be ok though
    orig_content = ut.read_from(wildbook_config_fpath_dst)
    content = orig_content
    # Make sure wildbook knows where to find us
    if False:
        # Old way of telling WB where to find IA
        content = re.sub('IBEIS_DB_path = .*',
                         'IBEIS_DB_path = ' + ibs.get_db_core_path(), content)
        content = re.sub('IBEIS_image_path = .*',
                         'IBEIS_image_path = ' + ibs.get_imgdir(), content)

    web_port = ibs.get_web_port_via_scan()
    if web_port is None:
        raise ValueError('IA web server is not running on any expected port')
    ia_hostport = 'http://localhost:%s' % (web_port, )
    ia_rest_prefix = ut.named_field('prefix', 'IBEISIARestUrl.*')
    host_port = ut.named_field('host_port', 'http://.*?:[0-9]+')
    content = re.sub(ia_rest_prefix + host_port,
                     ut.bref_field('prefix') + ia_hostport, content)

    # Write to the configuration if it is different
    if orig_content != content:
        need_sudo = not ut.is_file_writable(wildbook_config_fpath_dst)
        if need_sudo:
            quoted_content = '"%s"' % (content, )
            logger.info(
                'Attempting to gain sudo access to update wildbook config')
            command = [
                'sudo',
                'sh',
                '-c',
                "'",
                'echo',
                quoted_content,
                '>',
                wildbook_config_fpath_dst,
                "'",
            ]
            # ut.cmd(command, sudo=True)
            command = ' '.join(command)
            if not dryrun:
                os.system(command)
        else:
            ut.write_to(wildbook_config_fpath_dst, content)
Example 12
def multiline_grepfile(regex, fpath):
    found_matchtexts = []
    found_linenos = []
    text = ut.read_from(fpath, verbose=False)
    for match in re.finditer(regex, text, flags=re.MULTILINE):
        lineno = text[:match.start()].count('\n')
        matchtext = ut.get_match_text(match)
        found_linenos.append(lineno)
        found_matchtexts.append(matchtext)
    return found_matchtexts, found_linenos
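A hypothetical usage, grepping a file for function definitions (pattern and path are illustrative):

matchtexts, linenos = multiline_grepfile(r'^def \w+', 'some_module.py')
for lineno, matchtext in zip(linenos, matchtexts):
    print('%d: %s' % (lineno, matchtext))   # linenos are 0-based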
Example 14
def read_csv(csv_fpath):
    import utool as ut
    csv_text = ut.read_from(csv_fpath)
    csv_lines = csv_text.split('\n')
    print(ut.list_str(csv_lines[0:2]))
    csv_data = [[field.strip('"').strip('\r') for field in line.split(',')]
                for line in csv_lines if len(line) > 0]
    csv_header = csv_data[0]
    csv_data = csv_data[1:]
    return csv_data, csv_header
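This splits on bare commas, so quoted fields that contain commas will be cut apart. Where that matters, the standard csv module is the robust alternative; a minimal sketch mirroring the (data, header) return convention above:

import csv

def read_csv_stdlib(csv_fpath):
    # csv.reader handles quoting and embedded commas correctly
    with open(csv_fpath, newline='') as file_:
        rows = [row for row in csv.reader(file_) if row]
    return rows[1:], rows[0]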
Example 15
def change_doctestcommand_to_use_dashm_flag():
    r"""
    VimRegex: # not sure how to execute the replace command in vim in one line
    %s/python\s*\([A-Za-z_]+[\\/]\S*\)\.py\(.*\)/python -m \1 \2

    """
    # http://stackoverflow.com/questions/18737863/passing-a-function-to-re-sub-in-python
    # CANNOT USE [^ ] FOR SOME GOD DAMN REASON, USE \S instead
    regex_list = [r'python [A-Za-z_]+[\\/]\S* --allexamples']
    dpath_list = [
        ut.ensure_crossplat_path(ut.truepath('~/code/utool/utool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/ibeis/ibeis')),
        ut.ensure_crossplat_path(ut.truepath('~/code/vtool/vtool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/plottool/plottool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/guitool/guitool')),
    ]
    #ut.named_field_repl(['python ', ('modrelpath',),])
    #['python ', ('modrelpath', 'utool[\\/].*'), '--allexamples'])
    res = ut.grep(regex_list,
                  recursive=True,
                  dpath_list=dpath_list,
                  verbose=True)
    found_filestr_list, found_lines_list, found_lxs_list = res
    fpath = res[0][0]

    import re
    keypat_list = [
        ('prefix', r'python\s*'),
        ('modrelpath', r'[A-Za-z_]+[\\/]\S*'),
        ('suffix', '.*'),
    ]
    namedregex = ut.named_field_regex(keypat_list)

    # Define function to pass to re.sub
    def replmodpath(matchobj):
        groupdict_ = matchobj.groupdict()
        relpath = groupdict_['modrelpath']
        prefix = groupdict_['prefix']
        suffix = groupdict_['suffix']
        modname = relpath
        modname = modname.replace('\\', '.')
        modname = modname.replace('/', '.')
        modname = modname.replace('.py', '')
        return prefix + '-m ' + modname + suffix

    for fpath in found_filestr_list:
        text = ut.read_from(fpath)
        #matchobj = re.search(namedregex, text, flags=re.MULTILINE)
        #print(text)
        #for matchobj in re.finditer(namedregex, text):
        #    print(ut.get_match_text(matchobj))
        #    print('--')
        newtext = re.sub(namedregex, replmodpath, text)
        # Perform replacement
        ut.write_to(fpath, newtext)
Example 16
def inject_python_code(fpath, patch_code, tag=None,
                       inject_location='after_imports'):
    """
    DEPRICATE
    puts code into files on disk
    """
    import utool as ut
    assert tag is not None, 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT'
    text = ut.read_from(fpath)
    comment_start_tag = '# <util_inject:%s>' % tag
    comment_end_tag = '# </util_inject:%s>' % tag

    tagstart_txtpos = text.find(comment_start_tag)
    tagend_txtpos = text.find(comment_end_tag)

    text_lines = ut.split_python_text_into_lines(text)

    # split the file into two parts and inject code between them
    if tagstart_txtpos != -1 or tagend_txtpos != -1:
        assert tagstart_txtpos != -1, 'start and end tags must both be present'
        assert tagend_txtpos != -1, 'start and end tags must both be present'

        for pos, line in enumerate(text_lines):
            if line.startswith(comment_start_tag):
                tagstart_pos = pos
            if line.startswith(comment_end_tag):
                tagend_pos = pos
        part1 = text_lines[0:tagstart_pos]
        part2 = text_lines[tagend_pos + 1:]
    else:
        if inject_location == 'after_imports':
            first_nonimport_pos = 0
            for line in text_lines:
                list_ = ['import ', 'from ', '#', ' ']
                isvalid = (len(line) == 0 or
                           any(line.startswith(str_) for str_ in list_))
                if not isvalid:
                    break
                first_nonimport_pos += 1
            part1 = text_lines[0:first_nonimport_pos]
            part2 = text_lines[first_nonimport_pos:]
        else:
            raise AssertionError('Unknown inject location')

    newtext = (
        '\n'.join(part1 + [comment_start_tag]) +
        '\n' + patch_code + '\n' +
        '\n'.join( [comment_end_tag] + part2)
    )
    text_backup_fname = fpath + '.' + ut.get_timestamp() + '.bak'
    ut.write_to(text_backup_fname, text)
    ut.write_to(fpath, newtext)
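A hypothetical invocation, injecting a flag after the imports of a target file (tag and patch text are illustrative):

patch_code = 'DEBUG_INJECTED = True  # added by inject_python_code'
inject_python_code('target_module.py', patch_code, tag='debug')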
Example 17
def change_doctestcommand_to_use_dashm_flag():
    r"""
    VimRegex: # not sure how to execute the replace command in vim in one line
    %s/python\s*\([A-Za-z_]+[\\/]\S*\)\.py\(.*\)/python -m \1 \2

    """
    # http://stackoverflow.com/questions/18737863/passing-a-function-to-re-sub-in-python
    # CANNOT USE [^ ] FOR SOME GOD DAMN REASON, USE \S instead
    regex_list = [r'python [A-Za-z_]+[\\/]\S* --allexamples']
    dpath_list = [
        ut.ensure_crossplat_path(ut.truepath('~/code/utool/utool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/ibeis/ibeis')),
        ut.ensure_crossplat_path(ut.truepath('~/code/vtool/vtool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/plottool/plottool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/guitool/guitool')),
    ]
    #ut.named_field_repl(['python ', ('modrelpath',),])
    #['python ', ('modrelpath', 'utool[\\/].*'), '--allexamples'])
    res = ut.grep(regex_list, recursive=True, dpath_list=dpath_list, verbose=True)
    found_filestr_list, found_lines_list, found_lxs_list = res
    fpath = res[0][0]

    import re
    keypat_list = [
        ('prefix', r'python\s*'),
        ('modrelpath', r'[A-Za-z_]+[\\/]\S*'),
        ('suffix', '.*'),
    ]
    namedregex = ut.named_field_regex(keypat_list)

    # Define function to pass to re.sub
    def replmodpath(matchobj):
        groupdict_ = matchobj.groupdict()
        relpath = groupdict_['modrelpath']
        prefix = groupdict_['prefix']
        suffix = groupdict_['suffix']
        modname = relpath
        modname = modname.replace('\\', '.')
        modname = modname.replace('/', '.')
        modname = modname.replace('.py', '')
        return prefix + '-m ' + modname + suffix

    for fpath in found_filestr_list:
        text = ut.read_from(fpath)
        #matchobj = re.search(namedregex, text, flags=re.MULTILINE)
        #print(text)
        #for matchobj in re.finditer(namedregex, text):
        #    print(ut.get_match_text(matchobj))
        #    print('--')
        newtext = re.sub(namedregex, replmodpath, text)
        # Perform replacement
        ut.write_to(fpath, newtext)
Example 18
def inject_python_code(fpath,
                       patch_code,
                       tag=None,
                       inject_location='after_imports'):
    """
    DEPRICATE
    puts code into files on disk
    """
    import utool as ut
    assert tag is not None, 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT'
    text = ut.read_from(fpath)
    comment_start_tag = '# <util_inject:%s>' % tag
    comment_end_tag = '# </util_inject:%s>' % tag

    tagstart_txtpos = text.find(comment_start_tag)
    tagend_txtpos = text.find(comment_end_tag)

    text_lines = ut.split_python_text_into_lines(text)

    # split the file into two parts and inject code between them
    if tagstart_txtpos != -1 or tagend_txtpos != -1:
        assert tagstart_txtpos != -1, 'start and end tags must both be present'
        assert tagend_txtpos != -1, 'start and end tags must both be present'

        for pos, line in enumerate(text_lines):
            if line.startswith(comment_start_tag):
                tagstart_pos = pos
            if line.startswith(comment_end_tag):
                tagend_pos = pos
        part1 = text_lines[0:tagstart_pos]
        part2 = text_lines[tagend_pos + 1:]
    else:
        if inject_location == 'after_imports':
            first_nonimport_pos = 0
            for line in text_lines:
                list_ = ['import ', 'from ', '#', ' ']
                isvalid = (len(line) == 0
                           or any(line.startswith(str_) for str_ in list_))
                if not isvalid:
                    break
                first_nonimport_pos += 1
            part1 = text_lines[0:first_nonimport_pos]
            part2 = text_lines[first_nonimport_pos:]
        else:
            raise AssertionError('Unknown inject location')

    newtext = ('\n'.join(part1 + [comment_start_tag]) + '\n' + patch_code +
               '\n' + '\n'.join([comment_end_tag] + part2))
    text_backup_fname = fpath + '.' + ut.get_timestamp() + '.bak'
    ut.write_to(text_backup_fname, text)
    ut.write_to(fpath, newtext)
Example 19
def fix_section_common_errors(tex_fpath, dryrun=True):
    # Read in text and ensure ascii format
    text = ut.read_from(tex_fpath)

    new_text = text
    # Fix all capitals
    search_repl_list = constants_tex_fixes.CAPITAL_LIST
    for repl in search_repl_list:
        pattern = ut.regex_word(re.escape(repl))
        new_text = re.sub(pattern, repl, new_text, flags=re.IGNORECASE)
    #new_text = re.sub(pattern, fix_capitalization, text, flags=re.MULTILINE)

    if not dryrun:
        ut.write_to(tex_fpath, new_text)
    else:
        ut.print_difftext(ut.get_textdiff(text, new_text, 0))
Example 20
def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None):
    """
    Helper that writes a file if -w is given on the command line; otherwise
    it just prints it out. It has the option of comparing a diff to the file.
    """
    import utool as ut
    dowrite = ut.get_argflag(('-w', '--write'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None

    if fullprint is None:
        fullprint = True

    if fullprint is False:
        fullprint = ut.get_argflag('--print')

    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL___')
        else:
            print('specify --print to write to stdout')
            pass
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                import unicodedata
                textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
                ut.print_difftext(textdiff)

        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)
Example 21
def update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun=False):
    """
    #if use_config_file and wildbook_tomcat_path:
    #    # Update the Wildbook configuration to see *THIS* ibeis database
    #    with lockfile.LockFile(lock_fpath):
    #        update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun)
    """
    wildbook_properteis_dpath = join(wildbook_tomcat_path,
                                     'WEB-INF/classes/bundles/')
    print('[ibs.update_wildbook_ia_config()] Wildbook properties=%r' % (
        wildbook_properteis_dpath, ))
    # The src file is non-standard. It should be removed here as well
    wildbook_config_fpath_dst = join(wildbook_properteis_dpath,
                                     'commonConfiguration.properties')
    ut.assert_exists(wildbook_properteis_dpath)
    # for some reason the .default file is not there, that should be ok though
    orig_content = ut.read_from(wildbook_config_fpath_dst)
    content = orig_content
    # Make sure wildbook knows where to find us
    if False:
        # Old way of telling WB where to find IA
        content = re.sub('IBEIS_DB_path = .*',
                         'IBEIS_DB_path = ' + ibs.get_db_core_path(), content)
        content = re.sub('IBEIS_image_path = .*',
                         'IBEIS_image_path = ' + ibs.get_imgdir(), content)

    ia_hostport = 'http://localhost:5000'
    ia_rest_prefix = ut.named_field('prefix', 'IBEISIARestUrl.*')
    host_port = ut.named_field('host_port', 'http://.*?:[0-9]+')
    content = re.sub(ia_rest_prefix + host_port, ut.bref_field('prefix') + ia_hostport, content)

    # Write to the configuration if it is different
    if orig_content != content:
        need_sudo = not ut.is_file_writable(wildbook_config_fpath_dst)
        if need_sudo:
            quoted_content = '"%s"' % (content, )
            print('Attempting to gain sudo access to update wildbook config')
            command = ['sudo', 'sh', '-c', '\'', 'echo',
                       quoted_content, '>', wildbook_config_fpath_dst, '\'']
            # ut.cmd(command, sudo=True)
            command = ' '.join(command)
            if not dryrun:
                os.system(command)
        else:
            ut.write_to(wildbook_config_fpath_dst, content)
Example 22
def translate_fpath(py_fpath):
    """ creates a cython pyx file from a python file with cyth tags
    >>> from cyth.cyth_script import *  # NOQA
    >>> py_fpath = utool.unixpath('~/code/vtool/vtool/linalg.py')
    """
    # If -a is given, generate cython html for each pyx file
    # Get cython pyx and benchmark output path
    cy_pyxpath = cyth_helpers.get_cyth_path(py_fpath)
    cy_pxdpath = cyth_helpers.get_cyth_pxd_path(py_fpath)
    cy_benchpath = cyth_helpers.get_cyth_bench_path(py_fpath)
    # Infer the python module name
    py_modname = cyth_helpers.get_py_module_name(py_fpath)
    # Read the python file
    py_text = utool.read_from(py_fpath, verbose=False)
    # dont parse files without tags
    if py_text.find('CYTH') == -1:
        return None
    print('\n___________________')
    print('[cyth.translate_fpath] py_fpath=%r' % py_fpath)
    # Parse the python file
    visitor = cyth_parser.CythVisitor(py_modname=py_modname)
    visitor.visit(ast.parse(py_text))
    # Get the generated pyx file and benchmark file
    pyx_text, pxd_text = visitor.get_result()
    bench_text = visitor.get_benchmarks()
    # Write pyx and benchmark
    utool.write_to(cy_pyxpath, pyx_text)
    utool.write_to(cy_pxdpath, pxd_text, verbose=False)
    utool.write_to(cy_benchpath, bench_text, verbose=False)
    if CYTHON_HTML:
        print('[cyth.translate_fpath] generating annotation html')
        cython_exe = utool.get_cython_exe()
        os.system(cython_exe + ' -a ' + cy_pyxpath)
    if CYTHON_MAKE_C:
        print('[cyth.translate_fpath] generating cython c')
        cython_exe = utool.get_cython_exe()
        os.system(cython_exe + ' ' + cy_pyxpath)
    if CYTHON_BUILD:
        gcc_exe = 'gcc'
        print('[cyth.translate_fpath] generating c library')
        c_path = cyth_helpers.get_c_path(cy_pyxpath)
        #C:\MinGW\bin\gcc.exe -w -Wall -m32 -lpython27 -IC:\Python27\Lib\site-packages\numpy\core\include -IC:\Python27\include -IC:\Python27\PC -IC:\Python27\Lib\site-packages\numpy\core\include -LC:\Python27\libs -o _linalg_cyth.pyd -c _linalg_cyth.c
        os.system(gcc_exe + ' ' + c_path)
    return cy_benchpath
Example 24
def find_used_citations(tex_fpath_list, return_inverse=False):
    """
    fpaths = get_thesis_tex_fpaths()
    """
    citekey_list = []
    inverse = ut.ddict(list)
    for tex_fpath in tex_fpath_list:
        text = ut.read_from(tex_fpath)
        #print('\n\n+-----')
        local_cites = find_citations(text)
        citekey_list.extend(local_cites)
        for key in local_cites:
            inverse[key].append(tex_fpath)

    citekey_list = sorted(set(citekey_list))
    if return_inverse:
        return citekey_list, inverse
    else:
        return citekey_list
Example 25
def print_system_users():
    r"""

    prints users on the system

    On unix looks for /bin/bash users in /etc/passwd

    CommandLine:
        python -m utool.util_cplat --test-print_system_users

    Example:
        >>> # SCRIPT
        >>> from utool.util_cplat import *  # NOQA
        >>> result = print_system_users()
        >>> print(result)
    """
    import utool as ut
    text = ut.read_from('/etc/passwd')
    userinfo_text_list = text.splitlines()
    userinfo_list = [uitext.split(':') for uitext in userinfo_text_list]
    #print(ut.list_str(sorted(userinfo_list)))
    bash_users = [tup for tup in userinfo_list if tup[-1] == '/bin/bash']
    print(ut.list_str(sorted(bash_users)))
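On Unix the standard pwd module exposes the same /etc/passwd records and avoids the hand parsing; a sketch under that assumption:

import pwd
bash_users = sorted(p.pw_name for p in pwd.getpwall() if p.pw_shell == '/bin/bash')
print(bash_users)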
Example 26
def print_system_users():
    r"""

    prints users on the system

    On unix looks for /bin/bash users in /etc/passwd

    CommandLine:
        python -m utool.util_cplat --test-print_system_users

    Example:
        >>> # SCRIPT
        >>> from utool.util_cplat import *  # NOQA
        >>> result = print_system_users()
        >>> print(result)
    """
    import utool as ut
    text = ut.read_from('/etc/passwd')
    userinfo_text_list = text.splitlines()
    userinfo_list = [uitext.split(':') for uitext in userinfo_text_list]
    #print(ut.repr4(sorted(userinfo_list)))
    bash_users = [tup for tup in userinfo_list if tup[-1] == '/bin/bash']
    print(ut.repr4(sorted(bash_users)))
Example 27
def ensure_explicit_namespace(fpath, namespace, varname_list):
    import re
    import utool as ut

    text = ut.read_from(fpath)
    orig_text = text
    new_text = text

    for varname in varname_list:
        regex = ''.join((
            ut.named_field('prefix', '[^.]'),
            ut.named_field('var', ut.whole_word(varname)),
        ))
        repl = ''.join(
            (ut.bref_field('prefix'), namespace, '.', ut.bref_field('var')))

        new_text = re.sub(regex, repl, new_text)

    textdiff = ut.get_textdiff(orig_text, new_text)
    print(textdiff)
    if ut.user_cmdline_prompt('Does the text look good?'):
        # if diff looks good write
        ut.write_to(fpath, new_text)
Example 28
def send_public_key_to_server(username, server):
    """
    Can just use this instead

    ssh-copy-id id@server

    ssh-copy-id [email protected]
    ssh-copy-id [email protected]
    ssh-copy-id [email protected]

    ut.copy_text_to_clipboard(remote_cmdstr)

    chmod 700 ~git/.ssh
    chmod 600 ~git/.ssh/authorized_keys

    """

    public_key = ut.read_from(ut.truepath('~/.ssh/id_rsa.pub'))
    fmtstr = 'ssh {user}@{server} "{remote_cmdstr}"'
    remote_cmdstr = 'echo {public_key} >> ~{username}/.ssh/authorized_keys'.format(
        public_key=public_key.replace('\\', '\\\\'), username=username)
    sshcmdstr = fmtstr.format(user=username, server=server, remote_cmdstr=remote_cmdstr)
    ut.copy_text_to_clipboard(sshcmdstr)
    print('You need to run the command in your clipboard')
Example 29
def send_public_key_to_server(username, server):
    """
    Can just use this instead

    ssh-copy-id id@server

    ssh-copy-id [email protected]
    ssh-copy-id [email protected]
    ssh-copy-id [email protected]

    ut.copy_text_to_clipboard(remote_cmdstr)

    chmod 700 ~git/.ssh
    chmod 600 ~git/.ssh/authorized_keys

    """

    public_key = ut.read_from(ut.truepath('~/.ssh/id_rsa.pub'))
    fmtstr = 'ssh {user}@{server} "{remote_cmdstr}"'
    remote_cmdstr = 'echo {public_key} >> ~{username}/.ssh/authorized_keys'.format(
        public_key=public_key.replace('\\', '\\\\'), username=username)
    sshcmdstr = fmtstr.format(user=username, server=server, remote_cmdstr=remote_cmdstr)
    ut.copy_text_to_clipboard(sshcmdstr)
    print('You need to run the command in your clipboard')
Example 30
def fix_section_title_capitalization(tex_fpath, dryrun=True):
    # Read in text and ensure ascii format
    text = ut.read_from(tex_fpath)

    section_type_list = [
        'chapter',
        'section',
        'subsection',
        'subsubsection',
        'paragraph',
    ]
    re_section_type = ut.named_field('section_type',
                                     ut.regex_or(section_type_list))
    re_section_title = ut.named_field('section_title', '[^}]*')

    re_spaces = ut.named_field('spaces', '^ *')

    pattern = re_spaces + re.escape(
        '\\') + re_section_type + '{' + re_section_title + '}'

    def fix_capitalization(match):
        dict_ = match.groupdict()
        section_title = dict_['section_title']
        #if section_title == 'The Great Zebra Count':
        #    return match.string[slice(*match.span())]
        #    #return 'The Great Zebra Count'
        # general logic
        #words = section_title.split(' ')
        tokens = re.split(ut.regex_or([' ', '/']), section_title)
        #if 'Coverage' in section_title:
        #    ut.embed()
        #    pass
        #words = [word if count == 0 else word.lower() for count, word in enumerate(words)]
        #new_section_title = ' '.join(words)
        tokens = [
            t if count == 0 else t.lower() for count, t in enumerate(tokens)
        ]
        new_section_title = ''.join(tokens)

        # hacks for caps of expanded titles
        search_repl_list = constants_tex_fixes.CAPITAL_TITLE_LIST
        for repl in search_repl_list:
            new_section_title = re.sub(re.escape(repl),
                                       repl,
                                       new_section_title,
                                       flags=re.IGNORECASE)
        # hacks for acronyms
        for full, acro in constants_tex_fixes.ACRONYMN_LIST:
            new_section_title = re.sub(r'\b' + re.escape(acro) + r'\b',
                                       acro,
                                       new_section_title,
                                       flags=re.IGNORECASE)

        #'the great zebra and giraffe count'

        #new_section_title = section_title.lower()
        new_text = dict_['spaces'] + '\\' + dict_[
            'section_type'] + '{' + new_section_title + '}'
        VERBOSE = 0
        if VERBOSE:
            old_text = match.string[slice(*match.span())]
            if new_text != old_text:
                print(ut.dict_str(dict_))
                print('--- REPL ---')
                print(old_text)
                print(new_text)
        return new_text

    #for match in re.finditer(pattern, text, flags=re.MULTILINE):
    #    fix_capitalization(match)

    new_text = re.sub(pattern, fix_capitalization, text, flags=re.MULTILINE)

    if not dryrun:
        ut.write_to(tex_fpath, new_text)
    else:
        ut.print_difftext(ut.get_textdiff(text, new_text, 0))
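A hypothetical workflow: preview the diff with the default dry run, then write in place once it looks right:

fix_section_title_capitalization('chapter1.tex')                # dryrun=True: prints a diff only
fix_section_title_capitalization('chapter1.tex', dryrun=False)  # rewrites the file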
Example 31
def autogenerate_nth_schema_version(schema_spec, n=-1):
    r"""
    dumps, prints, or diffs autogen schema based on command line

    Args:
        n (int):

    CommandLine:
        python -m ibeis.control._sql_helpers --test-autogenerate_nth_schema_version

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.control._sql_helpers import *  # NOQA
        >>> from ibeis.control import DB_SCHEMA
        >>> # build test data
        >>> schema_spec = DB_SCHEMA
        >>> n = 1
        >>> # execute function
        >>> tablename = autogenerate_nth_schema_version(schema_spec, n)
        >>> # verify results
        >>> result = str(tablename)
        >>> print(result)
    """
    import utool as ut
    print('[_SQL] AUTOGENERATING CURRENT SCHEMA')
    db = get_nth_test_schema_version(schema_spec, n=n)
    # Auto-generate the version skip schema file
    schema_spec_dir, schema_spec_fname = split(schema_spec.__file__)
    schema_spec_fname = splitext(schema_spec_fname)[0]
    # HACK TO GET AUTOGEN COMMAND
    # FIXME: Make this autogen command a bit more sane and not completely
    # coupled with ibeis
    autogen_cmd = ut.codeblock(
        '''
        python -m ibeis.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update --write
        python -m ibeis.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update --diff=1
        python -m ibeis.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update
        '''
    ).format(schema_spec_fname=schema_spec_fname, funcname=schema_spec_fname.lower())
    autogen_text = db.get_schema_current_autogeneration_str(autogen_cmd)

    autogen_fname = '%s_CURRENT.py' % schema_spec_fname
    autogen_fpath = join(schema_spec_dir, autogen_fname)

    dowrite = ut.get_argflag(('-w', '--write', '--dump-autogen-schema'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None
    dowrite = dowrite and not show_diff

    if dowrite:
        ut.write_to(autogen_fpath, autogen_text)
    else:
        if show_diff:
            if ut.checkpath(autogen_fpath, verbose=True):
                prev_text = ut.read_from(autogen_fpath)
                textdiff = ut.util_str.get_textdiff(prev_text, autogen_text, num_context_lines=num_context_lines)
                ut.print_difftext(textdiff)
        else:
            ut.util_print.print_python_code(autogen_text)
        print('\nL___\n...would write to: %s' % autogen_fpath)

    print(' Run with -n=<index> to get a specific schema version by index. -1 == latest')
    print(' Run with --write to autogenerate latest schema version')
    print(' Run with --diff or --diff=<numcontextlines> to see the difference between current and requested')
    return db
Example 32
def get_file_stats(fpath):
    text = utool.read_from(fpath, verbose=False)
    lc = len(text.splitlines())
    wc = len(text.split(' '))
    return lc, wc
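Note that wc counts space-separated tokens over the whole text rather than words per line. A hypothetical call:

lc, wc = get_file_stats('some_module.py')   # hypothetical path
print('lines=%d tokens=%d' % (lc, wc))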
Example 33
def ingest_oxford_style_db(dbdir, dryrun=False):
    """
    Ingest either oxford or paris

    Args:
        dbdir (str):

    CommandLine:
        python -m ibeis.dbio.ingest_database --exec-ingest_oxford_style_db --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.dbio.ingest_database import *  # NOQA
        >>> dbdir = '/raid/work/Oxford'
        >>> dryrun = True
        >>> ingest_oxford_style_db(dbdir)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()

    Ignore:
        >>> from ibeis.dbio.ingest_database import *  # NOQA
        >>> import ibeis
        >>> dbdir = '/raid/work/Oxford'
        >>> dbdir = '/raid/work/Paris'
        >>>
        #>>> ibeis.dbio.convert_db.ingest_oxford_style_db(dbdir)
    """
    from PIL import Image
    print('Loading Oxford Style Images from: ' + dbdir)

    def _parse_oxsty_gtfname(gt_fname):
        """ parse gtfname for: (gt_name, quality_lbl, num) """
        # num is an id, not a number of annots
        gt_format = '{}_{:d}_{:D}.txt'
        name, num, quality = parse.parse(gt_format, gt_fname)
        return (name, num, quality)

    def _read_oxsty_gtfile(gt_fpath, name, quality, img_dpath, ignore_list):
        oxsty_annot_info_list = []
        # read the individual ground truth file
        with open(gt_fpath, 'r') as file:
            line_list = file.read().splitlines()
            for line in line_list:
                if line == '':
                    continue
                fields = line.split(' ')
                gname = fields[0].replace('oxc1_', '') + '.jpg'
                # >:( Because PARIS just cant keep paths consistent
                if gname.find('paris_') >= 0:
                    paris_hack = gname[6:gname.rfind('_')]
                    gname = join(paris_hack, gname)
                if gname in ignore_list:
                    continue
                if len(fields) > 1:  # if has bbox
                    bbox = [int(round(float(x))) for x in fields[1:]]
                else:
                    # Get annotation width / height
                    gpath = join(img_dpath, gname)
                    (w, h) = Image.open(gpath).size
                    bbox = [0, 0, w, h]
                oxsty_annot_info = (gname, bbox)
                oxsty_annot_info_list.append(oxsty_annot_info)
        return oxsty_annot_info_list

    gt_dpath = ut.existing_subpath(
        dbdir, ['oxford_style_gt', 'gt_files_170407', 'oxford_groundtruth'])

    img_dpath = ut.existing_subpath(dbdir, ['oxbuild_images', 'images'])

    corrupted_file_fpath = join(gt_dpath, 'corrupted_files.txt')
    ignore_list = []
    # Check for corrupted files (Looking at your Paris Buildings Dataset)
    if ut.checkpath(corrupted_file_fpath):
        ignore_list = ut.read_from(corrupted_file_fpath).splitlines()

    gname_list = ut.list_images(img_dpath, ignore_list=ignore_list,
                                recursive=True, full=False)

    # just in case utool broke
    for ignore in ignore_list:
        assert ignore not in gname_list

    # Read the Oxford Style Groundtruth files
    print('Loading Oxford Style Names and Annots')
    gt_fname_list = os.listdir(gt_dpath)
    num_gt_files = len(gt_fname_list)
    query_annots = []
    gname2_annots_raw = ut.ddict(list)
    name_set = set([])
    print(' * num_gt_files = %d ' % num_gt_files)
    #
    # Iterate over each groundtruth file
    for gtx, gt_fname in enumerate(ut.ProgIter(gt_fname_list,
                                               'parsed oxsty gtfile: ')):
        if gt_fname == 'corrupted_files.txt':
            continue
        #Get name, quality, and num from fname
        (name, num, quality) = _parse_oxsty_gtfname(gt_fname)
        gt_fpath = join(gt_dpath, gt_fname)
        name_set.add(name)
        oxsty_annot_info_sublist = _read_oxsty_gtfile(
            gt_fpath, name, quality, img_dpath, ignore_list)
        if quality == 'query':
            for (gname, bbox) in oxsty_annot_info_sublist:
                query_annots.append((gname, bbox, name, num))
        else:
            for (gname, bbox) in oxsty_annot_info_sublist:
                gname2_annots_raw[gname].append((name, bbox, quality))
    print(' * num_query images = %d ' % len(query_annots))
    #
    # Remove duplicates img.jpg : (*1.txt, *2.txt, ...) -> (*.txt)
    gname2_annots = ut.ddict(list)
    multinamed_gname_list = []
    for gname, val in gname2_annots_raw.items():
        val_repr = list(map(repr, val))
        unique_reprs = set(val_repr)
        unique_indexes = [val_repr.index(urep) for urep in unique_reprs]
        for ux in unique_indexes:
            gname2_annots[gname].append(val[ux])
        if len(gname2_annots[gname]) > 1:
            multinamed_gname_list.append(gname)
    # print some statistics
    query_gname_list = [tup[0] for tup in query_annots]
    gname_with_groundtruth_list = gname2_annots.keys()
    gname_with_groundtruth_set = set(gname_with_groundtruth_list)
    gname_set = set(gname_list)
    query_gname_set = set(query_gname_list)
    gname_without_groundtruth_list = list(gname_set - gname_with_groundtruth_set)
    print(' * num_images = %d ' % len(gname_list))
    print(' * images with groundtruth    = %d ' % len(gname_with_groundtruth_list))
    print(' * images without groundtruth = %d ' % len(gname_without_groundtruth_list))
    print(' * images with multi-groundtruth = %d ' % len(multinamed_gname_list))
    #make sure all queries have ground truth and there are no duplicate queries
    #
    assert len(query_gname_list) == len(query_gname_set.intersection(gname_with_groundtruth_list))
    assert len(query_gname_list) == len(set(query_gname_list))
    #=======================================================
    # Build IBEIS database

    if not dryrun:
        ibs = ibeis.opendb(dbdir, allow_newdir=True)
        ibs.cfg.other_cfg.auto_localize = False
        print('adding to table: ')
        # Add images to ibeis
        gpath_list = [join(img_dpath, gname).replace('\\', '/') for gname in gname_list]
        gid_list = ibs.add_images(gpath_list)

        # 1) Add Query Annotations
        qgname_list, qbbox_list, qname_list, qid_list = zip(*query_annots)
        # get image ids of queries
        qgid_list = [gid_list[gname_list.index(gname)] for gname in qgname_list]
        qnote_list = ['query'] * len(qgid_list)
        # 2) Add nonquery database annots
        dgname_list = list(gname2_annots.keys())  # NOQA
        dgid_list = []
        dname_list = []
        dbbox_list = []
        dnote_list = []
        for gname in gname2_annots.keys():
            gid = gid_list[gname_list.index(gname)]
            annots = gname2_annots[gname]
            for name, bbox, quality in annots:
                dgid_list.append(gid)
                dbbox_list.append(bbox)
                dname_list.append(name)
                dnote_list.append(quality)
        # 3) Add distractors: TODO: 100k
        ugid_list = [gid_list[gname_list.index(gname)]
                     for gname in gname_without_groundtruth_list]
        ubbox_list = [[0, 0, w, h] for (w, h) in ibs.get_image_sizes(ugid_list)]
        unote_list = ['distractor'] * len(ugid_list)

        # TODO Annotation consistency in terms of duplicate bounding boxes
        qaid_list = ibs.add_annots(qgid_list, bbox_list=qbbox_list,
                                   name_list=qname_list, notes_list=qnote_list)
        daid_list = ibs.add_annots(dgid_list, bbox_list=dbbox_list,
                                   name_list=dname_list, notes_list=dnote_list)
        uaid_list = ibs.add_annots(ugid_list, bbox_list=ubbox_list, notes_list=unote_list)
        print('Added %d query annototations' % len(qaid_list))
        print('Added %d database annototations' % len(daid_list))
        print('Added %d distractor annototations' % len(uaid_list))

    update = False
    if update:
        # TODO: integrate this into normal ingest pipeline
        # Oxford
        ibs = ibeis.opendb(dbdir)
        aid_list = ibs.get_valid_aids()
        notes_list = ibs.get_annot_notes(aid_list)
        _dict = {
            'ok': ibs.const.QUAL_OK,
            'good': ibs.const.QUAL_GOOD,
            'junk': ibs.const.QUAL_JUNK,
            #'distractor': ibs.const.QUAL_JUNK
        }
        qual_text_list = [_dict.get(note, ibs.const.QUAL_UNKNOWN) for note in notes_list]
        ibs.set_annot_quality_texts(aid_list, qual_text_list)
        ibs._overwrite_all_annot_species_to('building')

        tags_list = [[note] if note in ['query', 'distractor'] else [] for note in notes_list]
        from ibeis import tag_funcs
        tag_funcs.append_annot_case_tags(ibs, aid_list, tags_list)
        #ibs._set
        # tags_ = ibs.get_annot_case_tags(aid_list)
        # pass
        """
Example 34
print('---------------------')
print('Cyth Visit Simple')

visitor = CythVisitor()
visitor.visit(pt)
print('---------------------')
print(visitor.get_result())


# More complicated test
import ast
import vtool
import utool
from os.path import join
py_fpath = join(vtool.__path__[0], 'keypoint.py')
py_text = utool.read_from(py_fpath)

parse_tree = ast.parse(py_text)
print(py_fpath)

print('---------------------')
#print('Abstract Syntax Tree 2:')
#print(ast.dump(parse_tree))

print('---------------------')
print('Cyth Visit Complex 2')
visitor2 = CythVisitor()
visitor2.visit(parse_tree)
print('---------------------')
print(visitor2.get_result())
print('---------------------')
Example 35
def sort_module_functions():
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]

    tmp = ['def' if isfunc else indent for isfunc, indent in zip(isfunc_list, indent_list)]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)

    #for block in block_list:
    #    print('#====')
    #    print(block)

    isfunc_list = [re.search('^def ', block, re.MULTILINE) is not None for block in block_list]

    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [findfuncname(block) if isfunc else None
                          for isfunc, block in zip(isfunc_list, block_list)]

    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)

    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)
    ismain_list = [re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
                   for nonfunc in nonfunc_list]

    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)

    newtext_list = []

    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')

    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')

    for mainblock in mainblock_list:
        newtext_list.append(mainblock)

    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext,))
    print('len(newtext) = %r' % (len(newtext),))
    print('len(text) = %r' % (len(text),))

    backup_fpath = ut.augpath(fpath, augext='.bak', augdir='_backup', ensure=True)

    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
Example 36
def sort_module_functions():
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]

    tmp = [
        'def' if isfunc else indent
        for isfunc, indent in zip(isfunc_list, indent_list)
    ]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)

    #for block in block_list:
    #    print('#====')
    #    print(block)

    isfunc_list = [
        re.search('^def ', block, re.MULTILINE) is not None
        for block in block_list
    ]

    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [
        findfuncname(block) if isfunc else None
        for isfunc, block in zip(isfunc_list, block_list)
    ]

    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)

    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)

    ismain_list = [
        re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
        for nonfunc in nonfunc_list
    ]

    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)

    newtext_list = []

    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')

    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')

    for mainblock in mainblock_list:
        newtext_list.append(mainblock)

    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext, ))
    print('len(newtext) = %r' % (len(newtext), ))
    print('len(text) = %r' % (len(text), ))

    backup_fpath = ut.augpath(fpath,
                              augext='.bak',
                              augdir='_backup',
                              ensure=True)

    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
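
A minimal, stdlib-only sketch of the same block-sorting idea (the helper name is hypothetical; it assumes top-level blocks are separated by exactly two blank lines, as the re.split('\n\n\n', ...) above does):

import re

def sort_functions_in_text(text):
    # Split into top-level blocks, sort function blocks by name, and emit
    # the non-function blocks first in their original order.
    blocks = re.split('\n\n\n', text)
    isfunc = [re.search('^def ', b, re.MULTILINE) is not None for b in blocks]

    def funcname(block):
        return re.search(r'def\s+(?P<name>\w+)', block).group('name')

    funcs = sorted((b for b, f in zip(blocks, isfunc) if f), key=funcname)
    others = [b for b, f in zip(blocks, isfunc) if not f]
    return '\n\n\n'.join(others + funcs)

Unlike the full version above, this sketch gives the __main__ block no special treatment.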
Example #37
def parse_latex_comments_for_commmands():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> parse_latex_comments_for_commmands()
    """
    fname = ut.get_argval('--fname', type_=str, default='figdefexpt.tex')
    text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/' + fname))
    #text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/figdefindiv.tex'))
    lines = text.split('\n')
    cmd_list = ['']
    in_comment = True
    for line in lines:
        if line.startswith('% ---'):
            # Keep separators
            toadd = line.replace('%', '#')
            if not (len(cmd_list) > 1 and cmd_list[-1].startswith('# ---')):
                cmd_list[-1] += (toadd)
            else:
                cmd_list.append(toadd)
            cmd_list.append('')

        if line.strip().startswith(r'\begin{comment}'):
            in_comment = True
            continue
        if in_comment:
            line = line.strip()
            if line == '' or line.startswith('#') or line.startswith('%'):
                in_comment = False
            else:
                cmd_list[-1] = cmd_list[-1] + line
                if not line.strip().endswith('\\'):
                    cmd_list[-1] = cmd_list[-1] + ' $@'
                    #cmd_list.append('')
                    #cmd_list.append('#--')
                    cmd_list.append('')
                    in_comment = False
                else:
                    cmd_list[-1] = cmd_list[-1] + '\n'

    cmd_list = [cmd.replace('--render', '').replace('--diskshow', '')
                for cmd in cmd_list]

    # formatting
    cmd_list2 = []
    for cmd in cmd_list:
        #cmd = cmd.replace(' -t ', ' \\\n    -t ')
        #cmd = cmd.replace('--db', '\\\n    --db')
        #cmd = cmd.replace('python -m ibeis.dev', './dev.py')
        cmd = cmd.replace('python -m ibeis.dev -e', 'ibeis -e')
        cmd_list2.append(cmd)
    cmd_list = cmd_list2

    print('cmd_list = %s' % (ut.repr2(cmd_list),))
    from os.path import splitext
    script_fname = 'regen_' + splitext(fname)[0] + '.sh'
    fname, script, line_list = write_script_lines(cmd_list, script_fname)
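
The final normalization is plain string replacement; a self-contained sketch with a made-up command shows the effect:

cmds = ['python -m ibeis.dev -e rank_cmc --db PZ_MTEST --render $@']
cmds = [c.replace('--render', '').replace('--diskshow', '') for c in cmds]
cmds = [c.replace('python -m ibeis.dev -e', 'ibeis -e') for c in cmds]
print(cmds[0])  # 'ibeis -e rank_cmc --db PZ_MTEST  $@' (a double space is left where --render was)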
Example #38
def git_sequence_editor_squash(fpath):
    """
    squashes wip messages

    CommandLine:
        python -m utool.util_git --exec-git_sequence_editor_squash

    Example:
        >>> # SCRIPT
        >>> import utool as ut
        >>> from utool.util_git import *  # NOQA
        >>> fpath = ut.get_argval('--fpath', str, default=None)
        >>> git_sequence_editor_squash(fpath)

    Ignore:
        text = ut.codeblock(
            '''
            pick 852aa05 better doctest for tips
            pick 3c779b8 wip
            pick 02bc21d wip
            pick 1853828 Fixed root tablename
            pick 9d50233 doctest updates
            pick 66230a5 wip
            pick c612e98 wip
            pick b298598 Fixed tablename error
            pick 1120a87 wip
            pick f6c4838 wip
            pick 7f92575 wip
            ''')

    Ignore:
        def squash_consecutive_commits_with_same_message():
            # http://stackoverflow.com/questions/8226278/git-alias-to-squash-all-commits-with-a-particular-commit-message
            # Can do interactively with this. Can it be done automatically and pay attention to
            # Timestamps etc?
            git rebase --interactive HEAD~40 --autosquash
            git rebase --interactive $(git merge-base HEAD master) --autosquash

            # Lookbehind correct version
            %s/\([a-z]* [a-z0-9]* wip\n\)\@<=pick \([a-z0-9]*\) wip/squash \2 wip/gc

           # THE FULL NON-INTERACTIVE AUTOSQUASH SCRIPT
           # TODO: Don't squash if there is a one-hour timedelta between commits

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git rev-list HEAD | tail -n 1) --autosquash --no-verify
           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~10 --autosquash --no-verify

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git merge-base HEAD master) --autosquash --no-verify

           # 14d778fa30a93f85c61f34d09eddb6d2cafd11e2
           # c509a95d4468ebb61097bd9f4d302367424772a3
           # b0ffc26011e33378ee30730c5e0ef1994bfe1a90
           # GIT_SEQUENCE_EDITOR=<script> git rebase -i <params>
           # GIT_SEQUENCE_EDITOR="echo 'FOOBAR $1' " git rebase -i HEAD~40 --autosquash
           # git checkout master
           # git branch -D tmp
           # git checkout -b tmp
           # option to get the tail commit
           $(git rev-list HEAD | tail -n 1)
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash --no-verify
           <params>
    """
    # print(sys.argv)
    import utool as ut

    text = ut.read_from(fpath)
    # print('fpath = %r' % (fpath,))
    print(text)
    # Doesn't work because of the fixed-width requirement
    # search = (ut.util_regex.positive_lookbehind('[a-z]* [a-z0-9]* wip\n') + 'pick ' +
    #           ut.reponamed_field('hash', '[a-z0-9]*') + ' wip')
    # repl = ('squash ' + ut.bref_field('hash') + ' wip')
    # import re
    # new_text = re.sub(search, repl, text, flags=re.MULTILINE)
    # print(new_text)
    prev_msg = None
    prev_dt = None
    new_lines = []

    def get_commit_date(hashid):
        out, err, ret = ut.cmd("git show -s --format=%ci " + hashid, verbose=False, quiet=True, pad_stdout=False)
        # from datetime import datetime
        from dateutil import parser

        # print('out = %r' % (out,))
        stamp = out.strip("\n")
        # print('stamp = %r' % (stamp,))
        dt = parser.parse(stamp)
        # dt = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S %Z')
        # print('dt = %r' % (dt,))
        return dt

    for line in text.split("\n"):
        commit_line = line.split(" ")
        if len(commit_line) < 3:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        action = commit_line[0]
        hashid = commit_line[1]
        msg = " ".join(commit_line[2:])
        try:
            dt = get_commit_date(hashid)
        except ValueError:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        orig_msg = msg
        can_squash = action == "pick" and msg == "wip" and prev_msg == "wip"
        if prev_dt is not None and prev_msg == "wip":
            tdelta = dt - prev_dt
            # Only squash closely consecutive commits
            threshold_minutes = 45
            td_min = tdelta.total_seconds() / 60.0
            # print(tdelta)
            can_squash &= td_min < threshold_minutes
            msg = msg + " -- tdelta=%r" % (ut.get_timedelta_str(tdelta),)
        if can_squash:
            new_line = " ".join(["squash", hashid, msg])
            new_lines += [new_line]
        else:
            new_lines += [line]
        prev_msg = orig_msg
        prev_dt = dt
    new_text = "\n".join(new_lines)

    # print('Dry run')
    # ut.dump_autogen_code(fpath, new_text)
    print(new_text)
    ut.write_to(fpath, new_text, n=None)
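
The squash decision itself is a small rule; a self-contained sketch (timestamps are made up) behaves the same way:

from datetime import datetime, timedelta

def can_squash(action, msg, prev_msg, dt, prev_dt, threshold_minutes=45):
    # Fold a 'wip' commit into the previous one only when both are 'wip'
    # and they were committed within the time threshold.
    ok = action == 'pick' and msg == 'wip' and prev_msg == 'wip'
    if ok and prev_dt is not None:
        ok = (dt - prev_dt) < timedelta(minutes=threshold_minutes)
    return ok

t0 = datetime(2017, 1, 1, 12, 0)
print(can_squash('pick', 'wip', 'wip', t0 + timedelta(minutes=10), t0))  # True
print(can_squash('pick', 'wip', 'wip', t0 + timedelta(hours=2), t0))     # False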
Example #39
def fix_sentences():
    """
    fixtex --fixsent
    """
    text = ut.read_from('main.tex')
    root = latex_parser.LatexDocPart.parse_text(text, debug=None)
    document = root.find_descendant_type('document')
    chapters = list(document.find_descendant_types('chapter'))

    def separate_math(line):
        # Break line into math and english parts
        mathsep = ut.negative_lookbehind(re.escape('\\')) + re.escape('$')
        pos = [0]
        for count, match in enumerate(re.finditer(mathsep, line)):
            pos.append(match.start() if count % 2 == 0 else match.end())
        pos.append(len(line))
        english = []
        math = []
        for count, (l, r) in enumerate(ut.itertwo(pos)):
            if count % 2 == 0 and line[l:r]:
                english.append(line[l:r])
            else:
                math.append(line[l:r])
        return english, math

    def print_acronymn_def(english_line):
        words = re.split('[~\s]', english_line.rstrip('.'))
        words = [w.rstrip(',').rstrip('.') for w in words]
        flag = 0
        for count, word in enumerate(words):
            if re.match('\\([A-Z]+\\)', word):
                ut.cprint(word, 'blue')
                flag = True
        if flag:
            print(re.sub('\\\\cite{[^}]*}', '', line))

    def has_consec_cap_words(words):
        for count, (u, v) in enumerate(ut.itertwo(words)):
            if u[0].isupper() and v[0].isupper():
                if count > 0:
                    return True

    def gen_sentences():
        for chapter in chapters:
            # ut.cprint(chapter.fpath_root(), 'yellow')
            for line in chapter.find_sentences():
                context = {'chapter': chapter}
                yield line, context

    import re
    found = ut.ddict(list)
    for line, context in gen_sentences():
        english, math = separate_math(line)
        english_line = ' '.join(english).replace(',', '').rstrip('.').strip(' ')
        words = re.split('[~\s]+', english_line)
        words = [w.rstrip(',').rstrip('.') for w in words]

        if has_consec_cap_words(words):
            print(line)

        # print_acronymn_def(english_line)

        if 'locality sensitive' in line:
            print("LSH NEEDS DASH")

        multicap_words = []
        for count, word in enumerate(words):
            word = word.strip(')').strip('(')
            if sum(c.isupper() for c in word) > 1:
                if word.startswith('\\') and word.endswith('{}'):
                    continue
                if word.startswith('\\Cref') and word.endswith('}'):
                    if count != 0:
                        print("FIX CREF UPPER")
                        print(line)
                    continue
                if word.startswith('\\cref') and word.endswith('}'):
                    if count == 0:
                        print("FIX CREF LOWER")
                        print(line)
                    continue
                if not word.isalpha():
                    continue
                multicap_words.append(word)
        if multicap_words:
            found[context['chapter']].append(multicap_words)
        # print(ut.repr4(ut.dict_hist(found)))

    def english_tokens(line):
        # Break line into math and english parts
        mathsep = ut.negative_lookbehind(re.escape('\\')) + re.escape('$')

        def clean_word(word):
            if word.startswith('``'):
                word = word[2:]
            if word.endswith("''"):
                word = word[:-2]
            return word.strip(',').rstrip('.')

        prev = 0
        tokens = []
        for count, match in enumerate(re.finditer(mathsep, line)):
            if count % 2 == 0:
                curr = match.start()
                english = line[prev:curr]
                parts = re.split('[~\s]+', english)
                parts = (clean_word(p) for p in parts)
                parts = (p for p in parts if p)
                tokens.extend(parts)
            else:
                curr = match.end()
                math = line[prev:curr]
                tokens.append(math)
            prev = curr
        return tokens

    from fixtex.svn_converter.latexparser import DocParser
    from fixtex.svn_converter.docnodes import CaptionNode, FigureNode
    from fixtex.svn_converter.tokenizer import Tokenizer

    def caption_sentences(fpath):
        text = ut.readfrom(fpath)
        tokenstream = Tokenizer(text).tokenize()
        self = DocParser(tokenstream, fpath)
        tree = self.parse()
        for node in tree.walk():
            if isinstance(node, FigureNode):
                for x in node.walk():
                    if isinstance(x, CaptionNode):
                        for sent in ut.split_sentences2(x.resolve()):
                            yield sent

    def gen_cap():
        fpaths = [
            ut.truepath('~/latex/crall-thesis-2017/figdef1.tex'),
            ut.truepath('~/latex/crall-thesis-2017/figdef2.tex'),
            ut.truepath('~/latex/crall-thesis-2017/figdef3.tex'),
            ut.truepath('~/latex/crall-thesis-2017/figdef4.tex'),
            ut.truepath('~/latex/crall-thesis-2017/figdef5.tex'),
        ]
        for fpath in fpaths:
            context = {'fpath': fpath}
            for sent in caption_sentences(fpath):
                yield sent, context

    # Find A, An grammar errors

    # Define special cases:
    cons_sounds = {
        'unit', 'user', 'unique', 'one', 'uniform', 'unified', 'useful'
    }
    vowel_sounds = {'roc', 'mcc', 'lnbnn', 'l1', 'hour'}

    def startswith_vowel_sound(after):
        # do our best guess
        if after.startswith('$'):
            if after[1] == '8':
                return True
            if after[1] == 'x':
                return True
        if after in vowel_sounds:
            return True
        if after in cons_sounds:
            return False
        return after[0] in 'aeiou'

    cmd_map, cmd_map1 = latex_parser.LatexDocPart.read_static_defs()

    simple_cmd_re = re.compile('\\\\[A-Za-z]*{}')

    print('\nCHECK FOR A / AN ERRORS')
    import itertools as it
    generators = [
        # gen_sentences(),
        gen_cap(),
    ]

    for line, context in it.chain(*generators):
        words = english_tokens(line)
        for u, v in ut.itertwo(words):
            v_orig = v
            if simple_cmd_re.match(v):
                key = v[:-2]
                try:
                    v = cmd_map[key]
                except KeyError:
                    print(line)
                    raise

            v = v.split('-')[0]
            article = u.lower()
            if article in {'a', 'an'}:
                after = v.lower()
                # TODO ensure v is a singular countable noun
                is_vowel_sound = startswith_vowel_sound(after)

                flag = False
                if article == 'a' and is_vowel_sound:
                    flag = 'word starts with a vowel sound; should use "an"'
                if article == 'an' and not is_vowel_sound:
                    flag = 'word starts with a consonant sound; should use "a"'

                if flag:
                    print('---------')
                    print(flag)
                    print(article, after)
                    print('{} {}'.format(u, v_orig))
                    print(line)
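
Stripped of the math-mode handling, the article check reduces to a small heuristic; a standalone sketch (using the same special-case word sets):

cons_sounds = {'unit', 'user', 'unique', 'one', 'uniform', 'unified', 'useful'}
vowel_sounds = {'roc', 'mcc', 'lnbnn', 'l1', 'hour'}

def starts_with_vowel_sound(word):
    word = word.lower()
    if word in vowel_sounds:
        return True
    if word in cons_sounds:
        return False
    return word[0] in 'aeiou'

assert starts_with_vowel_sound('hour')      # silent 'h', so "an hour"
assert not starts_with_vowel_sound('user')  # 'yoo' sound, so "a user"
assert starts_with_vowel_sound('error')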
Example #40
def git_sequence_editor_squash(fpath):
    """
    squashes wip messages

    CommandLine:
        python -m utool.util_git --exec-git_sequence_editor_squash

    Example:
        >>> # SCRIPT
        >>> import utool as ut
        >>> from utool.util_git import *  # NOQA
        >>> fpath = ut.get_argval('--fpath', str, default=None)
        >>> git_sequence_editor_squash(fpath)

    Ignore:
        text = ut.codeblock(
            '''
            pick 852aa05 better doctest for tips
            pick 3c779b8 wip
            pick 02bc21d wip
            pick 1853828 Fixed root tablename
            pick 9d50233 doctest updates
            pick 66230a5 wip
            pick c612e98 wip
            pick b298598 Fixed tablename error
            pick 1120a87 wip
            pick f6c4838 wip
            pick 7f92575 wip
            ''')

    Ignore:
        def squash_consecutive_commits_with_same_message():
            # http://stackoverflow.com/questions/8226278/git-alias-to-squash-all-commits-with-a-particular-commit-message
            # Can do interactively with this. Can it be done automatically and pay attention to
            # Timestamps etc?
            git rebase --interactive HEAD~40 --autosquash
            git rebase --interactive $(git merge-base HEAD master) --autosquash

            # Lookbehind correct version
            %s/\([a-z]* [a-z0-9]* wip\n\)\@<=pick \([a-z0-9]*\) wip/squash \2 wip/gc

           # THE FULL NON-INTERACTIVE AUTOSQUASH SCRIPT
           # TODO: Don't squash if there is a one-hour timedelta between commits

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git rev-list HEAD | tail -n 1) --autosquash --no-verify
           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~10 --autosquash --no-verify

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git merge-base HEAD master) --autosquash --no-verify

           # 14d778fa30a93f85c61f34d09eddb6d2cafd11e2
           # c509a95d4468ebb61097bd9f4d302367424772a3
           # b0ffc26011e33378ee30730c5e0ef1994bfe1a90
           # GIT_SEQUENCE_EDITOR=<script> git rebase -i <params>
           # GIT_SEQUENCE_EDITOR="echo 'FOOBAR $1' " git rebase -i HEAD~40 --autosquash
           # git checkout master
           # git branch -D tmp
           # git checkout -b tmp
           # option to get the tail commit
           $(git rev-list HEAD | tail -n 1)
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash --no-verify
           <params>
    """
    # print(sys.argv)
    import utool as ut
    text = ut.read_from(fpath)
    # print('fpath = %r' % (fpath,))
    print(text)
    # Doesn't work because of the fixed-width requirement
    # search = (ut.util_regex.positive_lookbehind('[a-z]* [a-z0-9]* wip\n') + 'pick ' +
    #           ut.reponamed_field('hash', '[a-z0-9]*') + ' wip')
    # repl = ('squash ' + ut.bref_field('hash') + ' wip')
    # import re
    # new_text = re.sub(search, repl, text, flags=re.MULTILINE)
    # print(new_text)
    prev_msg = None
    prev_dt = None
    new_lines = []

    def get_commit_date(hashid):
        out, err, ret = ut.cmd('git show -s --format=%ci ' + hashid, verbose=False, quiet=True, pad_stdout=False)
        # from datetime import datetime
        from dateutil import parser
        # print('out = %r' % (out,))
        stamp = out.strip('\n')
        # print('stamp = %r' % (stamp,))
        dt = parser.parse(stamp)
        # dt = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S %Z')
        # print('dt = %r' % (dt,))
        return dt

    for line in text.split('\n'):
        commit_line = line.split(' ')
        if len(commit_line) < 3:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        action = commit_line[0]
        hashid = commit_line[1]
        msg = ' ' .join(commit_line[2:])
        try:
            dt = get_commit_date(hashid)
        except ValueError:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        orig_msg = msg
        can_squash = action == 'pick' and msg == 'wip' and prev_msg == 'wip'
        if prev_dt is not None and prev_msg == 'wip':
            tdelta = dt - prev_dt
            # Only squash closely consecutive commits
            threshold_minutes = 45
            td_min = (tdelta.total_seconds() / 60.)
            # print(tdelta)
            can_squash &= td_min < threshold_minutes
            msg = msg + ' -- tdelta=%r' % (ut.get_timedelta_str(tdelta),)
        if can_squash:
            new_line = ' ' .join(['squash', hashid, msg])
            new_lines += [new_line]
        else:
            new_lines += [line]
        prev_msg = orig_msg
        prev_dt = dt
    new_text = '\n'.join(new_lines)

    # print('Dry run')
    # ut.dump_autogen_code(fpath, new_text)
    print(new_text)
    ut.write_to(fpath, new_text, n=None)
Example #41
def autogen_sphinx_apidoc():
    r"""
    autogen_sphinx_docs.py

    Ignore:
        C:\Python27\Scripts\autogen_sphinx_docs.py
        autogen_sphinx_docs.py

        pip uninstall sphinx
        pip install sphinx
        pip install sphinxcontrib-napoleon
        pip install sphinx --upgrade
        pip install sphinxcontrib-napoleon --upgrade

        cd C:\Python27\Scripts
        ls C:\Python27\Scripts

        python -c "import sphinx; print(sphinx.__version__)"

    CommandLine:
        python -m utool.util_setup --exec-autogen_sphinx_apidoc

    Example:
        >>> # SCRIPT
        >>> from utool.util_setup import *  # NOQA
        >>> autogen_sphinx_apidoc()
    """
    # TODO: assert sphinx-apidoc exe is found
    # TODO: make find_exe work?
    import utool as ut

    def build_sphinx_apidoc_cmdstr():
        print('')
        print('if this fails try: sudo pip install sphinx')
        print('')
        apidoc = 'sphinx-apidoc'
        if ut.WIN32:
            winprefix = 'C:/Python27/Scripts/'
            sphinx_apidoc_exe = winprefix + apidoc + '.exe'
        else:
            sphinx_apidoc_exe = apidoc
        apidoc_argfmt_list = [
            sphinx_apidoc_exe,
            '--force',
            '--full',
            '--maxdepth="{maxdepth}"',
            '--doc-author="{author}"',
            '--doc-version="{doc_version}"',
            '--doc-release="{doc_release}"',
            '--output-dir="_doc"',
            #'--separate',  # Put documentation for each module on its own page
            '--private',  # Include "_private" modules
            '{pkgdir}',
        ]
        outputdir = '_doc'
        author = ut.parse_author()
        packages = ut.find_packages(maxdepth=1)
        assert len(packages) != 0, 'directory must contain at least one package'
        if len(packages) > 1:
            assert len(packages) == 1, (
                "FIXME I don't know what to do with more than one root package: %r"
                % (packages,))
        pkgdir = packages[0]
        version = ut.parse_package_for_version(pkgdir)
        modpath = dirname(ut.truepath(pkgdir))

        apidoc_fmtdict = {
            'author': author,
            'maxdepth': '8',
            'pkgdir': pkgdir,
            'doc_version': version,
            'doc_release': version,
            'outputdir': outputdir,
        }
        ut.assert_exists('setup.py')
        ut.ensuredir('_doc')
        apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
        apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
        print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir, ))
        if ut.VERBOSE:
            print(ut.dict_str(apidoc_fmtdict))
        return apidoc_cmdstr, modpath, outputdir

    def build_conf_replstr():
        #
        # Make custom edits to conf.py
        # FIXME:
        #ext_search_text = ut.unindent(
        #    r'''
        #    extensions = [
        #    [^\]]*
        #    ]
        #    ''')
        ext_search_text = r'extensions = \[[^/]*\]'
        # TODO: http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
        #'sphinx.ext.mathjax',
        exclude_modules = []  # ['ibeis.all_imports']
        ext_repl_text = ut.codeblock('''
            MOCK_MODULES = {exclude_modules}
            if len(MOCK_MODULES) > 0:
                import mock
                for mod_name in MOCK_MODULES:
                    sys.modules[mod_name] = mock.Mock()

            extensions = [
                'sphinx.ext.autodoc',
                'sphinx.ext.viewcode',
                # For LaTeX
                'sphinx.ext.pngmath',
            # For Google Style Docstrings
                # https://pypi.python.org/pypi/sphinxcontrib-napoleon
                'sphinxcontrib.napoleon',
                #'sphinx.ext.napoleon',
            ]
            ''').format(exclude_modules=str(exclude_modules))
        #theme_search = 'html_theme = \'default\''
        theme_search = 'html_theme = \'[a-zA-Z_1-3]*\''
        theme_repl = ut.codeblock('''
            import sphinx_rtd_theme
            html_theme = "sphinx_rtd_theme"
            html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
            ''')
        head_text = ut.codeblock('''
            from sphinx.ext.autodoc import between
            import sphinx_rtd_theme
            import sys
            import os

            # Don't parse IBEIS args
            os.environ['IBIES_PARSE_ARGS'] = 'OFF'
            os.environ['UTOOL_AUTOGEN_SPHINX_RUNNING'] = 'ON'

            sys.path.append('{modpath}')
            sys.path.insert(0, os.path.abspath("../"))

            autosummary_generate = True

            modindex_common_prefix = ['_']
            ''').format(modpath=ut.truepath(modpath))
        tail_text = ut.codeblock('''
            def setup(app):
                # Register a sphinx.ext.autodoc.between listener to ignore everything
                # between lines that contain the word IGNORE
                app.connect('autodoc-process-docstring', between('^.*IGNORE.*$', exclude=True))
                return app
            ''')
        return (ext_search_text, ext_repl_text, theme_search, theme_repl,
                head_text, tail_text)

    apidoc_cmdstr, modpath, outputdir = build_sphinx_apidoc_cmdstr()
    ext_search_text, ext_repl_text, theme_search, theme_repl, head_text, tail_text = build_conf_replstr()

    dry = ut.get_argflag('--dry')

    if not dry:
        # Execute sphinx-apidoc
        ut.cmd(apidoc_cmdstr, shell=True)
        # sphinx-apidoc outputs conf.py to <outputdir>, add custom commands
        #
        # Change dir to <outputdir>
        print('chdir ' + outputdir)
        os.chdir(outputdir)
        conf_fname = 'conf.py'
        conf_text = ut.read_from(conf_fname)
        conf_text = conf_text.replace('import sys', 'import sys  # NOQA')
        conf_text = conf_text.replace('import os', 'import os  # NOQA')
        conf_text = ut.regex_replace(theme_search, theme_repl, conf_text)
        conf_text = ut.regex_replace(ext_search_text, ext_repl_text, conf_text)
        conf_text = head_text + '\n' + conf_text + tail_text
        ut.write_to(conf_fname, conf_text)
        # Make the documentation
        #if ut.LINUX:
        #    ut.cmd('make html', shell=True)
        #if ut.WIN32:
        #raw_input('waiting')
        if not ut.get_argflag('--nomake'):
            ut.cmd('make', 'html', shell=True)
    else:
        print(apidoc_cmdstr)
        print('cd ' + outputdir)
        print('manual edits of conf.py')
        print('make html')
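
The command assembly is ordinary string formatting; a trimmed, self-contained sketch (argument values are made up):

apidoc_argfmt_list = [
    'sphinx-apidoc',
    '--force',
    '--full',
    '--maxdepth="{maxdepth}"',
    '--doc-author="{author}"',
    '--output-dir="_doc"',
    '{pkgdir}',
]
fmtdict = {'maxdepth': '8', 'author': 'A. Author', 'pkgdir': 'mypkg'}
cmdstr = ' '.join(apidoc_argfmt_list).format(**fmtdict)
print(cmdstr)
# sphinx-apidoc --force --full --maxdepth="8" --doc-author="A. Author" --output-dir="_doc" mypkg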
Example #42
def get_file_stats(fpath):
    # Line and word counts for a text file
    text = utool.read_from(fpath, verbose=False)
    lc = len(text.splitlines())
    wc = len(text.split())  # split on any whitespace so words at line breaks are not merged
    return lc, wc
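
For example (hypothetical path; requires utool on the import path):

lc, wc = get_file_stats('setup.py')
print('%d lines, %d words' % (lc, wc))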
Example #43
def autogen_sphinx_apidoc():
    r"""
    autogen_sphinx_docs.py

    Ignore:
        C:\Python27\Scripts\autogen_sphinx_docs.py
        autogen_sphinx_docs.py

        pip uninstall sphinx
        pip install sphinx
        pip install sphinxcontrib-napoleon
        pip install sphinx --upgrade
        pip install sphinxcontrib-napoleon --upgrade

        cd C:\Python27\Scripts
        ls C:\Python27\Scripts

        python -c "import sphinx; print(sphinx.__version__)"

    CommandLine:
        python -m utool.util_setup --exec-autogen_sphinx_apidoc

    Example:
        >>> # SCRIPT
        >>> from utool.util_setup import *  # NOQA
        >>> autogen_sphinx_apidoc()
    """
    # TODO: assert sphinx-apidoc exe is found
    # TODO: make find_exe work?
    import utool as ut

    def build_sphinx_apidoc_cmdstr():
        print('')
        print('if this fails try: sudo pip install sphinx')
        print('')
        apidoc = 'sphinx-apidoc'
        if ut.WIN32:
            winprefix = 'C:/Python27/Scripts/'
            sphinx_apidoc_exe = winprefix + apidoc + '.exe'
        else:
            sphinx_apidoc_exe = apidoc
        apidoc_argfmt_list = [
            sphinx_apidoc_exe,
            '--force',
            '--full',
            '--maxdepth="{maxdepth}"',
            '--doc-author="{author}"',
            '--doc-version="{doc_version}"',
            '--doc-release="{doc_release}"',
            '--output-dir="_doc"',
            #'--separate',  # Put documentation for each module on its own page
            '--private',  # Include "_private" modules
            '{pkgdir}',
        ]
        outputdir = '_doc'
        author = ut.parse_author()
        packages = ut.find_packages(maxdepth=1)
        assert len(packages) != 0, 'directory must contain at least one package'
        if len(packages) > 1:
            assert len(packages) == 1, (
                "FIXME I don't know what to do with more than one root package: %r"
                % (packages,))
        pkgdir = packages[0]
        version = ut.parse_package_for_version(pkgdir)
        modpath = dirname(ut.truepath(pkgdir))

        apidoc_fmtdict = {
            'author': author,
            'maxdepth': '8',
            'pkgdir': pkgdir,
            'doc_version': version,
            'doc_release': version,
            'outputdir': outputdir,
        }
        ut.assert_exists('setup.py')
        ut.ensuredir('_doc')
        apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
        apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
        print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir,))
        if ut.VERBOSE:
            print(ut.dict_str(apidoc_fmtdict))
        return apidoc_cmdstr, modpath, outputdir

    def build_conf_replstr():
        #
        # Make custom edits to conf.py
        # FIXME:
        #ext_search_text = ut.unindent(
        #    r'''
        #    extensions = [
        #    [^\]]*
        #    ]
        #    ''')
        ext_search_text = r'extensions = \[[^/]*\]'
        # TODO: http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
        #'sphinx.ext.mathjax',
        exclude_modules = []  # ['ibeis.all_imports']
        ext_repl_text = ut.codeblock(
            '''
            MOCK_MODULES = {exclude_modules}
            if len(MOCK_MODULES) > 0:
                import mock
                for mod_name in MOCK_MODULES:
                    sys.modules[mod_name] = mock.Mock()

            extensions = [
                'sphinx.ext.autodoc',
                'sphinx.ext.viewcode',
                # For LaTeX
                'sphinx.ext.pngmath',
            # For Google Style Docstrings
                # https://pypi.python.org/pypi/sphinxcontrib-napoleon
                'sphinxcontrib.napoleon',
                #'sphinx.ext.napoleon',
            ]
            '''
        ).format(exclude_modules=str(exclude_modules))
        #theme_search = 'html_theme = \'default\''
        theme_search = 'html_theme = \'[a-zA-Z_1-3]*\''
        theme_repl = ut.codeblock(
            '''
            import sphinx_rtd_theme
            html_theme = "sphinx_rtd_theme"
            html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
            ''')
        head_text = ut.codeblock(
            '''
            from sphinx.ext.autodoc import between
            import sphinx_rtd_theme
            import sys
            import os

            # Don't parse IBEIS args
            os.environ['IBIES_PARSE_ARGS'] = 'OFF'
            os.environ['UTOOL_AUTOGEN_SPHINX_RUNNING'] = 'ON'

            sys.path.append('{modpath}')
            sys.path.insert(0, os.path.abspath("../"))

            autosummary_generate = True

            modindex_common_prefix = ['_']
            '''
        ).format(modpath=ut.truepath(modpath))
        tail_text = ut.codeblock(
            '''
            def setup(app):
                # Register a sphinx.ext.autodoc.between listener to ignore everything
                # between lines that contain the word IGNORE
                app.connect('autodoc-process-docstring', between('^.*IGNORE.*$', exclude=True))
                return app
            '''
        )
        return (ext_search_text, ext_repl_text, theme_search, theme_repl, head_text, tail_text)

    apidoc_cmdstr, modpath, outputdir = build_sphinx_apidoc_cmdstr()
    ext_search_text, ext_repl_text, theme_search, theme_repl, head_text, tail_text = build_conf_replstr()

    dry = ut.get_argflag('--dry')

    if not dry:
        # Execute sphinx-apidoc
        ut.cmd(apidoc_cmdstr, shell=True)
        # sphinx-apidoc outputs conf.py to <outputdir>, add custom commands
        #
        # Change dir to <outputdir>
        print('chdir ' + outputdir)
        os.chdir(outputdir)
        conf_fname = 'conf.py'
        conf_text = ut.read_from(conf_fname)
        conf_text = conf_text.replace('import sys', 'import sys  # NOQA')
        conf_text = conf_text.replace('import os', 'import os  # NOQA')
        conf_text = ut.regex_replace(theme_search, theme_repl, conf_text)
        conf_text = ut.regex_replace(ext_search_text, ext_repl_text, conf_text)
        conf_text = head_text + '\n' + conf_text + tail_text
        ut.write_to(conf_fname, conf_text)
        # Make the documentation
        #if ut.LINUX:
        #    ut.cmd('make html', shell=True)
        #if ut.WIN32:
        #raw_input('waiting')
        if not ut.get_argflag('--nomake'):
            ut.cmd('make', 'html', shell=True)
    else:
        print(apidoc_cmdstr)
        print('cd ' + outputdir)
        print('manual edits of conf.py')
        print('make html')
Example #44
def findcite():
    """
    prints info about used and unused citations
    """
    tex_fpath_list = testdata_fpaths()
    citekey_list = find_used_citations(tex_fpath_list)

    # Find uncited entries
    #bibtexparser = ut.tryimport('bibtexparser')
    bib_fpath = 'My_Library_clean.bib'
    bibtex_str = ut.read_from(bib_fpath)
    bib_database = bibtexparser.loads(bibtex_str)
    bibtex_dict = bib_database.get_entry_dict()

    for key in bibtex_dict.keys():
        entry = bibtex_dict[key]
        entry = ut.map_dict_keys(six.text_type, entry)
        entry = ut.map_dict_keys(six.text_type.lower, entry)
        bibtex_dict[key] = entry

    print('ALL')
    ignore = ['JP', '?']
    citekey_list = ut.setdiff_ordered(sorted(ut.unique(citekey_list)), ignore)
    #print(ut.indentjoin(citekey_list))
    print('len(citekey_list) = %r' % (len(citekey_list), ))

    unknown_keys = list(set(citekey_list) - set(bibtex_dict.keys()))
    unused_keys = list(set(bibtex_dict.keys()) - set(citekey_list))

    try:
        if len(unknown_keys) != 0:
            print('\nUNKNOWN KEYS:')
            print(ut.list_str(unknown_keys))
            raise AssertionError('unknown keys')
    except AssertionError as ex:
        ut.printex(ex, iswarning=True, keys=['unknown_keys'])

    @ut.argv_flag_dec(indent='    ')
    def close_keys():
        if len(unknown_keys) > 0:
            bibtex_dict.keys()
            print('\nDid you mean:')
            for key in unknown_keys:
                print('---')
                print(key)
                print(ut.closet_words(key, bibtex_dict.keys(), 3))
            print('L___')
        else:
            print('no unknown keys')

    close_keys()

    @ut.argv_flag_dec(indent='    ')
    def print_unused():
        print(ut.indentjoin(ut.sortedby(unused_keys, map(len, unused_keys))))

        print('len(unused_keys) = %r' % (len(unused_keys), ))

    print_unused()

    all_authors = []
    for key in bibtex_dict.keys():
        entry = bibtex_dict[key]
        toremove = ['author', '{', '}', r'\\textbackslash']
        author = ut.multi_replace(entry.get('author', ''), toremove, '')
        authors = author.split(' and ')
        all_authors.extend(authors)

    @ut.argv_flag_dec(indent='    ')
    def author_hist():
        #print(all_authors)
        hist_ = ut.dict_hist(all_authors, ordered=True)
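        # insert-then-delete removes the empty-author bucket without risking a KeyError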
        hist_[''] = None
        del hist_['']
        print('Author histogram')
        print(ut.dict_str(hist_)[-1000:])

    author_hist()

    @ut.argv_flag_dec(indent='    ')
    def unused_important():
        important_authors = [
            'hinton',
            'chum',
            'Jegou',
            'zisserman',
            'schmid',
            'sivic',
            'matas',
            'lowe',
            'perronnin',
            'douze',
        ]

        for key in unused_keys:
            entry = bibtex_dict[key]
            author = entry.get('author', '')
            #authors = author.split(' and ')
            hasimportant = any(auth in author.lower()
                               for auth in important_authors)
            if hasimportant or 'smk' in str(entry).lower():
                toremove = [
                    'note', 'month', 'type', 'pages', 'urldate', 'language',
                    'volume', 'number', 'publisher'
                ]
                entry = ut.delete_dict_keys(entry, toremove)
                print(
                    ut.dict_str(entry,
                                strvals=True,
                                key_order=['title', 'author', 'id']))

    unused_important()
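
Stripped of the utool helpers, the core bookkeeping is two set differences; a sketch with made-up keys:

cited = {'Lowe2004', 'Sivic2003', 'Nister2006'}
bib_keys = {'Lowe2004', 'Sivic2003', 'Philbin2007'}

unknown_keys = sorted(cited - bib_keys)  # cited but missing from the .bib
unused_keys = sorted(bib_keys - cited)   # in the .bib but never cited
print(unknown_keys)  # ['Nister2006']
print(unused_keys)   # ['Philbin2007']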
Example #45
def update_wildbook_install_config(webapps_dpath, unpacked_war_dpath):
    """
    CommandLine:
        python -m ibeis ensure_local_war
        python -m ibeis update_wildbook_install_config
        python -m ibeis update_wildbook_install_config --show

    Example:
        >>> from ibeis.control.wildbook_manager import *  # NOQA
        >>> import ibeis
        >>> tomcat_dpath = find_installed_tomcat()
        >>> webapps_dpath = join(tomcat_dpath, 'webapps')
        >>> wb_target = ibeis.const.WILDBOOK_TARGET
        >>> unpacked_war_dpath = join(webapps_dpath, wb_target)
        >>> locals_ = ut.exec_func_src(update_wildbook_install_config, globals())
        >>> #update_wildbook_install_config(webapps_dpath, unpacked_war_dpath)
        >>> ut.quit_if_noshow()
        >>> ut.vd(unpacked_war_dpath)
        >>> ut.editfile(locals_['permission_fpath'])
        >>> ut.editfile(locals_['jdoconfig_fpath'])
        >>> ut.editfile(locals_['asset_store_fpath'])
    """
    mysql_mode = not ut.get_argflag('--nomysql')

    #if ut.get_argflag('--vd'):
    #    ut.vd(unpacked_war_dpath)
    #find_installed_tomcat
    # Make sure permissions are correctly set in wildbook
    # Comment out the line that requires authentication
    permission_fpath = join(unpacked_war_dpath, 'WEB-INF/web.xml')
    ut.assertpath(permission_fpath)
    permission_text = ut.readfrom(permission_fpath)
    lines_to_remove = [
        # '/ImageSetSetMarkedIndividual = authc, roles[admin]'
        '/EncounterSetMarkedIndividual = authc, roles[admin]'
    ]
    new_permission_text = permission_text[:]
    for line in lines_to_remove:
        re.search(re.escape(line), permission_text)
        prefix = ut.named_field('prefix', '\\s*')
        suffix = ut.named_field('suffix', '\\s*\n')
        pattern = ('^' + prefix + re.escape(line) + suffix)
        match = re.search(pattern,
                          permission_text,
                          flags=re.MULTILINE | re.DOTALL)
        if match is None:
            continue
        newline = '<!--%s -->' % (line, )
        repl = ut.bref_field('prefix') + newline + ut.bref_field('suffix')
        new_permission_text = re.sub(pattern,
                                     repl,
                                     permission_text,
                                     flags=re.MULTILINE | re.DOTALL)
        assert new_permission_text != permission_text, (
            'text should have changed')
    if new_permission_text != permission_text:
        print('Need to write new permission texts')
        ut.writeto(permission_fpath, new_permission_text)
    else:
        print('Permission file seems to be ok')

    # Make sure we are using a non-process based database
    jdoconfig_fpath = join(unpacked_war_dpath,
                           'WEB-INF/classes/bundles/jdoconfig.properties')
    print('Fixing backend database config')
    print('jdoconfig_fpath = %r' % (jdoconfig_fpath, ))
    ut.assertpath(jdoconfig_fpath)
    jdoconfig_text = ut.readfrom(jdoconfig_fpath)
    #ut.vd(dirname(jdoconfig_fpath))
    #ut.editfile(jdoconfig_fpath)

    if mysql_mode:
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'mysql',
                                                 False)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'derby', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'sqlite', 1)
        mysql_user = '******'
        mysql_passwd = 'somepassword'
        mysql_dbname = 'ibeiswbtestdb'
        # Use mysql
        jdoconfig_text = re.sub('datanucleus.ConnectionUserName = .*$',
                                'datanucleus.ConnectionUserName = ' + mysql_user,
                                jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub('datanucleus.ConnectionPassword = .*$',
                                'datanucleus.ConnectionPassword = ' + mysql_passwd,
                                jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub('datanucleus.ConnectionURL *= *jdbc:mysql:.*$',
                                'datanucleus.ConnectionURL = jdbc:mysql://localhost:3306/' +
                                mysql_dbname,
                                jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub('^.*jdbc:mysql://localhost:3306/shepherd.*$',
                                '',
                                jdoconfig_text,
                                flags=re.MULTILINE)
    else:
        # Use SQLite
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'derby', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'mysql', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'sqlite',
                                                 False)
    ut.writeto(jdoconfig_fpath, jdoconfig_text)

    # Need to make sure wildbook can store information in a reasonable place
    #tomcat_data_dir = join(tomcat_startup_dir, 'webapps', 'wildbook_data_dir')
    tomcat_data_dir = join(webapps_dpath, 'wildbook_data_dir')
    ut.ensuredir(tomcat_data_dir)
    ut.writeto(join(tomcat_data_dir, 'test.txt'), 'A hosted test file')
    asset_store_fpath = join(unpacked_war_dpath, 'createAssetStore.jsp')
    asset_store_text = ut.read_from(asset_store_fpath)
    #data_path_pat = ut.named_field('data_path', 'new File(".*?").toPath')
    new_line = 'LocalAssetStore as = new LocalAssetStore("example Local AssetStore", new File("%s").toPath(), "%s", true);' % (
        tomcat_data_dir, 'http://localhost:8080/' + basename(tomcat_data_dir))
    # HACKY
    asset_store_text2 = re.sub('^LocalAssetStore as = .*$',
                               new_line,
                               asset_store_text,
                               flags=re.MULTILINE)
    ut.writeto(asset_store_fpath, asset_store_text2)
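
The config rewrites hinge on re.MULTILINE, which lets '$' match at each line end so a single property line can be replaced in place; a minimal sketch on a fake properties snippet:

import re

props = ('datanucleus.ConnectionUserName = olduser\n'
         'datanucleus.ConnectionPassword = oldpass\n')
props = re.sub('datanucleus.ConnectionUserName = .*$',
               'datanucleus.ConnectionUserName = newuser',
               props, flags=re.MULTILINE)
print(props)
# datanucleus.ConnectionUserName = newuser
# datanucleus.ConnectionPassword = oldpass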
Example #46
def update_wildbook_install_config(webapps_dpath, unpacked_war_dpath):
    """
    CommandLine:
        python -m ibeis ensure_local_war
        python -m ibeis update_wildbook_install_config
        python -m ibeis update_wildbook_install_config --show

    Example:
        >>> from ibeis.control.wildbook_manager import *  # NOQA
        >>> import ibeis
        >>> tomcat_dpath = find_installed_tomcat()
        >>> webapps_dpath = join(tomcat_dpath, 'webapps')
        >>> wb_target = ibeis.const.WILDBOOK_TARGET
        >>> unpacked_war_dpath = join(webapps_dpath, wb_target)
        >>> locals_ = ut.exec_func_src(update_wildbook_install_config, globals())
        >>> #update_wildbook_install_config(webapps_dpath, unpacked_war_dpath)
        >>> ut.quit_if_noshow()
        >>> ut.vd(unpacked_war_dpath)
        >>> ut.editfile(locals_['permission_fpath'])
        >>> ut.editfile(locals_['jdoconfig_fpath'])
        >>> ut.editfile(locals_['asset_store_fpath'])
    """
    mysql_mode = not ut.get_argflag('--nomysql')

    #if ut.get_argflag('--vd'):
    #    ut.vd(unpacked_war_dpath)
    #find_installed_tomcat
    # Make sure permissions are correctly set in wildbook
    # Comment out the line that requires authentication
    permission_fpath = join(unpacked_war_dpath, 'WEB-INF/web.xml')
    ut.assertpath(permission_fpath)
    permission_text = ut.readfrom(permission_fpath)
    lines_to_remove = [
        # '/ImageSetSetMarkedIndividual = authc, roles[admin]'
        '/EncounterSetMarkedIndividual = authc, roles[admin]'
    ]
    new_permission_text = permission_text[:]
    for line in lines_to_remove:
        re.search(re.escape(line), permission_text)
        prefix = ut.named_field('prefix', '\\s*')
        suffix = ut.named_field('suffix', '\\s*\n')
        pattern = ('^' + prefix + re.escape(line) + suffix)
        match = re.search(pattern, permission_text,
                          flags=re.MULTILINE | re.DOTALL)
        if match is None:
            continue
        newline = '<!--%s -->' % (line,)
        repl = ut.bref_field('prefix') + newline + ut.bref_field('suffix')
        new_permission_text = re.sub(pattern, repl, permission_text,
                                     flags=re.MULTILINE | re.DOTALL)
        assert new_permission_text != permission_text, (
            'text should have changed')
    if new_permission_text != permission_text:
        print('Need to write new permission texts')
        ut.writeto(permission_fpath, new_permission_text)
    else:
        print('Permission file seems to be ok')

    # Make sure we are using a non-process based database
    jdoconfig_fpath = join(unpacked_war_dpath,
                           'WEB-INF/classes/bundles/jdoconfig.properties')
    print('Fixing backend database config')
    print('jdoconfig_fpath = %r' % (jdoconfig_fpath,))
    ut.assertpath(jdoconfig_fpath)
    jdoconfig_text = ut.readfrom(jdoconfig_fpath)
    #ut.vd(dirname(jdoconfig_fpath))
    #ut.editfile(jdoconfig_fpath)

    if mysql_mode:
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'mysql', False)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'derby', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'sqlite', 1)
        mysql_user = '******'
        mysql_passwd = 'somepassword'
        mysql_dbname = 'ibeiswbtestdb'
        # Use mysql
        jdoconfig_text = re.sub(
            'datanucleus.ConnectionUserName = .*$',
            'datanucleus.ConnectionUserName = ' + mysql_user,
            jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub(
            'datanucleus.ConnectionPassword = .*$',
            'datanucleus.ConnectionPassword = ' + mysql_passwd,
            jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub(
            'datanucleus.ConnectionURL *= *jdbc:mysql:.*$',
            'datanucleus.ConnectionURL = jdbc:mysql://localhost:3306/' + mysql_dbname,
            jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub(
            '^.*jdbc:mysql://localhost:3306/shepherd.*$', '',
            jdoconfig_text, flags=re.MULTILINE)
    else:
        # Use SQLite
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'derby', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'mysql', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'sqlite', False)
    ut.writeto(jdoconfig_fpath, jdoconfig_text)

    # Need to make sure wildbook can store information in a reasonable place
    #tomcat_data_dir = join(tomcat_startup_dir, 'webapps', 'wildbook_data_dir')
    tomcat_data_dir = join(webapps_dpath, 'wildbook_data_dir')
    ut.ensuredir(tomcat_data_dir)
    ut.writeto(join(tomcat_data_dir, 'test.txt'), 'A hosted test file')
    asset_store_fpath = join(unpacked_war_dpath, 'createAssetStore.jsp')
    asset_store_text = ut.read_from(asset_store_fpath)
    #data_path_pat = ut.named_field('data_path', 'new File(".*?").toPath')
    new_line = 'LocalAssetStore as = new LocalAssetStore("example Local AssetStore", new File("%s").toPath(), "%s", true);' % (
        tomcat_data_dir,
        'http://localhost:8080/' + basename(tomcat_data_dir)
    )
    # HACKY
    asset_store_text2 = re.sub('^LocalAssetStore as = .*$', new_line, asset_store_text, flags=re.MULTILINE)
    ut.writeto(asset_store_fpath, asset_store_text2)