def ensure_explicit_namespace(fpath, namespace, varname_list):
    import re
    import utool as ut

    text = ut.read_from(fpath)
    orig_text = text
    new_text = text

    for varname in varname_list:
        regex = ''.join((
            ut.named_field('prefix', '[^.]'),
            ut.named_field('var', ut.whole_word(varname)),
        ))
        repl = ''.join((
            ut.bref_field('prefix'),
            namespace, '.',
            ut.bref_field('var')
        ))

        new_text = re.sub(regex, repl, new_text)

    textdiff = ut.get_textdiff(orig_text, new_text)
    print(textdiff)
    if ut.user_cmdline_prompt('Does the text look good?'):
        # if diff looks good write
        ut.write_to(fpath, new_text)
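A minimal standalone sketch of the same substitution with plain re, assuming ut.named_field(name, pat) expands to '(?P<name>pat)', ut.bref_field(name) to '\g<name>', and ut.whole_word(x) wraps x in \b word boundaries:

import re

text = 'x = compute(x) + compute(y)'
regex = r'(?P<prefix>[^.])(?P<var>\bcompute\b)'
repl = r'\g<prefix>np.\g<var>'
print(re.sub(regex, repl, text))
# -> 'x = np.compute(x) + np.compute(y)'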
Example #2
def update_wildbook_config(ibs, wildbook_tomcat_path, dryrun=False):
    wildbook_properteis_dpath = join(wildbook_tomcat_path,
                                     'WEB-INF/classes/bundles/')
    print('[ibs.update_wildbook_config()] Wildbook properties=%r' % (
        wildbook_properteis_dpath, ))
    # The src file is non-standard. It should be removed here as well
    wildbook_config_fpath_dst = join(wildbook_properteis_dpath,
                                     'commonConfiguration.properties')
    ut.assert_exists(wildbook_properteis_dpath)
    # for some reason the .default file is not there, but that should be ok
    orig_content = ut.read_from(wildbook_config_fpath_dst)
    content = orig_content
    content = re.sub('IBEIS_DB_path = .*',
                     'IBEIS_DB_path = ' + ibs.get_db_core_path(), content)
    content = re.sub('IBEIS_image_path = .*',
                     'IBEIS_image_path = ' + ibs.get_imgdir(), content)

    # Write to the configuration if it is different
    if orig_content != content:
        need_sudo = not ut.is_file_writable(wildbook_config_fpath_dst)
        if need_sudo:
            quoted_content = '"%s"' % (content, )
            print('Attempting to gain sudo access to update wildbook config')
            command = ['sudo', 'sh', '-c', '\'', 'echo',
                       quoted_content, '>', wildbook_config_fpath_dst, '\'']
            # ut.cmd(command, sudo=True)
            command = ' '.join(command)
            if not dryrun:
                os.system(command)
        else:
            ut.write_to(wildbook_config_fpath_dst, content)
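The sudo branch above interpolates the file content into a shell string, which breaks if the content itself contains double quotes. A hedged alternative sketch that pipes the content to 'sudo tee' on stdin instead (assuming sudo can prompt on the controlling terminal):

import subprocess

def sudo_write(fpath, content):
    # Feed the bytes on stdin; tee writes them to fpath, and we discard
    # the copy that tee echoes to stdout.
    subprocess.run(['sudo', 'tee', fpath],
                   input=content.encode('utf-8'),
                   stdout=subprocess.DEVNULL,
                   check=True)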
Example #3
def update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun=False):
    """
    #if use_config_file and wildbook_tomcat_path:
    #    # Update the Wildbook configuration to see *THIS* wbia database
    #    with lockfile.LockFile(lock_fpath):
    #        update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun)
    """
    wildbook_properteis_dpath = join(wildbook_tomcat_path,
                                     'WEB-INF/classes/bundles/')
    logger.info('[ibs.update_wildbook_ia_config()] Wildbook properties=%r' %
                (wildbook_properteis_dpath, ))
    # The src file is non-standard. It should be removed here as well
    wildbook_config_fpath_dst = join(wildbook_properteis_dpath,
                                     'commonConfiguration.properties')
    ut.assert_exists(wildbook_properteis_dpath)
    # for some reason the .default file is not there, but that should be ok
    orig_content = ut.read_from(wildbook_config_fpath_dst)
    content = orig_content
    # Make sure wildbook knows where to find us
    if False:
        # Old way of telling WB where to find IA
        content = re.sub('IBEIS_DB_path = .*',
                         'IBEIS_DB_path = ' + ibs.get_db_core_path(), content)
        content = re.sub('IBEIS_image_path = .*',
                         'IBEIS_image_path = ' + ibs.get_imgdir(), content)

    web_port = ibs.get_web_port_via_scan()
    if web_port is None:
        raise ValueError('IA web server is not running on any expected port')
    ia_hostport = 'http://localhost:%s' % (web_port, )
    ia_rest_prefix = ut.named_field('prefix', 'IBEISIARestUrl.*')
    host_port = ut.named_field('host_port', 'http://.*?:[0-9]+')
    content = re.sub(ia_rest_prefix + host_port,
                     ut.bref_field('prefix') + ia_hostport, content)

    # Write to the configuration if it is different
    if orig_content != content:
        need_sudo = not ut.is_file_writable(wildbook_config_fpath_dst)
        if need_sudo:
            quoted_content = '"%s"' % (content, )
            logger.info(
                'Attempting to gain sudo access to update wildbook config')
            command = [
                'sudo',
                'sh',
                '-c',
                "'",
                'echo',
                quoted_content,
                '>',
                wildbook_config_fpath_dst,
                "'",
            ]
            # ut.cmd(command, sudo=True)
            command = ' '.join(command)
            if not dryrun:
                os.system(command)
        else:
            ut.write_to(wildbook_config_fpath_dst, content)
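A standalone sketch of the REST-URL rewrite above; the property name and address are hypothetical, and the named-field helpers are assumed to expand to (?P<name>...) groups and \g<name> backreferences:

import re

content = 'IBEISIARestUrlAddAnnotations = http://52.33.105.88:5555'
regex = r'(?P<prefix>IBEISIARestUrl.*)http://.*?:[0-9]+'
print(re.sub(regex, r'\g<prefix>http://localhost:5000', content))
# -> 'IBEISIARestUrlAddAnnotations = http://localhost:5000'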
Example #4
def ensure_inno_script():
    """ writes inno script to disk for win32 installer build """
    cwd = get_setup_dpath()
    iss_script_fpath = join(cwd, '_installers', 'win_installer_script.iss')
    # THE ISS USES {} AS SYNTAX. CAREFUL
    #app_publisher = 'Rensselaer Polytechnic Institute'
    #app_name = 'IBEIS'
    import ibeis
    iss_script_code = ut.codeblock(
        r'''
        ; Script generated by the Inno Setup Script Wizard.
        ; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
        ; http://www.jrsoftware.org/isdl.php

        [Setup]
        ; NOTE: The value of AppId uniquely identifies this application.
        ; Do not use the same AppId value in installers for other applications.
        ; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
        ; Also it seems like the off-balanced curly brace is necessary
        AppId={{47BE3DA2-261D-4672-9849-18BB2EB382FC}
        AppName=IBEIS
        AppVersion=''' + str(ibeis.__version__) + '''
        ;AppVerName=IBEIS 1
        AppPublisher=Rensselaer Polytechnic Institute
        AppPublisherURL=ibeis.org ;www.rpi.edu/~crallj/
        AppSupportURL=ibeis.org ;www.rpi.edu/~crallj/
        AppUpdatesURL=ibeis.org ;www.rpi.edu/~crallj/
        DefaultDirName={pf}\IBEIS
        DefaultGroupName=IBEIS
        OutputBaseFilename=ibeis-win32-setup
        SetupIconFile=ibsicon.ico
        Compression=lzma
        SolidCompression=yes

        [Languages]
        Name: "english"; MessagesFile: "compiler:Default.isl"

        [Tasks]
        Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked

        [Files]
        Source: "..\dist\ibeis\IBEISApp.exe"; DestDir: "{app}"; Flags: ignoreversion
        Source: "..\dist\ibeis\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
        ; NOTE: Don't use "Flags: ignoreversion" on any shared system files

        [Icons]
        Name: "{group}\ibeis"; Filename: "{app}\IBEISApp.exe"
        Name: "{commondesktop}\ibeis"; Filename: "{app}\IBEISApp.exe"; Tasks: desktopicon

        [Run]
        Filename: "{app}\IBEISApp.exe"; Description: "{cm:LaunchProgram,IBEIS}"; Flags: nowait postinstall skipifsilent
        '''
    )
    ut.write_to(iss_script_fpath, iss_script_code, onlyifdiff=True)
    assert ut.checkpath(iss_script_fpath, verbose=True, info=True), 'cannot find iss_script_fpath'
    return iss_script_fpath
Example #5
def test_file_hash():
    resdir = utool.get_app_resource_dir('utool')
    test_fpath = join(resdir, 'lorium_ipsum.txt')
    if not utool.checkpath(test_fpath, verbose=True, n=100):
        utool.write_to(test_fpath, lorium_text)
    hash_ = utool.get_file_hash(test_fpath)
    target_hash_ = b'\xd1Y\xe5\xa2\xc1\xd8\xb8\nS\xb1?\x16\xfe\xc5\x88\xbd\x9e\xb4\xe3\xda'
    print(repr(hash_))
    print(repr(target_hash_))
    assert hash_ == target_hash_
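The 20-byte target digest above matches SHA-1's output size, which suggests (an assumption) that utool.get_file_hash defaults to SHA-1. A stdlib-only sketch:

import hashlib

def file_sha1(fpath, blocksize=65536):
    hasher = hashlib.sha1()
    with open(fpath, 'rb') as f:
        # Hash in fixed-size chunks so large files never load fully into memory
        for chunk in iter(lambda: f.read(blocksize), b''):
            hasher.update(chunk)
    return hasher.digest()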
Example #7
def change_doctestcommand_to_use_dashm_flag():
    r"""
    VimRegex: # not sure how to execute the replace command in vim in one line
    %s/python\s*\([A-Za-z_]+[\\/]\S*\)\.py\(.*\)/python -m \1 \2

    """
    # http://stackoverflow.com/questions/18737863/passing-a-function-to-re-sub-in-python
    # CANNOT USE [^ ] FOR SOME GOD DAMN REASON USE \S instead
    regex_list = [r'python [A-Za-z_]+[\\/]\S* --allexamples']
    dpath_list = [
        ut.ensure_crossplat_path(ut.truepath('~/code/utool/utool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/ibeis/ibeis')),
        ut.ensure_crossplat_path(ut.truepath('~/code/vtool/vtool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/plottool/plottool')),
        ut.ensure_crossplat_path(ut.truepath('~/code/guitool/guitool')),
    ]
    #ut.named_field_repl(['python ', ('modrelpath',),])
    #['python ', ('modrelpath', 'utool[\\/].*'), '--allexamples'])
    res = ut.grep(regex_list,
                  recursive=True,
                  dpath_list=dpath_list,
                  verbose=True)
    found_filestr_list, found_lines_list, found_lxs_list = res
    fpath = res[0][0]

    import re
    keypat_list = [
        ('prefix', r'python\s*'),
        ('modrelpath', r'[A-Za-z_]+[\\/]\S*'),
        ('suffix', '.*'),
    ]
    namedregex = ut.named_field_regex(keypat_list)

    # Define function to pass to re.sub
    def replmodpath(matchobj):
        groupdict_ = matchobj.groupdict()
        relpath = groupdict_['modrelpath']
        prefix = groupdict_['prefix']
        suffix = groupdict_['suffix']
        modname = relpath
        modname = modname.replace('\\', '.')
        modname = modname.replace('/', '.')
        modname = modname.replace('.py', '')
        return prefix + '-m ' + modname + suffix

    for fpath in found_filestr_list:
        text = ut.read_from(fpath)
        #matchobj = re.search(namedregex, text, flags=re.MULTILINE)
        #print(text)
        #for matchobj in re.finditer(namedregex, text):
        #    print(ut.get_match_text(matchobj))
        #    print('--')
        newtext = re.sub(namedregex, replmodpath, text)
        # Perform replacement
        ut.write_to(fpath, newtext)
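A minimal standalone version of the replacement-function trick used above (passing a callable to re.sub, per the linked Stack Overflow question):

import re

pattern = r'(?P<prefix>python\s+)(?P<modrelpath>[A-Za-z_]+[\\/]\S*)(?P<suffix>.*)'

def repl(matchobj):
    d = matchobj.groupdict()
    modname = d['modrelpath'].replace('\\', '.').replace('/', '.')
    if modname.endswith('.py'):
        modname = modname[:-3]
    return d['prefix'] + '-m ' + modname + d['suffix']

line = 'python utool/util_path.py --allexamples'
print(re.sub(pattern, repl, line))
# -> 'python -m utool.util_path --allexamples'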
Example #9
def inject_python_code(fpath, patch_code, tag=None,
                       inject_location='after_imports'):
    """
    DEPRECATE
    puts code into files on disk
    """
    import utool as ut
    assert tag is not None, 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT'
    text = ut.read_from(fpath)
    comment_start_tag = '# <util_inject:%s>' % tag
    comment_end_tag  = '# </util_inject:%s>' % tag

    tagstart_txtpos = text.find(comment_start_tag)
    tagend_txtpos = text.find(comment_end_tag)

    text_lines = ut.split_python_text_into_lines(text)

    # split the file into two parts and inject code between them
    if tagstart_txtpos != -1 or tagend_txtpos != -1:
        assert tagstart_txtpos != -1, 'end tag found without start tag'
        assert tagend_txtpos != -1, 'start tag found without end tag'

        for pos, line in enumerate(text_lines):
            if line.startswith(comment_start_tag):
                tagstart_pos = pos
            if line.startswith(comment_end_tag):
                tagend_pos = pos
        part1 = text_lines[0:tagstart_pos]
        part2 = text_lines[tagend_pos + 1:]
    else:
        if inject_location == 'after_imports':
            first_nonimport_pos = 0
            for line in text_lines:
                list_ = ['import ', 'from ', '#', ' ']
                isvalid = (len(line) == 0 or
                           any(line.startswith(str_) for str_ in list_))
                if not isvalid:
                    break
                first_nonimport_pos += 1
            part1 = text_lines[0:first_nonimport_pos]
            part2 = text_lines[first_nonimport_pos:]
        else:
            raise AssertionError('Unknown inject location')

    newtext = (
        '\n'.join(part1 + [comment_start_tag]) +
        '\n' + patch_code + '\n' +
        '\n'.join( [comment_end_tag] + part2)
    )
    text_backup_fname = fpath + '.' + ut.get_timestamp() + '.bak'
    ut.write_to(text_backup_fname, text)
    ut.write_to(fpath, newtext)
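A condensed, re-runnable sketch of the tag-replacement branch above, using string splitting instead of line scanning (assumes each tag occurs exactly once):

def inject_between_tags(text, patch_code, tag):
    start = '# <util_inject:%s>' % tag
    end = '# </util_inject:%s>' % tag
    if start not in text or end not in text:
        raise ValueError('both tags must be present')
    head, rest = text.split(start, 1)
    _, tail = rest.split(end, 1)
    # Replace whatever currently sits between the tags with patch_code
    return head + start + '\n' + patch_code + '\n' + end + tail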
Example #10
def inject_python_code(fpath,
                       patch_code,
                       tag=None,
                       inject_location='after_imports'):
    """
    DEPRECATE
    puts code into files on disk
    """
    import utool as ut
    assert tag is not None, 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT'
    text = ut.read_from(fpath)
    comment_start_tag = '# <util_inject:%s>' % tag
    comment_end_tag = '# </util_inject:%s>' % tag

    tagstart_txtpos = text.find(comment_start_tag)
    tagend_txtpos = text.find(comment_end_tag)

    text_lines = ut.split_python_text_into_lines(text)

    # split the file into two parts and inject code between them
    if tagstart_txtpos != -1 or tagend_txtpos != -1:
        assert tagstart_txtpos != -1, 'end tag found without start tag'
        assert tagend_txtpos != -1, 'start tag found without end tag'

        for pos, line in enumerate(text_lines):
            if line.startswith(comment_start_tag):
                tagstart_pos = pos
            if line.startswith(comment_end_tag):
                tagend_pos = pos
        part1 = text_lines[0:tagstart_pos]
        part2 = text_lines[tagend_pos + 1:]
    else:
        if inject_location == 'after_imports':
            first_nonimport_pos = 0
            for line in text_lines:
                list_ = ['import ', 'from ', '#', ' ']
                isvalid = (len(line) == 0
                           or any(line.startswith(str_) for str_ in list_))
                if not isvalid:
                    break
                first_nonimport_pos += 1
            part1 = text_lines[0:first_nonimport_pos]
            part2 = text_lines[first_nonimport_pos:]
        else:
            raise AssertionError('Unknown inject location')

    newtext = ('\n'.join(part1 + [comment_start_tag]) + '\n' + patch_code +
               '\n' + '\n'.join([comment_end_tag] + part2))
    text_backup_fname = fpath + '.' + ut.get_timestamp() + '.bak'
    ut.write_to(text_backup_fname, text)
    ut.write_to(fpath, newtext)
Example #11
def init_theanorc():
    theanorc_fpath = join(os.getenv('HOME'), '.theanorc')
    theanorc_text = ut.codeblock('''
        [global]
        floatX = float32
        device = gpu0
        openmp = True

        [nvcc]
        fastmath = True
        ''')
    if ut.checkpath(theanorc_fpath, verbose=True):
        if not ut.arg_you_sure('overwrite?'):
            return
    ut.write_to(theanorc_fpath, theanorc_text)
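ut.codeblock appears to behave like textwrap.dedent on an indented triple-quoted block (an assumption); a stdlib-only sketch of building the same config text:

import textwrap

theanorc_text = textwrap.dedent('''
    [global]
    floatX = float32
    device = gpu0
    openmp = True

    [nvcc]
    fastmath = True
    ''').strip()
print(theanorc_text)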
Example #12
def fix_section_common_errors(tex_fpath, dryrun=True):
    # Read in text and ensure ascii format
    text = ut.read_from(tex_fpath)

    new_text = text
    # Fix all capitals
    search_repl_list = constants_tex_fixes.CAPITAL_LIST
    for repl in search_repl_list:
        pattern = ut.regex_word(re.escape(repl))
        new_text = re.sub(pattern, repl, new_text, flags=re.IGNORECASE)
    #new_text = re.sub(pattern, fix_capitalization, text, flags=re.MULTILINE)

    if not dryrun:
        ut.write_to(tex_fpath, new_text)
    else:
        ut.print_difftext(ut.get_textdiff(text, new_text, 0))
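A minimal sketch of the whole-word, case-insensitive canonicalization above, assuming ut.regex_word(pat) wraps pat in \b word boundaries; the list entries here are a hypothetical stand-in for constants_tex_fixes.CAPITAL_LIST:

import re

CAPITAL_LIST = ['Markov', 'Bayes']
text = 'a markov chain and a BAYES net'
for repl in CAPITAL_LIST:
    pattern = r'\b' + re.escape(repl) + r'\b'
    text = re.sub(pattern, repl, text, flags=re.IGNORECASE)
print(text)  # -> 'a Markov chain and a Bayes net'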
Example #13
def update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun=False):
    """
    #if use_config_file and wildbook_tomcat_path:
    #    # Update the Wildbook configuration to see *THIS* ibeis database
    #    with lockfile.LockFile(lock_fpath):
    #        update_wildbook_ia_config(ibs, wildbook_tomcat_path, dryrun)
    """
    wildbook_properteis_dpath = join(wildbook_tomcat_path,
                                     'WEB-INF/classes/bundles/')
    print('[ibs.update_wildbook_ia_config()] Wildbook properties=%r' % (
        wildbook_properteis_dpath, ))
    # The src file is non-standard. It should be removed here as well
    wildbook_config_fpath_dst = join(wildbook_properteis_dpath,
                                     'commonConfiguration.properties')
    ut.assert_exists(wildbook_properteis_dpath)
    # for some reason the .default file is not there, but that should be ok
    orig_content = ut.read_from(wildbook_config_fpath_dst)
    content = orig_content
    # Make sure wildbook knows where to find us
    if False:
        # Old way of telling WB where to find IA
        content = re.sub('IBEIS_DB_path = .*',
                         'IBEIS_DB_path = ' + ibs.get_db_core_path(), content)
        content = re.sub('IBEIS_image_path = .*',
                         'IBEIS_image_path = ' + ibs.get_imgdir(), content)

    ia_hostport = 'http://localhost:5000'
    ia_rest_prefix = ut.named_field('prefix', 'IBEISIARestUrl.*')
    host_port = ut.named_field('host_port', 'http://.*?:[0-9]+')
    content = re.sub(ia_rest_prefix + host_port, ut.bref_field('prefix') + ia_hostport, content)

    # Write to the configuration if it is different
    if orig_content != content:
        need_sudo = not ut.is_file_writable(wildbook_config_fpath_dst)
        if need_sudo:
            quoted_content = '"%s"' % (content, )
            print('Attempting to gain sudo access to update wildbook config')
            command = ['sudo', 'sh', '-c', '\'', 'echo',
                       quoted_content, '>', wildbook_config_fpath_dst, '\'']
            # ut.cmd(command, sudo=True)
            command = ' '.join(command)
            if not dryrun:
                os.system(command)
        else:
            ut.write_to(wildbook_config_fpath_dst, content)
Example #14
def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None):
    """
    Helper that writes a file if -w is given on the command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
    """
    import utool as ut
    dowrite = ut.get_argflag(('-w', '--write'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None

    if fullprint is None:
        fullprint = True

    if fullprint is False:
        fullprint = ut.get_argflag('--print')

    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL___')
        else:
            print('specify --print to write to stdout')
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                import unicodedata
                textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
                ut.print_difftext(textdiff)

        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)
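A hedged sketch of the flag handling above using plain sys.argv (ut.get_argflag and ut.get_argval are assumed to behave roughly like this for boolean and valued flags):

import sys

def parse_autogen_flags(argv=None):
    argv = sys.argv if argv is None else argv
    dowrite = '-w' in argv or '--write' in argv
    # --diff may appear bare (a flag) or as --diff=<num context lines>
    show_diff = any(a == '--diff' or a.startswith('--diff=') for a in argv)
    return dowrite, show_diff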
Example #15
def __dump_text_report(allres, report_type):
    if 'report_type' not in vars():
        report_type = 'rankres_str'
    print('[rr2] Dumping textfile: ' + report_type)
    report_str = allres.__dict__[report_type]
    # Get directories
    result_dir    = allres.ibs.dirs.result_dir
    timestamp_dir = join(result_dir, 'timestamped_results')
    utool.ensurepath(timestamp_dir)
    utool.ensurepath(result_dir)
    # Write to timestamp and result dir
    timestamp = utool.get_timestamp()
    csv_timestamp_fname = report_type + allres.title_suffix + timestamp + '.csv'
    csv_timestamp_fpath = join(timestamp_dir, csv_timestamp_fname)
    csv_fname  = report_type + allres.title_suffix + '.csv'
    csv_fpath = join(result_dir, csv_fname)
    utool.write_to(csv_fpath, report_str)
    utool.write_to(csv_timestamp_fpath, report_str)
Example #17
def translate(*paths):
    """ Translates a list of paths """

    cy_bench_list = []
    for fpath in paths:
        if isfile(fpath):
            abspath = utool.unixpath(fpath)
            cy_bench = translate_fpath(abspath)
            if cy_bench is not None:
                cy_bench_list.append(cy_bench)

    if len(cy_bench_list) > 0:
        runbench_shtext = cyth_benchmarks.build_runbench_shell_text(cy_bench_list)
        runbench_pytext = cyth_benchmarks.build_runbench_pyth_text(cy_bench_list)

        utool.write_to('_old_run_cyth_benchmarks.sh', runbench_shtext)
        utool.write_to('run_cyth_benchmarks.py', runbench_pytext)
        #try:
        os.chmod('_old_run_cyth_benchmarks.sh', 33277)
        os.chmod('run_cyth_benchmarks.py', 33277)
Example #18
def write_modscript_alias(fpath, modname, args='', pyscript='python'):
    """
    convenience function because $@ is annoying to paste into the terminal
    """
    import utool as ut
    from os.path import splitext
    allargs_dict = {
        '.sh': ' $@',
        '.bat': ' %1',
    }
    _, script_ext = splitext(fpath)
    if script_ext not in ['.sh', '.bat']:
        script_ext = '.bat' if ut.WIN32 else '.sh'
    allargs = (args + allargs_dict[script_ext]).strip(' ')
    if not modname.endswith('.py'):
        fmtstr = '{pyscript} -m {modname} {allargs}'
    else:
        fmtstr = '{pyscript} {modname} {allargs}'

    cmdstr = fmtstr.format(pyscript=pyscript, modname=modname, allargs=allargs)
    ut.write_to(fpath, cmdstr)
    os.system('chmod +x ' + fpath)
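Hypothetical usage (the module name is made up): write_modscript_alias('tgen.sh', 'mypkg.tool') would produce a tgen.sh containing

    python -m mypkg.tool $@

so extra terminal arguments are forwarded to the module on POSIX (%1 plays the same role in a .bat file).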
Example #19
def translate(*paths):
    """ Translates a list of paths """

    cy_bench_list = []
    for fpath in paths:
        if isfile(fpath):
            abspath = utool.unixpath(fpath)
            cy_bench = translate_fpath(abspath)
            if cy_bench is not None:
                cy_bench_list.append(cy_bench)

    if len(cy_bench_list) > 0:
        runbench_shtext = cyth_benchmarks.build_runbench_shell_text(
            cy_bench_list)
        runbench_pytext = cyth_benchmarks.build_runbench_pyth_text(
            cy_bench_list)

        utool.write_to('_old_run_cyth_benchmarks.sh', runbench_shtext)
        utool.write_to('run_cyth_benchmarks.py', runbench_pytext)
        #try:
        os.chmod('_old_run_cyth_benchmarks.sh', 33277)
        os.chmod('run_cyth_benchmarks.py', 33277)
Example #20
def ensure_explicit_namespace(fpath, namespace, varname_list):
    import re
    import utool as ut

    text = ut.read_from(fpath)
    orig_text = text
    new_text = text

    for varname in varname_list:
        regex = ''.join((
            ut.named_field('prefix', '[^.]'),
            ut.named_field('var', ut.whole_word(varname)),
        ))
        repl = ''.join(
            (ut.bref_field('prefix'), namespace, '.', ut.bref_field('var')))

        new_text = re.sub(regex, repl, new_text)

    textdiff = ut.get_textdiff(orig_text, new_text)
    print(textdiff)
    if ut.user_cmdline_prompt('Does the text look good?'):
        # if diff looks good write
        ut.write_to(fpath, new_text)
Example #21
def translate_fpath(py_fpath):
    """ creates a cython pyx file from a python file with cyth tags
    >>> from cyth.cyth_script import *  # NOQA
    >>> py_fpath = utool.unixpath('~/code/vtool/vtool/linalg.py')
    """
    # If -a is given, generate cython html for each pyx file
    # Get cython pyx and benchmark output path
    cy_pyxpath = cyth_helpers.get_cyth_path(py_fpath)
    cy_pxdpath = cyth_helpers.get_cyth_pxd_path(py_fpath)
    cy_benchpath = cyth_helpers.get_cyth_bench_path(py_fpath)
    # Infer the python module name
    py_modname = cyth_helpers.get_py_module_name(py_fpath)
    # Read the python file
    py_text = utool.read_from(py_fpath, verbose=False)
    # don't parse files without tags
    if py_text.find('CYTH') == -1:
        return None
    print('\n___________________')
    print('[cyth.translate_fpath] py_fpath=%r' % py_fpath)
    # Parse the python file
    visitor = cyth_parser.CythVisitor(py_modname=py_modname)
    visitor.visit(ast.parse(py_text))
    # Get the generated pyx file and benchmark file
    pyx_text, pxd_text = visitor.get_result()
    bench_text = visitor.get_benchmarks()
    # Write pyx and benchmark
    utool.write_to(cy_pyxpath, pyx_text)
    utool.write_to(cy_pxdpath, pxd_text, verbose=False)
    utool.write_to(cy_benchpath, bench_text, verbose=False)
    if CYTHON_HTML:
        print('[cyth.translate_fpath] generating annotation html')
        cython_exe = utool.get_cython_exe()
        os.system(cython_exe + ' -a ' + cy_pyxpath)
    if CYTHON_MAKE_C:
        print('[cyth.translate_fpath] generating cython c')
        cython_exe = utool.get_cython_exe()
        os.system(cython_exe + ' ' + cy_pyxpath)
    if CYTHON_BUILD:
        gcc_exe = 'gcc'
        print('[cyth.translate_fpath] generating c library')
        c_path = cyth_helpers.get_c_path(cy_pyxpath)
        #C:\MinGW\bin\gcc.exe -w -Wall -m32 -lpython27 -IC:\Python27\Lib\site-packages\numpy\core\include -IC:\Python27\include -IC:\Python27\PC -IC:\Python27\Lib\site-packages\numpy\core\include -LC:\Python27\libs -o _linalg_cyth.pyd -c _linalg_cyth.c
        os.system(gcc_exe + ' ' + c_path)
    return cy_benchpath
Example #23
        def_test('DOC', testcmds=testcmds, default=True)
    ]

    shscript_text = ut.make_run_tests_script_text(test_headers, test_argvs,
                                                  quick_tests, repodir,
                                                  exclude_list)

    return shscript_text, pyscript_text

if __name__ == '__main__':
    """
    CommandLine:
        python autogen_test_script.py
        python autogen_test_script.py -w
    """
    shscript_text, pyscript_text = autogen_ibeis_runtest()
    runtest_fname = None

    if runtest_fname is None and ut.get_argflag('-w'):
        runtest_fname = 'run_tests'

    if runtest_fname is None and ut.get_argflag('-t'):
        runtest_fname = '_run_tests2'

    if runtest_fname is not None:
        ut.write_to('shell_' + runtest_fname + '.sh', shscript_text)
        ut.write_to(runtest_fname + '.py', pyscript_text)

    elif ut.get_argflag(('--verbose', '-v')):
        print(shscript_text)
        print('')
        print(pyscript_text)
Example #24
def tune_flann(dpts,
               target_precision=.90,
               build_weight=0.50,
               memory_weight=0.00,
               sample_fraction=0.01):
    r"""

    References:
        http://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_pami2014.pdf
        http://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
        http://docs.opencv.org/trunk/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html

    Math::
        the cost of an algorithm is:

        LaTeX:
            \cost = \frac
                {\search + build\_weight \cdot \build}
                {\minoverparams(\search + build\_weight \cdot \build)}
                + memory\_weight \cdot \memory

    Args:
        dpts (ndarray):

        target_precision (float): number between 0 and 1 representing desired
            accuracy. Higher values are more accurate.

        build_weight (float): importance weight given to minimizing build time
            relative to search time. This number can range from 0 to infinity.
            typically because building is a more complex computation you want
            to keep the number relatively low, (less than 1) otherwise you'll
            end up getting a linear search (no build time).

        memory_weight (float): Importance of memory relative to total speed.
            A value less than 1 gives more importance to the time spent and a
            value greater than 1 gives more importance to the memory usage.

        sample_fraction (float): number between 0 and 1 representing the
            fraction of the input data to use in the optimization. A higher
            number uses more data.

    Returns:
        dict: tuned_params

    CommandLine:
        python -m vtool.nearest_neighbors --test-tune_flann

    """
    with ut.Timer('tuning flann'):
        print('Autotuning flann with %d %dD vectors' %
              (dpts.shape[0], dpts.shape[1]))
        print('a sample of %d vectors will be used' %
              (int(dpts.shape[0] * sample_fraction)))
        flann = pyflann.FLANN()
        #num_data = len(dpts)
        flann_atkwargs = dict(algorithm='autotuned',
                              target_precision=target_precision,
                              build_weight=build_weight,
                              memory_weight=memory_weight,
                              sample_fraction=sample_fraction)
        suffix = repr(flann_atkwargs)
        badchar_list = ',{}\': '
        for badchar in badchar_list:
            suffix = suffix.replace(badchar, '')
        print('flann_atkwargs:')
        print(utool.dict_str(flann_atkwargs))
        print('starting optimization')
        tuned_params = flann.build_index(dpts, **flann_atkwargs)
        print('finished optimization')

        # The algorithm is sometimes returned as 'default', which is not
        # very useful because the default name is embedded in the pyflann
        # module where most would not care to look. This finds the default
        # name for you.
        for key in ['algorithm', 'centers_init', 'log_level']:
            val = tuned_params.get(key, None)
            if val == 'default':
                dict_ = pyflann.FLANNParameters._translation_[key]
                other_algs = ut.dict_find_other_sameval_keys(dict_, 'default')
                assert len(other_algs) == 1, 'more than 1 default for key=%r' % (key,)
                tuned_params[key] = other_algs[0]

        common_params = [
            'algorithm',
            'checks',
        ]
        relevant_params_dict = dict(
            linear=['algorithm'],
            #---
            kdtree=['trees'],
            #---
            kmeans=[
                'branching',
                'iterations',
                'centers_init',
                'cb_index',
            ],
            #---
            lsh=[
                'table_number',
                'key_size',
                'multi_probe_level',
            ],
        )
        relevant_params_dict['composite'] = (
            relevant_params_dict['kmeans'] +
            relevant_params_dict['kdtree'] +
            common_params)
        relevant_params_dict['kmeans'] += common_params
        relevant_params_dict['kdtree'] += common_params
        relevant_params_dict['lsh'] += common_params

        #kdtree_single_params = [
        #    'leaf_max_size',
        #]
        #other_params = [
        #    'build_weight',
        #    'sorted',
        #]
        out_file = 'flann_tuned' + suffix
        utool.write_to(out_file,
                       ut.dict_str(tuned_params, sorted_=True, newlines=True))
        flann.delete_index()
        if tuned_params['algorithm'] in relevant_params_dict:
            print('relevant_params=')
            relevant_params = relevant_params_dict[tuned_params['algorithm']]
            print(
                ut.dict_str(ut.dict_subset(tuned_params, relevant_params),
                            sorted_=True,
                            newlines=True))
            print('irrelevant_params=')
            print(
                ut.dict_str(ut.dict_setdiff(tuned_params, relevant_params),
                            sorted_=True,
                            newlines=True))
        else:
            print('unknown tuned algorithm=%r' % (tuned_params['algorithm'], ))

        print('all_tuned_params=')
        print(ut.dict_str(tuned_params, sorted_=True, newlines=True))
    return tuned_params
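A standalone sketch of the filename-suffix sanitation used above (stripping repr punctuation so the tuned-parameter dict can name the output file):

flann_atkwargs = dict(algorithm='autotuned', target_precision=0.9)
suffix = repr(flann_atkwargs)
for badchar in ',{}\': ':
    suffix = suffix.replace(badchar, '')
print('flann_tuned' + suffix)
# -> flann_tunedalgorithmautotunedtarget_precision0.9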
Example #25
def autogen_sphinx_apidoc():
    r"""
    autogen_sphinx_docs.py

    Ignore:
        C:\Python27\Scripts\autogen_sphinx_docs.py
        autogen_sphinx_docs.py

        pip uninstall sphinx
        pip install sphinx
        pip install sphinxcontrib-napoleon
        pip install sphinx --upgrade
        pip install sphinxcontrib-napoleon --upgrade

        cd C:\Python27\Scripts
        ls C:\Python27\Scripts

        python -c "import sphinx; print(sphinx.__version__)"

    CommandLine:
        python -m utool.util_setup --exec-autogen_sphinx_apidoc

    Example:
        >>> # SCRIPT
        >>> from utool.util_setup import *  # NOQA
        >>> autogen_sphinx_apidoc()
    """
    # TODO: assert sphinx-apidoc exe is found
    # TODO: make find_exe work?
    import utool as ut

    def build_sphinx_apidoc_cmdstr():
        print('')
        print('if this fails try: sudo pip install sphinx')
        print('')
        apidoc = 'sphinx-apidoc'
        if ut.WIN32:
            winprefix = 'C:/Python27/Scripts/'
            sphinx_apidoc_exe = winprefix + apidoc + '.exe'
        else:
            sphinx_apidoc_exe = apidoc
        apidoc_argfmt_list = [
            sphinx_apidoc_exe,
            '--force',
            '--full',
            '--maxdepth="{maxdepth}"',
            '--doc-author="{author}"',
            '--doc-version="{doc_version}"',
            '--doc-release="{doc_release}"',
            '--output-dir="_doc"',
            #'--separate',  # Put documentation for each module on its own page
            '--private',  # Include "_private" modules
            '{pkgdir}',
        ]
        outputdir = '_doc'
        author = ut.parse_author()
        packages = ut.find_packages(maxdepth=1)
        assert len(packages) != 0, 'directory must contain at least one package'
        if len(packages) > 1:
            assert len(packages) == 1,\
                ('FIXME I dont know what to do with more than one root package: %r'
                 % (packages,))
        pkgdir = packages[0]
        version = ut.parse_package_for_version(pkgdir)
        modpath = dirname(ut.truepath(pkgdir))

        apidoc_fmtdict = {
            'author': author,
            'maxdepth': '8',
            'pkgdir': pkgdir,
            'doc_version': version,
            'doc_release': version,
            'outputdir': outputdir,
        }
        ut.assert_exists('setup.py')
        ut.ensuredir('_doc')
        apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
        apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
        print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir, ))
        if ut.VERBOSE:
            print(ut.dict_str(apidoc_fmtdict))
        return apidoc_cmdstr, modpath, outputdir

    def build_conf_replstr():
        #
        # Make custom edits to conf.py
        # FIXME:
        #ext_search_text = ut.unindent(
        #    r'''
        #    extensions = [
        #    [^\]]*
        #    ]
        #    ''')
        ext_search_text = r'extensions = \[[^/]*\]'
        # TODO: http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
        #'sphinx.ext.mathjax',
        exclude_modules = []  # ['ibeis.all_imports']
        ext_repl_text = ut.codeblock('''
            MOCK_MODULES = {exclude_modules}
            if len(MOCK_MODULES) > 0:
                import mock
                for mod_name in MOCK_MODULES:
                    sys.modules[mod_name] = mock.Mock()

            extensions = [
                'sphinx.ext.autodoc',
                'sphinx.ext.viewcode',
                # For LaTeX
                'sphinx.ext.pngmath',
                # For Google Style Docstrings
                # https://pypi.python.org/pypi/sphinxcontrib-napoleon
                'sphinxcontrib.napoleon',
                #'sphinx.ext.napoleon',
            ]
            ''').format(exclude_modules=str(exclude_modules))
        #theme_search = 'html_theme = \'default\''
        theme_search = 'html_theme = \'[a-zA-Z_1-3]*\''
        theme_repl = ut.codeblock('''
            import sphinx_rtd_theme
            html_theme = "sphinx_rtd_theme"
            html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
            ''')
        head_text = ut.codeblock('''
            from sphinx.ext.autodoc import between
            import sphinx_rtd_theme
            import sys
            import os

            # Don't parse IBEIS args
            os.environ['IBIES_PARSE_ARGS'] = 'OFF'
            os.environ['UTOOL_AUTOGEN_SPHINX_RUNNING'] = 'ON'

            sys.path.append('{modpath}')
            sys.path.insert(0, os.path.abspath("../"))

            autosummary_generate = True

            modindex_common_prefix = ['_']
            ''').format(modpath=ut.truepath(modpath))
        tail_text = ut.codeblock('''
            def setup(app):
                # Register a sphinx.ext.autodoc.between listener to ignore everything
                # between lines that contain the word IGNORE
                app.connect('autodoc-process-docstring', between('^.*IGNORE.*$', exclude=True))
                return app
            ''')
        return (ext_search_text, ext_repl_text, theme_search, theme_repl,
                head_text, tail_text)

    apidoc_cmdstr, modpath, outputdir = build_sphinx_apidoc_cmdstr()
    (ext_search_text, ext_repl_text, theme_search, theme_repl,
     head_text, tail_text) = build_conf_replstr()

    dry = ut.get_argflag('--dry')

    if not dry:
        # Execute sphinx-apidoc
        ut.cmd(apidoc_cmdstr, shell=True)
        # sphinx-apidoc outputs conf.py to <outputdir>, add custom commands
        #
        # Change dir to <outputdir>
        print('chdir ' + outputdir)
        os.chdir(outputdir)
        conf_fname = 'conf.py'
        conf_text = ut.read_from(conf_fname)
        conf_text = conf_text.replace('import sys', 'import sys  # NOQA')
        conf_text = conf_text.replace('import os', 'import os  # NOQA')
        conf_text = ut.regex_replace(theme_search, theme_repl, conf_text)
        conf_text = ut.regex_replace(ext_search_text, ext_repl_text, conf_text)
        conf_text = head_text + '\n' + conf_text + tail_text
        ut.write_to(conf_fname, conf_text)
        # Make the documentation
        #if ut.LINUX:
        #    ut.cmd('make html', shell=True)
        #if ut.WIN32:
        #raw_input('waiting')
        if not ut.get_argflag('--nomake'):
            ut.cmd('make', 'html', shell=True)
    else:
        print(apidoc_cmdstr)
        print('cd ' + outputdir)
        print('manual edits of conf.py')
        print('make html')
Example #26
def write_research(r, to_write, rate=-5):
    fname = join(split(__file__)[0], 'to_speak.txt')
    import utool as ut
    ut.write_to(fname, to_write)
Example #27
if __name__ == '__main__':
    """
    CommandLine:
        python autogen_test_script.py
        python autogen_test_script.py --verbose > run_tests.sh
        python autogen_test_script.py -w
        reset_dbs.sh && run_tests.sh
        reset_dbs.sh && run_tests.sh --testall
        ./reset_dbs.sh
        ./run_tests.sh --testall
    """
    shscript_text, pyscript_text = autogen_ibeis_runtest()
    runtest_fname = None

    if runtest_fname is None and ut.get_argflag('-w'):
        runtest_fname = 'run_tests'

    if runtest_fname is None and ut.get_argflag('-t'):
        runtest_fname = '_run_tests2'

    if runtest_fname is not None:
        ut.write_to('shell_' + runtest_fname + '.sh', shscript_text)
        ut.write_to(runtest_fname + '.py', pyscript_text)

    elif ut.get_argflag(('--verbose', '-v')):
        print(shscript_text)
        print('')
        print(pyscript_text)
Example #28
def sort_module_functions():
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]

    tmp = ['def' if isfunc else indent for isfunc, indent in zip(isfunc_list, indent_list)]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)

    #for block in block_list:
    #    print('#====')
    #    print(block)

    isfunc_list = [re.search('^def ', block, re.MULTILINE) is not None for block in block_list]

    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [findfuncname(block) if isfunc else None
                          for isfunc, block in zip(isfunc_list, block_list)]

    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)

    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)

    ismain_list = [re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
                   for nonfunc in nonfunc_list]

    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)

    newtext_list = []

    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')

    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')

    for mainblock in mainblock_list:
        newtext_list.append(mainblock)

    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext,))
    print('len(newtext) = %r' % (len(newtext),))
    print('len(text) = %r' % (len(text),))

    backup_fpath = ut.augpath(fpath, augext='.bak', augdir='_backup', ensure=True)

    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
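A standalone sketch of the sort-by-funcname step, assuming blocks are separated by two blank lines as in the code above:

import re

src = 'def zeta():\n    pass\n\n\ndef alpha():\n    pass\n'
blocks = re.split('\n\n\n', src)

def funcname(block):
    # Extract the defined function name, or '' for non-function blocks
    match = re.search(r'def\s+(?P<funcname>\w+)', block)
    return match.group('funcname') if match else ''

print('\n\n\n'.join(sorted(blocks, key=funcname)))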
Example #29
def autogenerate_nth_schema_version(schema_spec, n=-1):
    r"""
    dumps, prints, or diffs autogen schema based on command line

    Args:
        n (int):

    CommandLine:
        python -m ibeis.control._sql_helpers --test-autogenerate_nth_schema_version

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.control._sql_helpers import *  # NOQA
        >>> from ibeis.control import DB_SCHEMA
        >>> # build test data
        >>> schema_spec = DB_SCHEMA
        >>> n = 1
        >>> # execute function
        >>> tablename = autogenerate_nth_schema_version(schema_spec, n)
        >>> # verify results
        >>> result = str(tablename)
        >>> print(result)
    """
    import utool as ut
    print('[_SQL] AUTOGENERATING CURRENT SCHEMA')
    db = get_nth_test_schema_version(schema_spec, n=n)
    # Auto-generate the version skip schema file
    schema_spec_dir, schema_spec_fname = split(schema_spec.__file__)
    schema_spec_fname = splitext(schema_spec_fname)[0]
    # HACK TO GET AUTOGEN COMMAND
    # FIXME: Make this autogen command a bit more sane and not completely
    # coupled with ibeis
    autogen_cmd = ut.codeblock(
        '''
        python -m ibeis.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update --write
        python -m ibeis.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update --diff=1
        python -m ibeis.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update
        '''
    ).format(schema_spec_fname=schema_spec_fname, funcname=schema_spec_fname.lower())
    autogen_text = db.get_schema_current_autogeneration_str(autogen_cmd)

    autogen_fname = '%s_CURRENT.py' % schema_spec_fname
    autogen_fpath = join(schema_spec_dir, autogen_fname)

    dowrite = ut.get_argflag(('-w', '--write', '--dump-autogen-schema'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None
    dowrite = dowrite and not show_diff

    if dowrite:
        ut.write_to(autogen_fpath, autogen_text)
    else:
        if show_diff:
            if ut.checkpath(autogen_fpath, verbose=True):
                prev_text = ut.read_from(autogen_fpath)
                textdiff = ut.util_str.get_textdiff(
                    prev_text, autogen_text, num_context_lines=num_context_lines)
                ut.print_difftext(textdiff)
        else:
            ut.util_print.print_python_code(autogen_text)
        print('\nL___\n...would write to: %s' % autogen_fpath)

    print(' Run with -n=<int> to get a specific schema version by index. -1 == latest')
    print(' Run with --write to autogenerate latest schema version')
    print(' Run with --diff or --diff=<numcontextlines> to see the difference between current and requested')
    return db
Example #30
    pkgname = basename(repodir)
    packages = utool.ls_moduledirs(repodir, full=False)
    print(pkgname)
    setup_text = setup_text_fmt.format(
        packages=packages,
        repodir=repodir,
        timestamp=timestamp,
        pkgname=pkgname,
    )
    return setup_text


if __name__ == '__main__':
    writeflag = utool.get_argflag(('--write', '-w'))
    overwriteflag = utool.get_argflag(('--yes', '-y'))
    repodir = utool.unixpath(os.getcwd())
    print('[utool] making setup.py for: %r' % repodir)
    setup_text = make_setup(repodir)
    if writeflag:
        setup_fpath = utool.unixjoin(repodir, 'setup.py')
        if utool.checkpath(setup_fpath):
            confirm_flag = overwriteflag
        else:
            confirm_flag = True
        if confirm_flag:
            utool.write_to(setup_fpath, setup_text)
        else:
            print('setup.py file exists not writing')
    else:
        print(setup_text)
Example #31
def git_sequence_editor_squash(fpath):
    """
    squashes wip messages

    CommandLine:
        python -m utool.util_git --exec-git_sequence_editor_squash

    Example:
        >>> # SCRIPT
        >>> import utool as ut
        >>> from utool.util_git import *  # NOQA
        >>> fpath = ut.get_argval('--fpath', str, default=None)
        >>> git_sequence_editor_squash(fpath)

    Ignore:
        text = ut.codeblock(
            '''
            pick 852aa05 better doctest for tips
            pick 3c779b8 wip
            pick 02bc21d wip
            pick 1853828 Fixed root tablename
            pick 9d50233 doctest updates
            pick 66230a5 wip
            pick c612e98 wip
            pick b298598 Fixed tablename error
            pick 1120a87 wip
            pick f6c4838 wip
            pick 7f92575 wip
            ''')

    Ignore:
        def squash_consecutive_commits_with_same_message():
            # http://stackoverflow.com/questions/8226278/git-alias-to-squash-all-commits-with-a-particular-commit-message
            # Can do interactively with this. Can it be done automatically and pay attention to
            # Timestamps etc?
            git rebase --interactive HEAD~40 --autosquash
            git rebase --interactive $(git merge-base HEAD master) --autosquash

            # Lookbehind correct version
            %s/\([a-z]* [a-z0-9]* wip\n\)\@<=pick \([a-z0-9]*\) wip/squash \2 wip/gc

           # THE FULL NON-INTERACTIVE AUTOSQUASH SCRIPT
           # TODO: Dont squash if there is a one hour timedelta between commits

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git rev-list HEAD | tail -n 1) --autosquash --no-verify
           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~10 --autosquash --no-verify

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git merge-base HEAD master) --autosquash --no-verify

           # 14d778fa30a93f85c61f34d09eddb6d2cafd11e2
           # c509a95d4468ebb61097bd9f4d302367424772a3
           # b0ffc26011e33378ee30730c5e0ef1994bfe1a90
           # GIT_SEQUENCE_EDITOR=<script> git rebase -i <params>
           # GIT_SEQUENCE_EDITOR="echo 'FOOBAR $1' " git rebase -i HEAD~40 --autosquash
           # git checkout master
           # git branch -D tmp
           # git checkout -b tmp
           # option to get the tail commit
           $(git rev-list HEAD | tail -n 1)
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash --no-verify
           <params>
    """
    # print(sys.argv)
    import utool as ut

    text = ut.read_from(fpath)
    # print('fpath = %r' % (fpath,))
    print(text)
    # Doesn't work because of the fixed-width requirement
    # search = (ut.util_regex.positive_lookbehind('[a-z]* [a-z0-9]* wip\n') + 'pick ' +
    #           ut.reponamed_field('hash', '[a-z0-9]*') + ' wip')
    # repl = ('squash ' + ut.bref_field('hash') + ' wip')
    # import re
    # new_text = re.sub(search, repl, text, flags=re.MULTILINE)
    # print(new_text)
    prev_msg = None
    prev_dt = None
    new_lines = []

    def get_commit_date(hashid):
        out, err, ret = ut.cmd("git show -s --format=%ci " + hashid,
                               verbose=False, quiet=True, pad_stdout=False)
        # from datetime import datetime
        from dateutil import parser

        # print('out = %r' % (out,))
        stamp = out.strip("\n")
        # print('stamp = %r' % (stamp,))
        dt = parser.parse(stamp)
        # dt = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S %Z')
        # print('dt = %r' % (dt,))
        return dt

    for line in text.split("\n"):
        commit_line = line.split(" ")
        if len(commit_line) < 3:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        action = commit_line[0]
        hashid = commit_line[1]
        msg = " ".join(commit_line[2:])
        try:
            dt = get_commit_date(hashid)
        except ValueError:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        orig_msg = msg
        can_squash = action == "pick" and msg == "wip" and prev_msg == "wip"
        if prev_dt is not None and prev_msg == "wip":
            tdelta = dt - prev_dt
            # Only squash closely consecutive commits
            threshold_minutes = 45
            td_min = tdelta.total_seconds() / 60.0
            # print(tdelta)
            can_squash &= td_min < threshold_minutes
            msg = msg + " -- tdelta=%r" % (ut.get_timedelta_str(tdelta),)
        if can_squash:
            new_line = " ".join(["squash", hashid, msg])
            new_lines += [new_line]
        else:
            new_lines += [line]
        prev_msg = orig_msg
        prev_dt = dt
    new_text = "\n".join(new_lines)

    # NOTE: unused debugging variant; it shadows the helper above and is never
    # called after new_text has been built.
    def get_commit_date(hashid):
        out = ut.cmd("git show -s --format=%ci " + hashid, verbose=False)
        print("out = %r" % (out,))

    # print('Dry run')
    # ut.dump_autogen_code(fpath, new_text)
    print(new_text)
    ut.write_to(fpath, new_text, n=None)
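
The rewrite rule above, stripped of the git date lookup, can be exercised on a
fake rebase todo list. This is a self-contained sketch (plain Python, no utool
or git); the timedelta check is omitted because it needs real commit metadata:

def squash_wip_sketch(todo_text):
    # Turn every "pick <hash> wip" that directly follows another "wip" commit
    # into "squash <hash> wip", mirroring the loop above.
    prev_msg = None
    new_lines = []
    for line in todo_text.split('\n'):
        parts = line.split(' ')
        if len(parts) < 3:
            prev_msg = None
            new_lines.append(line)
            continue
        action, hashid, msg = parts[0], parts[1], ' '.join(parts[2:])
        if action == 'pick' and msg == 'wip' and prev_msg == 'wip':
            new_lines.append(' '.join(['squash', hashid, msg]))
        else:
            new_lines.append(line)
        prev_msg = msg
    return '\n'.join(new_lines)

sample = 'pick 3c779b8 wip\npick 02bc21d wip\npick 1853828 Fixed root tablename'
print(squash_wip_sketch(sample))
# pick 3c779b8 wip
# squash 02bc21d wip
# pick 1853828 Fixed root tablename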
Example #32
def autogen_sphinx_apidoc():
    r"""
    autogen_sphinx_docs.py

    Ignore:
        C:\Python27\Scripts\autogen_sphinx_docs.py
        autogen_sphinx_docs.py

        pip uninstall sphinx
        pip install sphinx
        pip install sphinxcontrib-napoleon
        pip install sphinx --upgrade
        pip install sphinxcontrib-napoleon --upgrade

        cd C:\Python27\Scripts
        ls C:\Python27\Scripts

        python -c "import sphinx; print(sphinx.__version__)"

    CommandLine:
        python -m utool.util_setup --exec-autogen_sphinx_apidoc

    Example:
        >>> # SCRIPT
        >>> from utool.util_setup import *  # NOQA
        >>> autogen_sphinx_apidoc()
    """
    # TODO: assert sphinx-apidoc exe is found
    # TODO: make find_exe work?
    import utool as ut

    def build_sphinx_apidoc_cmdstr():
        print('')
        print('if this fails try: sudo pip install sphinx')
        print('')
        apidoc = 'sphinx-apidoc'
        if ut.WIN32:
            winprefix = 'C:/Python27/Scripts/'
            sphinx_apidoc_exe = winprefix + apidoc + '.exe'
        else:
            sphinx_apidoc_exe = apidoc
        apidoc_argfmt_list = [
            sphinx_apidoc_exe,
            '--force',
            '--full',
            '--maxdepth="{maxdepth}"',
            '--doc-author="{author}"',
            '--doc-version="{doc_version}"',
            '--doc-release="{doc_release}"',
            '--output-dir="_doc"',
            #'--separate',  # Put documentation for each module on its own page
            '--private',  # Include "_private" modules
            '{pkgdir}',
        ]
        outputdir = '_doc'
        author = ut.parse_author()
        packages = ut.find_packages(maxdepth=1)
        assert len(packages) != 0, 'directory must contain at least one package'
        assert len(packages) == 1, (
            "FIXME: don't know what to do with more than one root package: %r"
            % (packages,))
        pkgdir = packages[0]
        version = ut.parse_package_for_version(pkgdir)
        modpath = dirname(ut.truepath(pkgdir))

        apidoc_fmtdict = {
            'author': author,
            'maxdepth': '8',
            'pkgdir': pkgdir,
            'doc_version': version,
            'doc_release': version,
            'outputdir': outputdir,
        }
        ut.assert_exists('setup.py')
        ut.ensuredir('_doc')
        apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
        apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
        print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir,))
        if ut.VERBOSE:
            print(ut.dict_str(apidoc_fmtdict))
        return apidoc_cmdstr, modpath, outputdir

    def build_conf_replstr():
        #
        # Make custom edits to conf.py
        # FIXME:
        #ext_search_text = ut.unindent(
        #    r'''
        #    extensions = [
        #    [^\]]*
        #    ]
        #    ''')
        ext_search_text = r'extensions = \[[^/]*\]'
        # TODO: http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
        #'sphinx.ext.mathjax',
        exclude_modules = []  # ['ibeis.all_imports']
        ext_repl_text = ut.codeblock(
            '''
            MOCK_MODULES = {exclude_modules}
            if len(MOCK_MODULES) > 0:
                import mock
                for mod_name in MOCK_MODULES:
                    sys.modules[mod_name] = mock.Mock()

            extensions = [
                'sphinx.ext.autodoc',
                'sphinx.ext.viewcode',
                # For LaTeX
                'sphinx.ext.pngmath',
                # For Google Style Docstrings
                # https://pypi.python.org/pypi/sphinxcontrib-napoleon
                'sphinxcontrib.napoleon',
                #'sphinx.ext.napoleon',
            ]
            '''
        ).format(exclude_modules=str(exclude_modules))
        #theme_search = 'html_theme = \'default\''
        theme_search = 'html_theme = \'[a-zA-Z_1-3]*\''
        theme_repl = ut.codeblock(
            '''
            import sphinx_rtd_theme
            html_theme = "sphinx_rtd_theme"
            html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
            ''')
        head_text = ut.codeblock(
            '''
            from sphinx.ext.autodoc import between
            import sphinx_rtd_theme
            import sys
            import os

            # Don't parse IBEIS args
            os.environ['IBIES_PARSE_ARGS'] = 'OFF'
            os.environ['UTOOL_AUTOGEN_SPHINX_RUNNING'] = 'ON'

            sys.path.append('{modpath}')
            sys.path.insert(0, os.path.abspath("../"))

            autosummary_generate = True

            modindex_common_prefix = ['_']
            '''
        ).format(modpath=ut.truepath(modpath))
        tail_text = ut.codeblock(
            '''
            def setup(app):
                # Register a sphinx.ext.autodoc.between listener to ignore everything
                # between lines that contain the word IGNORE
                app.connect('autodoc-process-docstring', between('^.*IGNORE.*$', exclude=True))
                return app
            '''
        )
        return (ext_search_text, ext_repl_text, theme_search, theme_repl, head_text, tail_text)

    apidoc_cmdstr, modpath, outputdir = build_sphinx_apidoc_cmdstr()
    ext_search_text, ext_repl_text, theme_search, theme_repl, head_text, tail_text = build_conf_replstr()

    dry = ut.get_argflag('--dry')

    if not dry:
        # Execute sphinx-apidoc
        ut.cmd(apidoc_cmdstr, shell=True)
        # sphinx-apidoc outputs conf.py to <outputdir>, add custom commands
        #
        # Change dir to <outputdir>
        print('chdir ' + outputdir)
        os.chdir(outputdir)
        conf_fname = 'conf.py'
        conf_text = ut.read_from(conf_fname)
        conf_text = conf_text.replace('import sys', 'import sys  # NOQA')
        conf_text = conf_text.replace('import os', 'import os  # NOQA')
        conf_text = ut.regex_replace(theme_search, theme_repl, conf_text)
        conf_text = ut.regex_replace(ext_search_text, ext_repl_text, conf_text)
        conf_text = head_text + '\n' + conf_text + tail_text
        ut.write_to(conf_fname, conf_text)
        # Make the documentation
        #if ut.LINUX:
        #    ut.cmd('make html', shell=True)
        #if ut.WIN32:
        #raw_input('waiting')
        if not ut.get_argflag('--nomake'):
            ut.cmd('make', 'html', shell=True)
    else:
        print(apidoc_cmdstr)
        print('cd ' + outputdir)
        print('manual edits of conf.py')
        print('make html')
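
For reference, here is roughly what the formatted sphinx-apidoc command looks
like; the values below are illustrative stand-ins (the real ones come from
parse_author, find_packages, and parse_package_for_version):

fmt = ('sphinx-apidoc --force --full --maxdepth="{maxdepth}" '
       '--doc-author="{author}" --doc-version="{doc_version}" '
       '--doc-release="{doc_release}" --output-dir="_doc" --private {pkgdir}')
print(fmt.format(maxdepth='8', author='Jane Doe', doc_version='1.0.0',
                 doc_release='1.0.0', pkgdir='mypkg'))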
Example #33
def test_reloading_metaclass():
    r"""
    CommandLine:
        python -m utool.util_class --test-test_reloading_metaclass

    References:
        http://stackoverflow.com/questions/8122734/pythons-imp-reload-function-is-not-working

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_class import *  # NOQA
        >>> result = test_reloading_metaclass()
        >>> print(result)
    """
    import utool as ut
    testdir = ut.ensure_app_resource_dir('utool', 'metaclass_tests')
    testfoo_fpath = ut.unixjoin(testdir, 'testfoo.py')
    # os.chdir(testdir)
    #with ut.ChdirContext(testdir, stay=ut.inIPython()):
    with ut.ChdirContext(testdir):
        foo_code1 = ut.codeblock(
            r'''
            # STARTBLOCK
            import utool as ut
            import six


            @six.add_metaclass(ut.ReloadingMetaclass)
            class Foo(object):
                def __init__(self):
                    pass

            spamattr = 'version1'
            # ENDBLOCK
            '''
        )
        foo_code2 = ut.codeblock(
            r'''
            # STARTBLOCK
            import utool as ut
            import six


            @six.add_metaclass(ut.ReloadingMetaclass)
            class Foo(object):
                def __init__(self):
                    pass

                def bar(self):
                    return 'spam'

            eggsattr = 'version2'
            # ENDBLOCK
            '''
        )
        # Write a testclass to disk
        ut.delete(testfoo_fpath)
        ut.write_to(testfoo_fpath, foo_code1, verbose=True)
        testfoo = ut.import_module_from_fpath(testfoo_fpath)
        #import testfoo
        foo = testfoo.Foo()
        print('foo = %r' % (foo,))
        assert not hasattr(foo, 'bar'), 'foo should not have a bar attr'
        ut.delete(testfoo_fpath + 'c')  # remove the pyc file because of the identical creation time
        ut.write_to(testfoo_fpath, foo_code2, verbose=True)
        assert not hasattr(foo, 'bar'), 'foo should still not have a bar attr'
        foo.rrr()
        assert foo.bar() == 'spam'
        ut.delete(testfoo_fpath)
        print('Reloading worked nicely')
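
ut.import_module_from_fpath does the heavy lifting in this test. A minimal
stand-in using only the standard library (a sketch, not the utool
implementation) would look like this:

import importlib.util

def import_module_from_fpath_sketch(fpath, modname='testfoo'):
    # Build a module object from a file path and execute it.
    spec = importlib.util.spec_from_file_location(modname, fpath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module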
Example #34
def export(ibs, aid_pairs=None):
    """
    3 - 4 different animals
    2 views of each
    matching keypoint coordinates on each annotation
    """
    if aid_pairs is None:
        if ibs.get_dbname() == 'PZ_MOTHERS':
            aid_pair_list = MOTHERS_VIEWPOINT_EXPORT_PAIRS
        if ibs.get_dbname() == 'GZ_ALL':
            aid_pair_list = GZ_VIEWPOINT_EXPORT_PAIRS
    ibs.update_query_cfg(ratio_thresh=1.6)
    export_path = expanduser('~/Dropbox/Assignments/dataset')
    #utool.view_directory(export_path)
    # MOTHERS EG:
    for aid_pair in aid_pair_list:
        qaid2_qres = ibs.query_intra_encounter(aid_pair)
        #ibeis.viz.show_qres(ibs, qaid2_qres.values()[1]); df2.iup()
        mrids_list = []
        mkpts_list = []
        for qaid, qres in six.iteritems(qaid2_qres):
            print('Getting kpts from %r' % qaid)
            #qres.show_top(ibs)
            posrid_list = utool.ensure_iterable(qres.get_classified_pos())
            mrids_list.extend([(qaid, posrid) for posrid in posrid_list])
            mkpts_list.extend(qres.get_matching_keypoints(ibs, posrid_list))

        mkey2_kpts = {}
        for mrids_tup, mkpts_tup in zip(mrids_list, mkpts_list):
            assert len(mrids_tup) == 2, 'must be a match tuple'
            mrids_ = np.array(mrids_tup)
            sortx = mrids_.argsort()
            mrids_ = mrids_[sortx]
            mkpts_ = np.array(mkpts_tup)[sortx]
            if sortx[0] == 0:
                pass
            mkey = tuple(mrids_.tolist())
            try:
                kpts_list = mkey2_kpts[mkey]
                print('append to mkey=%r' % (mkey,))
            except KeyError:
                print('new mkey=%r' % (mkey,))
                kpts_list = []
            kpts_list.append(mkpts_)
            mkey2_kpts[mkey] = kpts_list

        mkeys_list = mkey2_kpts.keys()
        mkeys_keypoints = mkey2_kpts.values()

        for mkeys, mkpts_list in zip(mkeys_list, mkeys_keypoints):
            print(mkeys)
            print(len(mkpts_list))
            kpts1_m = np.vstack([mkpts[0] for mkpts in mkpts_list])
            kpts2_m = np.vstack([mkpts[1] for mkpts in mkpts_list])
            match_lines = [
                repr(
                    (
                        tuple(kp1[ktool.LOC_DIMS].tolist()),
                        tuple(kp2[ktool.LOC_DIMS].tolist()),
                    )
                ) + ', '
                for kp1, kp2 in zip(kpts1_m, kpts2_m)]

            mcpaths_list = ibs.get_annot_cpaths(mkeys)
            fnames_list = list(map(lambda x: split(x)[1], mcpaths_list))
            for path in mcpaths_list:
                utool.copy(path, export_path)

            header_lines = ['# Exported keypoint matches (might be duplicate matches)',
                            '# matching_aids = %r' % (mkeys,)]
            header_lines += ['# img%d = %r' % (count, fname) for count, fname in enumerate(fnames_list)]
            header_lines += ['# LINE FORMAT: match_pts = [(img1_xy, img2_xy) ... ]']
            header_text = '\n'.join(header_lines)
            match_text  = '\n'.join(['match_pts = ['] + match_lines + [']'])
            matchfile_text = '\n'.join([header_text, match_text])
            # NOTE: use the loop variable mkeys, not the stale mkey from the
            # previous loop
            matchfile_name = ('match_aids(%d,%d).txt' % mkeys)
            matchfile_path = join(export_path, matchfile_name)
            utool.write_to(matchfile_path, matchfile_text)
            print(header_text)
            print(utool.truncate_str(match_text, maxlen=500))
Example #35
def sort_module_functions():
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]

    tmp = [
        'def' if isfunc else indent
        for isfunc, indent in zip(isfunc_list, indent_list)
    ]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)

    #for block in block_list:
    #    print('#====')
    #    print(block)

    isfunc_list = [
        re.search('^def ', block, re.MULTILINE) is not None
        for block in block_list
    ]

    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [
        findfuncname(block) if isfunc else None
        for isfunc, block in zip(isfunc_list, block_list)
    ]

    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)

    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)

    ismain_list = [
        re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
        for nonfunc in nonfunc_list
    ]

    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)

    newtext_list = []

    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')

    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')

    for mainblock in mainblock_list:
        newtext_list.append(mainblock)

    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext, ))
    print('len(newtext) = %r' % (len(newtext), ))
    print('len(text) = %r' % (len(text), ))

    backup_fpath = ut.augpath(fpath,
                              augext='.bak',
                              augdir='_backup',
                              ensure=True)

    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
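
The key step is ut.sortedby, which reorders one list by the values of another.
A plain-Python equivalent (a sketch under that assumption about its behavior):

def sortedby_sketch(item_list, key_list):
    # Return item_list reordered so that key_list would be in sorted order.
    pairs = sorted(zip(key_list, item_list), key=lambda t: t[0])
    return [item for _, item in pairs]

assert sortedby_sketch(['b_block', 'a_block'], ['b', 'a']) == ['a_block', 'b_block']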
Example #36
def make_setup(repodir):
    pkgname = basename(repodir)
    packages = utool.ls_moduledirs(repodir, full=False)
    print(pkgname)
    setup_text = setup_text_fmt.format(
        packages=packages,
        repodir=repodir,
        timestamp=timestamp,
        pkgname=pkgname,
    )
    return setup_text


if __name__ == '__main__':
    writeflag = utool.get_argflag(('--write', '-w'))
    overwriteflag = utool.get_argflag(('--yes', '-y'))
    repodir = utool.unixpath(os.getcwd())
    print('[utool] making setup.py for: %r' % repodir)
    setup_text = make_setup(repodir)
    if writeflag:
        setup_fpath = utool.unixjoin(repodir, 'setup.py')
        if utool.checkpath(setup_fpath):
            confirm_flag = overwriteflag
        else:
            confirm_flag = True
        if confirm_flag:
            utool.write_to(setup_fpath, setup_text)
        else:
            print('setup.py file exists; not writing')
    else:
        print(setup_text)
Example #37
def export(ibs, aid_pairs=None):
    """
    3 - 4 different animals
    2 views of each
    matching keypoint coordinates on each annotation
    """
    if aid_pairs is None:
        if ibs.get_dbname() == 'PZ_MOTHERS':
            aid_pair_list = MOTHERS_VIEWPOINT_EXPORT_PAIRS
        if ibs.get_dbname() == 'GZ_ALL':
            aid_pair_list = GZ_VIEWPOINT_EXPORT_PAIRS
    ibs.update_query_cfg(ratio_thresh=1.6)
    export_path = expanduser('~/Dropbox/Assignments/dataset')
    #utool.view_directory(export_path)
    # MOTHERS EG:
    for aid_pair in aid_pair_list:
        cm_list, qreq_ = ibs.query_chips(aid_pair, aid_pair)
        #ibeis.viz.show_qres(ibs, qaid2_qres.values()[1]); df2.iup()
        mrids_list = []
        mkpts_list = []
        for cm in cm_list:
            qaid = cm.qaid
            print('Getting kpts from %r' % qaid)
            #cm.show_top(ibs)
            posrid_list = utool.ensure_iterable(cm.get_classified_pos())
            mrids_list.extend([(qaid, posrid) for posrid in posrid_list])
            mkpts_list.extend(cm.get_matching_keypoints(ibs, posrid_list))

        mkey2_kpts = {}
        for mrids_tup, mkpts_tup in zip(mrids_list, mkpts_list):
            assert len(mrids_tup) == 2, 'must be a match tuple'
            mrids_ = np.array(mrids_tup)
            sortx = mrids_.argsort()
            mrids_ = mrids_[sortx]
            mkpts_ = np.array(mkpts_tup)[sortx]
            if sortx[0] == 0:
                pass
            mkey = tuple(mrids_.tolist())
            try:
                kpts_list = mkey2_kpts[mkey]
                print('append to mkey=%r' % (mkey, ))
            except KeyError:
                print('new mkey=%r' % (mkey, ))
                kpts_list = []
            kpts_list.append(mkpts_)
            mkey2_kpts[mkey] = kpts_list

        mkeys_list = mkey2_kpts.keys()
        mkeys_keypoints = mkey2_kpts.values()

        for mkeys, mkpts_list in zip(mkeys_list, mkeys_keypoints):
            print(mkeys)
            print(len(mkpts_list))
            kpts1_m = np.vstack([mkpts[0] for mkpts in mkpts_list])
            kpts2_m = np.vstack([mkpts[1] for mkpts in mkpts_list])
            match_lines = [
                repr((
                    tuple(kp1[ktool.LOC_DIMS].tolist()),
                    tuple(kp2[ktool.LOC_DIMS].tolist()),
                )) + ', ' for kp1, kp2 in zip(kpts1_m, kpts2_m)
            ]

            mcpaths_list = ibs.get_annot_chip_fpath(mkeys)
            fnames_list = list(map(lambda x: split(x)[1], mcpaths_list))
            for path in mcpaths_list:
                utool.copy(path, export_path)

            header_lines = [
                '# Exported keypoint matches (might be duplicate matches)',
                '# matching_aids = %r' % (mkeys, )
            ]
            header_lines += [
                '# img%d = %r' % (count, fname)
                for count, fname in enumerate(fnames_list)
            ]
            header_lines += [
                '# LINE FORMAT: match_pts = [(img1_xy, img2_xy) ... ]'
            ]
            header_text = '\n'.join(header_lines)
            match_text = '\n'.join(['match_pts = ['] + match_lines + [']'])
            matchfile_text = '\n'.join([header_text, match_text])
            # NOTE: use the loop variable mkeys, not the stale mkey from the
            # previous loop
            matchfile_name = ('match_aids(%d,%d).txt' % mkeys)
            matchfile_path = join(export_path, matchfile_name)
            utool.write_to(matchfile_path, matchfile_text)
            print(header_text)
            print(utool.truncate_str(match_text, maxlen=500))
Example #38
def fix_section_title_capitalization(tex_fpath, dryrun=True):
    # Read in text and ensure ascii format
    text = ut.read_from(tex_fpath)

    section_type_list = [
        'chapter',
        'section',
        'subsection',
        'subsubsection',
        'paragraph',
    ]
    re_section_type = ut.named_field('section_type',
                                     ut.regex_or(section_type_list))
    re_section_title = ut.named_field('section_title', '[^}]*')

    re_spaces = ut.named_field('spaces', '^ *')

    pattern = re_spaces + re.escape(
        '\\') + re_section_type + '{' + re_section_title + '}'

    def fix_capitalization(match):
        dict_ = match.groupdict()
        section_title = dict_['section_title']
        #if section_title == 'The Great Zebra Count':
        #    return match.string[slice(*match.span())]
        #    #return 'The Great Zebra Count'
        # general logic
        #words = section_title.split(' ')
        tokens = re.split(ut.regex_or([' ', '/']), section_title)
        #if 'Coverage' in section_title:
        #    ut.embed()
        #    pass
        #words = [word if count == 0 else word.lower() for count, word in enumerate(words)]
        #new_section_title = ' '.join(words)
        tokens = [
            t if count == 0 else t.lower() for count, t in enumerate(tokens)
        ]
        new_section_title = ''.join(tokens)

        # hacks for caps of expanded titles
        search_repl_list = constants_tex_fixes.CAPITAL_TITLE_LIST
        for repl in search_repl_list:
            new_section_title = re.sub(re.escape(repl),
                                       repl,
                                       new_section_title,
                                       flags=re.IGNORECASE)
        # hacks fo acronyms
        for full, acro in constants_tex_fixes.ACRONYMN_LIST:
            new_section_title = re.sub(r'\b' + re.escape(acro) + r'\b',
                                       acro,
                                       new_section_title,
                                       flags=re.IGNORECASE)

        #'the great zebra and giraffe count'

        #new_section_title = section_title.lower()
        new_text = dict_['spaces'] + '\\' + dict_[
            'section_type'] + '{' + new_section_title + '}'
        VERBOSE = 0
        if VERBOSE:
            old_text = match.string[slice(*match.span())]
            if new_text != old_text:
                print(ut.dict_str(dict_))
                print('--- REPL ---')
                print(old_text)
                print(new_text)
        return new_text

    #for match in re.finditer(pattern, text, flags=re.MULTILINE):
    #    fix_capitalization(match)

    new_text = re.sub(pattern, fix_capitalization, text, flags=re.MULTILINE)

    if not dryrun:
        ut.write_to(tex_fpath, new_text)
    else:
        ut.print_difftext(ut.get_textdiff(text, new_text, 0))
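
The core capitalization rule in isolation (a sketch, assuming ut.regex_or
produces a capturing group so re.split keeps the ' ' and '/' separators):

import re

def sentence_case_sketch(title):
    # Keep the first token as-is and lowercase the rest; the capturing group
    # preserves separators so ''.join reassembles the title exactly.
    tokens = re.split('([ /])', title)
    return ''.join(t if i == 0 else t.lower() for i, t in enumerate(tokens))

assert sentence_case_sketch('The Great Zebra Count') == 'The great zebra count'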
Example #39
def git_sequence_editor_squash(fpath):
    """
    squashes wip messages

    CommandLine:
        python -m utool.util_git --exec-git_sequence_editor_squash

    Example:
        >>> # SCRIPT
        >>> import utool as ut
        >>> from utool.util_git import *  # NOQA
        >>> fpath = ut.get_argval('--fpath', str, default=None)
        >>> git_sequence_editor_squash(fpath)

    Ignore:
        text = ut.codeblock(
            '''
            pick 852aa05 better doctest for tips
            pick 3c779b8 wip
            pick 02bc21d wip
            pick 1853828 Fixed root tablename
            pick 9d50233 doctest updates
            pick 66230a5 wip
            pick c612e98 wip
            pick b298598 Fixed tablename error
            pick 1120a87 wip
            pick f6c4838 wip
            pick 7f92575 wip
            ''')

    Ignore:
        def squash_consecutive_commits_with_same_message():
            # http://stackoverflow.com/questions/8226278/git-alias-to-squash-all-commits-with-a-particular-commit-message
            # Can do interactively with this. Can it be done automatically and pay attention to
            # Timestamps etc?
            git rebase --interactive HEAD~40 --autosquash
            git rebase --interactive $(git merge-base HEAD master) --autosquash

            # Lookbehind correct version
            %s/\([a-z]* [a-z0-9]* wip\n\)\@<=pick \([a-z0-9]*\) wip/squash \2 wip/gc

           # THE FULL NON-INTERACTIVE AUTOSQUASH SCRIPT
           # TODO: Don't squash if there is a one-hour timedelta between commits

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git rev-list HEAD | tail -n 1) --autosquash --no-verify
           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~10 --autosquash --no-verify

           GIT_EDITOR="cat $1" GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i $(git merge-base HEAD master) --autosquash --no-verify

           # 14d778fa30a93f85c61f34d09eddb6d2cafd11e2
           # c509a95d4468ebb61097bd9f4d302367424772a3
           # b0ffc26011e33378ee30730c5e0ef1994bfe1a90
           # GIT_SEQUENCE_EDITOR=<script> git rebase -i <params>
           # GIT_SEQUENCE_EDITOR="echo 'FOOBAR $1' " git rebase -i HEAD~40 --autosquash
           # git checkout master
           # git branch -D tmp
           # git checkout -b tmp
           # option to get the tail commit
           $(git rev-list HEAD | tail -n 1)
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash
           # GIT_SEQUENCE_EDITOR="python -m utool.util_git --exec-git_sequence_editor_squash \
                   --fpath $1" git rebase -i HEAD~40 --autosquash --no-verify
           <params>
    """
    # print(sys.argv)
    import utool as ut
    text = ut.read_from(fpath)
    # print('fpath = %r' % (fpath,))
    print(text)
    # Doesn't work because of a fixed-width requirement
    # search = (ut.util_regex.positive_lookbehind('[a-z]* [a-z0-9]* wip\n') + 'pick ' +
    #           ut.reponamed_field('hash', '[a-z0-9]*') + ' wip')
    # repl = ('squash ' + ut.bref_field('hash') + ' wip')
    # import re
    # new_text = re.sub(search, repl, text, flags=re.MULTILINE)
    # print(new_text)
    prev_msg = None
    prev_dt = None
    new_lines = []

    def get_commit_date(hashid):
        out, err, ret = ut.cmd('git show -s --format=%ci ' + hashid, verbose=False, quiet=True, pad_stdout=False)
        # from datetime import datetime
        from dateutil import parser
        # print('out = %r' % (out,))
        stamp = out.strip('\n')
        # print('stamp = %r' % (stamp,))
        dt = parser.parse(stamp)
        # dt = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S %Z')
        # print('dt = %r' % (dt,))
        return dt

    for line in text.split('\n'):
        commit_line = line.split(' ')
        if len(commit_line) < 3:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        action = commit_line[0]
        hashid = commit_line[1]
        msg = ' '.join(commit_line[2:])
        try:
            dt = get_commit_date(hashid)
        except ValueError:
            prev_msg = None
            prev_dt = None
            new_lines += [line]
            continue
        orig_msg = msg
        can_squash = action == 'pick' and msg == 'wip' and prev_msg == 'wip'
        if prev_dt is not None and prev_msg == 'wip':
            tdelta = dt - prev_dt
            # Only squash closely consecutive commits
            threshold_minutes = 45
            td_min = (tdelta.total_seconds() / 60.)
            # print(tdelta)
            can_squash &= td_min < threshold_minutes
            msg = msg + ' -- tdelta=%r' % (ut.get_timedelta_str(tdelta),)
        if can_squash:
            new_line = ' '.join(['squash', hashid, msg])
            new_lines += [new_line]
        else:
            new_lines += [line]
        prev_msg = orig_msg
        prev_dt = dt
    new_text = '\n'.join(new_lines)

    # NOTE: unused debugging variant; it shadows the helper above and is never
    # called after new_text has been built.
    def get_commit_date(hashid):
        out = ut.cmd('git show -s --format=%ci ' + hashid, verbose=False)
        print('out = %r' % (out,))

    # print('Dry run')
    # ut.dump_autogen_code(fpath, new_text)
    print(new_text)
    ut.write_to(fpath, new_text, n=None)
Example #40
def test_siamese_performance(model, data, labels, flat_metadata, dataname=''):
    r"""
    CommandLine:
        utprof.py -m ibeis_cnn --tf pz_patchmatch --db liberty --test --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test  --ensure
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test  --ensure --weights=new
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --train --weights=new
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 --test  # NOQA
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128
    """
    import vtool as vt
    import plottool as pt

    # TODO: save in model.training_dpath/diagnostics/figures
    ut.colorprint('\n[siam_perf] Testing Siamese Performance', 'white')
    #epoch_dpath = model.get_epoch_diagnostic_dpath()
    epoch_dpath = model.arch_dpath
    ut.vd(epoch_dpath)

    dataname += ' ' + model.get_history_hashid() + '\n'

    history_text = ut.list_str(model.era_history, newlines=True)

    ut.write_to(ut.unixjoin(epoch_dpath, 'era_history.txt'), history_text)

    #if True:
    #    import matplotlib as mpl
    #    mpl.rcParams['agg.path.chunksize'] = 100000

    #data   = data[::50]
    #labels = labels[::50]
    #from ibeis_cnn import utils
    #data, labels = utils.random_xy_sample(data, labels, 10000, model.data_per_label_input)

    FULL = not ut.get_argflag('--quick')

    fnum_gen = pt.make_fnum_nextgen()

    ut.colorprint('[siam_perf] Show era history', 'white')
    fig = model.show_era_loss(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)

    # hack
    ut.colorprint('[siam_perf] Show weights image', 'white')
    fig = model.show_weights_image(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
    #model.draw_all_conv_layer_weights(fnum=fnum_gen())
    #model.imwrite_weights(1)
    #model.imwrite_weights(2)

    # Compute each type of score
    ut.colorprint('[siam_perf] Building Scores', 'white')
    test_outputs = model.predict2(model, data)
    network_output = test_outputs['network_output_determ']
    # hack converting network output to distances for non-descriptor networks
    if len(network_output.shape) == 2 and network_output.shape[1] == 1:
        cnn_scores = network_output.T[0]
    elif len(network_output.shape) == 1:
        cnn_scores = network_output
    elif len(network_output.shape) == 2 and network_output.shape[1] > 1:
        assert model.data_per_label_output == 2
        vecs1 = network_output[0::2]
        vecs2 = network_output[1::2]
        cnn_scores = vt.L2(vecs1, vecs2)
    else:
        assert False
    cnn_scores = cnn_scores.astype(np.float64)

    # Segfaults with the data passed in is large (AND MEMMAPPED apparently)
    # Fixed in hesaff implementation
    SIFT = FULL
    if SIFT:
        sift_scores, sift_list = test_sift_patchmatch_scores(data, labels)
        sift_scores = sift_scores.astype(np.float64)

    ut.colorprint('[siam_perf] Learning Encoders', 'white')
    # Learn encoders
    encoder_kw = {
        #'monotonize': False,
        'monotonize': True,
    }
    cnn_encoder = vt.ScoreNormalizer(**encoder_kw)
    cnn_encoder.fit(cnn_scores, labels)

    if SIFT:
        sift_encoder = vt.ScoreNormalizer(**encoder_kw)
        sift_encoder.fit(sift_scores, labels)

    # Visualize
    ut.colorprint('[siam_perf] Visualize Encoders', 'white')
    viz_kw = dict(
        with_scores=False,
        with_postbayes=False,
        with_prebayes=False,
        target_tpr=.95,
    )
    inter_cnn = cnn_encoder.visualize(
        figtitle=dataname + ' CNN scores. #data=' + str(len(data)),
        fnum=fnum_gen(), **viz_kw)
    if SIFT:
        inter_sift = sift_encoder.visualize(
            figtitle=dataname + ' SIFT scores. #data=' + str(len(data)),
            fnum=fnum_gen(), **viz_kw)

    # Save
    pt.save_figure(fig=inter_cnn.fig, dpath=epoch_dpath)
    if SIFT:
        pt.save_figure(fig=inter_sift.fig, dpath=epoch_dpath)

    # Save out examples of hard errors
    #cnn_fp_label_indicies, cnn_fn_label_indicies =
    #cnn_encoder.get_error_indicies(cnn_scores, labels)
    #sift_fp_label_indicies, sift_fn_label_indicies =
    #sift_encoder.get_error_indicies(sift_scores, labels)

    with_patch_examples = FULL
    if with_patch_examples:
        ut.colorprint('[siam_perf] Visualize Confusion Examples', 'white')
        cnn_indicies = cnn_encoder.get_confusion_indicies(cnn_scores, labels)
        if SIFT:
            sift_indicies = sift_encoder.get_confusion_indicies(sift_scores, labels)

        warped_patch1_list, warped_patch2_list = list(zip(*ut.ichunks(data, 2)))
        samp_args = (warped_patch1_list, warped_patch2_list, labels)
        _sample = functools.partial(draw_results.get_patch_sample_img, *samp_args)

        cnn_fp_img = _sample({'fs': cnn_scores}, cnn_indicies.fp)[0]
        cnn_fn_img = _sample({'fs': cnn_scores}, cnn_indicies.fn)[0]
        cnn_tp_img = _sample({'fs': cnn_scores}, cnn_indicies.tp)[0]
        cnn_tn_img = _sample({'fs': cnn_scores}, cnn_indicies.tn)[0]

        if SIFT:
            sift_fp_img = _sample({'fs': sift_scores}, sift_indicies.fp)[0]
            sift_fn_img = _sample({'fs': sift_scores}, sift_indicies.fn)[0]
            sift_tp_img = _sample({'fs': sift_scores}, sift_indicies.tp)[0]
            sift_tn_img = _sample({'fs': sift_scores}, sift_indicies.tn)[0]

        #if ut.show_was_requested():
        #def rectify(arr):
        #    return np.flipud(arr)
        SINGLE_FIG = False
        if SINGLE_FIG:
            def dump_img(img_, lbl, fnum):
                fig, ax = pt.imshow(img_, figtitle=dataname + ' ' + lbl, fnum=fnum)
                pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
            dump_img(cnn_fp_img, 'cnn_fp_img', fnum_gen())
            dump_img(cnn_fn_img, 'cnn_fn_img', fnum_gen())
            dump_img(cnn_tp_img, 'cnn_tp_img', fnum_gen())
            dump_img(cnn_tn_img, 'cnn_tn_img', fnum_gen())

            dump_img(sift_fp_img, 'sift_fp_img', fnum_gen())
            dump_img(sift_fn_img, 'sift_fn_img', fnum_gen())
            dump_img(sift_tp_img, 'sift_tp_img', fnum_gen())
            dump_img(sift_tn_img, 'sift_tn_img', fnum_gen())
            #vt.imwrite(dataname + '_' + 'cnn_fp_img.png', (cnn_fp_img))
            #vt.imwrite(dataname + '_' + 'cnn_fn_img.png', (cnn_fn_img))
            #vt.imwrite(dataname + '_' + 'sift_fp_img.png', (sift_fp_img))
            #vt.imwrite(dataname + '_' + 'sift_fn_img.png', (sift_fn_img))
        else:
            print('Drawing TP FP TN FN')
            fnum = fnum_gen()
            pnum_gen = pt.make_pnum_nextgen(4, 2)
            fig = pt.figure(fnum)
            pt.imshow(cnn_fp_img,  title='CNN FP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fp_img, title='SIFT FP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_fn_img,  title='CNN FN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fn_img, title='SIFT FN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tp_img,  title='CNN TP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tp_img, title='SIFT TP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tn_img,  title='CNN TN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tn_img, title='SIFT TN', fnum=fnum, pnum=pnum_gen())
            pt.set_figtitle(dataname + ' confusions')
            pt.adjust_subplots(left=0, right=1.0, bottom=0., wspace=.01, hspace=.05)
            pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))

    with_patch_desc = FULL
    if with_patch_desc:
        ut.colorprint('[siam_perf] Visualize Patch Descriptors', 'white')
        fnum = fnum_gen()
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1))
        num_rows = 7
        pnum_gen = pt.make_pnum_nextgen(num_rows, 3)
        # Compare actual output descriptors
        for index in ut.random_indexes(len(sift_list), num_rows):
            vec_sift = sift_list[index]
            vec_cnn = network_output[index]
            patch = data[index]
            pt.imshow(patch, fnum=fnum, pnum=pnum_gen())
            pt.plot_descriptor_signature(vec_cnn, 'cnn vec',  fnum=fnum, pnum=pnum_gen())
            pt.plot_sift_signature(vec_sift, 'sift vec',  fnum=fnum, pnum=pnum_gen())
        pt.set_figtitle('Patch Descriptors')
        pt.adjust_subplots(left=0, right=0.95, bottom=0., wspace=.1, hspace=.15)
        pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))
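
The score-extraction branch above is worth seeing in isolation: for descriptor
networks the output rows are interleaved so that rows 2i and 2i+1 are a pair,
and the score is the L2 distance between the two descriptors. A numpy sketch
(vt.L2 is assumed to compute exactly this row-wise distance):

import numpy as np

network_output = np.random.rand(8, 128)  # 4 fake pairs of 128-D descriptors
vecs1 = network_output[0::2]
vecs2 = network_output[1::2]
cnn_scores = np.sqrt(((vecs1 - vecs2) ** 2).sum(axis=1))  # one score per pair
assert cnn_scores.shape == (4,)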
Example #41
def compile_latex_text(input_text, dpath=None, fname=None, verbose=True,
                       move=True, nest_in_doc=None, title=None,
                       preamb_extra=None):
    r"""
    CommandLine:
        python -m utool.util_latex --test-compile_latex_text --show

    Ignore:
        pdflatex -shell-escape --synctex=-1 -src-specials -interaction=nonstopmode\
            ~/code/ibeis/tmptex/latex_formatter_temp.tex

    Example1:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> verbose = True
        >>> #dpath = '/home/joncrall/code/ibeis/aidchallenge'
        >>> dpath = dirname(ut.grab_test_imgpath())
        >>> #ut.vd(dpath)
        >>> orig_fpaths = ut.list_images(dpath, fullpath=True)
        >>> figure_str = ut.get_latex_figure_str(orig_fpaths, width_str='2.4in', nCols=2)
        >>> input_text = figure_str
        >>> pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath,
        >>>                                   verbose=verbose)
        >>> output_pdf_fpath = ut.compress_pdf(pdf_fpath)
        >>> print(pdf_fpath)
        >>> ut.quit_if_noshow()
        >>> ut.startfile(pdf_fpath)
    """
    import utool as ut
    if verbose:
        print('[ut] compile_latex_text')

    if nest_in_doc is None:
        nest_in_doc = 'documentclass' not in input_text
    if nest_in_doc:
        text = make_full_document(input_text, title=title,
                                  preamb_extra=preamb_extra)
    else:
        text = input_text
    if not dpath:
        dpath = os.getcwd()
    if fname is None:
        fname = 'temp_latex'

    # Create temporary work directory
    work_dpath = join(dpath, '.tmptex')
    ut.ensuredir(work_dpath, verbose=verbose > 1)

    fname_tex = ut.ensure_ext(fname, '.tex')
    fname_pdf = ut.ensure_ext(fname, '.pdf')

    tex_fpath = join(work_dpath, fname_tex)
    pdf_fpath_output = join(work_dpath, fname_pdf)
    ut.write_to(tex_fpath, text)

    with ut.ChdirContext(work_dpath, verbose=verbose > 1):
        # print(text)
        args = ' '.join([
            'lualatex', '-shell-escape', '--synctex=-1', '-src-specials',
            '-interaction=nonstopmode', tex_fpath
        ])
        info = ut.cmd2(args, verbose=verbose > 1)
        if not ut.checkpath(pdf_fpath_output, verbose=verbose > 1):
            print('Error compiling LaTeX')
            ut.print_code(text, 'latex')
            print(info['out'])
            raise RuntimeError('latex failed')

    if move:
        pdf_fpath = join(dpath, fname_pdf)
        ut.move(pdf_fpath_output, pdf_fpath, verbose=verbose > 1)
    else:
        pdf_fpath = pdf_fpath_output
    return pdf_fpath
Example #42
def write_research(r, to_write, rate=-5):
    fname = join(split(__file__)[0], 'to_speak.txt')
    import utool as ut
    ut.write_to(fname, to_write)
Example #43
def tune_flann(dpts,
               target_precision=.90,
               build_weight=0.50,
               memory_weight=0.00,
               sample_fraction=0.01):
    r"""

    References:
        http://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_pami2014.pdf
        http://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
        http://docs.opencv.org/trunk/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html

    Math::
        The cost of an algorithm is:

        LaTeX:
            \cost = \frac{\search + w_b \cdot \build}
                         {\min_{\theta}(\search + w_b \cdot \build)}
                    + w_m \cdot \memory

        where w_b is build_weight, w_m is memory_weight, and the minimum is
        taken over the searched parameter space \theta.

    Args:
        dpts (ndarray):

        target_precision (float): number between 0 and 1 representing desired
            accuracy. Higher values are more accurate.

        build_weight (float): importance weight given to minimizing build time
            relative to search time. This number can range from 0 to infinity.
            typically because building is a more complex computation you want
            to keep the number relatively low, (less than 1) otherwise you'll
            end up getting a linear search (no build time).

        memory_weight (float): Importance of memory relative to total speed.
            A value less than 1 gives more importance to the time spent and a
            value greater than 1 gives more importance to the memory usage.

        sample_fraction (float): number between 0 and 1 representing the
            fraction of the input data to use in the optimization. A higher
            number uses more data.

    Returns:
        dict: tuned_params

    CommandLine:
        python -m vtool.nearest_neighbors --test-tune_flann

    """
    with ut.Timer('tuning flann'):
        print('Autotuning flann with %d %dD vectors' % (dpts.shape[0], dpts.shape[1]))
        print('a sample of %d vectors will be used' % (int(dpts.shape[0] * sample_fraction)))
        flann = pyflann.FLANN()
        #num_data = len(dpts)
        flann_atkwargs = dict(algorithm='autotuned',
                              target_precision=target_precision,
                              build_weight=build_weight,
                              memory_weight=memory_weight,
                              sample_fraction=sample_fraction)
        suffix = repr(flann_atkwargs)
        badchar_list = ',{}\': '
        for badchar in badchar_list:
            suffix = suffix.replace(badchar, '')
        print('flann_atkwargs:')
        print(utool.dict_str(flann_atkwargs))
        print('starting optimization')
        tuned_params = flann.build_index(dpts, **flann_atkwargs)
        print('finished optimization')

        # The algorithm is sometimes returned as 'default', which is not very
        # useful because the actual default name is embedded in the pyflann
        # module where most would not care to look. This finds the default
        # name for you.
        for key in ['algorithm', 'centers_init', 'log_level']:
            val = tuned_params.get(key, None)
            if val == 'default':
                dict_ = pyflann.FLANNParameters._translation_[key]
                other_algs = ut.dict_find_other_sameval_keys(dict_, 'default')
                assert len(other_algs) == 1, 'more than 1 default for key=%r' % (key,)
                tuned_params[key] = other_algs[0]

        common_params = [
            'algorithm',
            'checks',
        ]
        relevant_params_dict = dict(
            linear=['algorithm'],
            #---
            kdtree=[
                'trees'
            ],
            #---
            kmeans=[
                'branching',
                'iterations',
                'centers_init',
                'cb_index',
            ],
            #---
            lsh=[
                'table_number',
                'key_size',
                'multi_probe_level',
            ],
        )
        relevant_params_dict['composite'] = relevant_params_dict['kmeans'] + relevant_params_dict['kdtree'] + common_params
        relevant_params_dict['kmeans'] += common_params
        relevant_params_dict['kdtree'] += common_params
        relevant_params_dict['lsh'] += common_params

        #kdtree_single_params = [
        #    'leaf_max_size',
        #]
        #other_params = [
        #    'build_weight',
        #    'sorted',
        #]
        out_file = 'flann_tuned' + suffix
        utool.write_to(out_file, ut.dict_str(tuned_params, sorted_=True, newlines=True))
        flann.delete_index()
        if tuned_params['algorithm'] in relevant_params_dict:
            print('relevant_params=')
            relevant_params = relevant_params_dict[tuned_params['algorithm']]
            print(ut.dict_str(ut.dict_subset(tuned_params, relevant_params),
                              sorted_=True, newlines=True))
            print('irrelevant_params=')
            print(ut.dict_str(ut.dict_setdiff(tuned_params, relevant_params),
                              sorted_=True, newlines=True))
        else:
            print('unknown tuned algorithm=%r' % (tuned_params['algorithm'],))

        print('all_tuned_params=')
        print(ut.dict_str(tuned_params, sorted_=True, newlines=True))
    return tuned_params
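
A worked instance of the cost formula in the docstring, with made-up numbers
(real values come out of FLANN's autotuner, not from this sketch):

search, build, memory = 1.0, 40.0, 2.0       # assumed timings / memory ratio
build_weight, memory_weight = 0.5, 0.0
best_time = 5.0  # assumed min of (search + build_weight * build) over params
cost = (search + build_weight * build) / best_time + memory_weight * memory
print(cost)  # 4.2 -> this parameter set is ~4x worse than the best trade-off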
Example #44
def write_default_ipython_profile():
    """
    CommandLine:
        python ~/local/init/init_ipython_config.py

        python -c "import utool as ut; ut.vd(ut.unixpath('~/.ipython/profile_default'))"
        python -c "import utool as ut; ut.editfile(ut.unixpath('~/.ipython/profile_default/ipython_config.py'))"

    References:
        http://2sn.org/python/ipython_config.py
    """
    dpath = ut.unixpath('~/.ipython/profile_default')
    ut.ensuredir(dpath, info=True, verbose=True)
    ipy_config_fpath = ut.unixjoin(dpath, 'ipython_config.py')
    ipy_config_text = ut.codeblock(
        r'''
        # STARTBLOCK
        c = get_config()  # NOQA
        c.InteractiveShellApp.exec_lines = []
        future_line = (
            'from __future__ import absolute_import, division, print_function, with_statement, unicode_literals')
        c.InteractiveShellApp.exec_lines.append(future_line)
        # Fix sip versions
        try:
            import sip
            # http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
            sip.setapi('QVariant', 2)
            sip.setapi('QString', 2)
            sip.setapi('QTextStream', 2)
            sip.setapi('QTime', 2)
            sip.setapi('QUrl', 2)
            sip.setapi('QDate', 2)
            sip.setapi('QDateTime', 2)
            if hasattr(sip, 'setdestroyonexit'):
                sip.setdestroyonexit(False)  # This prevents a crash on windows
        except ImportError as ex:
            pass
        except ValueError as ex:
            print('Warning: Value Error: %s' % str(ex))
            pass
        c.InteractiveShellApp.exec_lines.append('%load_ext autoreload')
        c.InteractiveShellApp.exec_lines.append('%autoreload 2')
        #c.InteractiveShellApp.exec_lines.append('%pylab qt4')
        c.InteractiveShellApp.exec_lines.append('import numpy as np')
        c.InteractiveShellApp.exec_lines.append('import utool as ut')
        #c.InteractiveShellApp.exec_lines.append('import plottool as pt')
        c.InteractiveShellApp.exec_lines.append('from os.path import *')
        c.InteractiveShellApp.exec_lines.append('from six.moves import cPickle as pickle')
        #c.InteractiveShellApp.exec_lines.append('if \'verbose\' not in vars():\\n    verbose = True')
        import utool as ut
        c.InteractiveShellApp.exec_lines.append(ut.codeblock(
            """
            class classproperty(property):
                def __get__(self, cls, owner):
                    return classmethod(self.fget).__get__(None, owner)()
            class vim(object):
                @classproperty
                def focus(cls):
                    import utool.util_ubuntu
                    utool.util_ubuntu.xctrl.do(('focus', 'GVIM'),)
                @classproperty
                def copy(cls):
                    import utool.util_ubuntu
                    utool.util_ubuntu.xctrl.do(('focus', 'GVIM'),)
                    import utool as ut
                    import IPython
                    ipy = IPython.get_ipython()
                    lastline = ipy.history_manager.input_hist_parsed[-2]
                    ut.copy_text_to_clipboard(lastline)
                    # import utool as ut
                    import utool.util_ubuntu
                    utool.util_ubuntu.xctrl.do(
                        ('focus', 'GVIM'),
                        ('key', 'ctrl+v'),
                        ('focus', 'x-terminal-emulator.X-terminal-emulator')
                    )
            """
        ))
        #c.InteractiveShell.autoindent = True
        #c.InteractiveShell.colors = 'LightBG'
        #c.InteractiveShell.confirm_exit = False
        #c.InteractiveShell.deep_reload = True
        c.InteractiveShell.editor = 'gvim'
        #c.InteractiveShell.xmode = 'Context'
        # ENDBLOCK
        '''
    )
    ut.write_to(ipy_config_fpath, ipy_config_text)
Example #45
def execute_commands(tpl_rman, wbia_rman):
    import utool as ut

    GET_ARGVAL = ut.get_argval
    GET_ARGFLAG = ut.get_argflag

    ut.init_catch_ctrl_c()

    if 0:
        print('Version Check Source:')
        for repo in tpl_rman.repos:
            print('python -c "import {0}; print({0}.__file__)"'.format(
                repo.modname))
            print('python -c "import {0}; print({0}.__version__)"'.format(
                repo.modname))

    # -----------
    # Execute Commands on Core Repos
    # -----------

    CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo()

    print('wbia_rman = %r' % (wbia_rman, ))

    wildme_ssh_flags = GET_ARGFLAG('--move-wildme') or GET_ARGFLAG(
        '--move-wildme-ssh')
    wildme_https_flags = GET_ARGFLAG('--move-wildme-https') or GET_ARGFLAG(
        '--move-wildme-http')
    if wildme_ssh_flags or wildme_https_flags:
        fmt = 'ssh' if wildme_ssh_flags else 'https'
        move_wildme(wbia_rman, fmt)

    # Commands on global git repos
    if GET_ARGFLAG('--status'):
        wbia_rman.issue('git status')
        sys.exit(0)

    wbia_rman.ensure()

    if GET_ARGFLAG('--dump') or GET_ARGFLAG('--dump-scripts'):
        dpath = '_super_scripts/' + 'scripts' + get_plat_specifier()
        ut.ensuredir(dpath)
        dumps = [
            (tpl_rman, 'cv2', 'build'),
            (tpl_rman, 'cv2', 'install'),
            (wbia_rman, 'flann', 'build'),
            (wbia_rman, 'flann', 'install'),
            (wbia_rman, 'hesaff', 'build'),
            (tpl_rman, 'PyQt', 'system_to_venv'),
            (tpl_rman, 'libgpuarray', 'build'),
        ]

        for rman, mod, sname in dumps:
            from os.path import join

            # if mod not in rman:
            #     print('mod=%r not available in rman=%r' % (mod, rman))
            #     continue
            script = rman[mod].get_script(sname).text
            suffix = get_plat_specifier()
            sh_fpath = join(dpath, mod + '_' + sname + suffix + '.sh')
            ut.write_to(sh_fpath, script)

    if GET_ARGFLAG('--requirements'):
        ut.cmd('pip install -r requirements.txt')

    # HACKED IN SCRIPTS WHILE IM STILL FIGURING OUT TPL DEPS
    if GET_ARGFLAG('--opencv'):
        # There is now a pypi for opencv! Yay
        # ut.cmd('pip install opencv-python')
        # Bummer, but we need opencv source for pyhessaff
        # we should just make a wheel for pyhessaff
        cv_repo = tpl_rman['cv2']
        cv_repo.clone()
        script = cv_repo.get_script('build')
        script.exec_()
        cv_repo = tpl_rman['cv2']
        script = cv_repo.get_script('install')
        script.exec_()

    if GET_ARGFLAG('--flann'):
        script = wbia_rman['flann'].get_script('build')
        script.exec_()
        script = wbia_rman['flann'].get_script('install')
        script.exec_()

    if GET_ARGFLAG('--pyqt'):
        script = tpl_rman['PyQt'].get_script('system_to_venv')
        script.exec_()

    if GET_ARGFLAG('--hesaff'):
        script = wbia_rman['hesaff'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--pydarknet'):
        script = wbia_rman['pydarknet'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--pyrf'):
        script = wbia_rman['pyrf'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--torch'):
        # Theano and lasange code should be moved to pytorch
        tpl_rman['pytorch'].clone(recursive=True)
        tpl_rman['pytorch'].issue('git submodule update --init')
        tpl_rman['pytorch'].issue('python setup.py install')
        tpl_rman['pytorch'].issue('pip install torchvision')
        # tpl_rman['pytorch'].issue('NO_CUDNN=TRUE && python setup install')
        # tpl_rman['pytorch'].issue('pip install -e .')

    if GET_ARGFLAG('--libgpuarray') or GET_ARGFLAG('--dcnn'):
        tpl_rman['libgpuarray'].clone()
        script = tpl_rman['libgpuarray'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--dcnn'):
        tpl_rman['theano'].clone()
        # tpl_rman['pylearn2'].clone()
        tpl_rman['lasagne'].clone()
        tpl_rman['theano'].issue('pip install -e .')
        # tpl_rman['pylearn2'].issue('pip install -e .')
        tpl_rman['lasagne'].issue('pip install -e .')
        # tpl_rman['pylearn2'].python_develop()
        # tpl_rman['theano'].python_develop()
        # tpl_rman['lasagne'].python_develop()

    # _===

    if GET_ARGFLAG('--fix') or GET_ARGFLAG('--check'):
        missing_dynlib = tpl_rman.check_cpp_build()
        missing_dynlib += wbia_rman.check_cpp_build()

        missing_install = tpl_rman.check_installed()
        missing_install += wbia_rman.check_installed()

        problems = []
        problems += wbia_rman.check_importable()
        problems += tpl_rman.check_importable()

    if GET_ARGFLAG('--fix'):
        print('Trying to fix problems')

        for repo in missing_dynlib:
            repo.custom_build()

        for repo, recommended_fix in problems:
            print('Trying to fix repo = %r' % (repo, ))
            print(' * recommended_fix = %r' % (recommended_fix, ))
            if recommended_fix == 'rebuild':
                repo.custom_build()
                print(
                    'Can currently only fix one module at a time. Please re-run'
                )
                sys.exit(1)
            else:
                print('Not sure how to fix %r' % (repo, ))

    if GET_ARGFLAG('--pull'):
        wbia_rman.issue('git pull')

    if GET_ARGFLAG('--build'):
        # Build tpl repos
        # tpl_rman.custom_build()
        # wbia_rman.custom_build()
        # Build only IBEIS repos with setup.py
        _rman = wbia_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py build'.format(pythoncmd=pythoncmd))

    # Like install, but better if you are developing
    if GET_ARGFLAG('--develop'):
        _rman = wbia_rman.only_with_pysetup()
        # # _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd),
        #               # sudo=not ut.in_virtual_env())
        _rman.issue(
            '{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd),
            sudo=not ut.in_virtual_env(),
        )

    if GET_ARGFLAG('--clean'):
        _rman = wbia_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py clean'.format(pythoncmd=pythoncmd))

    if GET_ARGFLAG('--install'):
        print(
            "WARNING: Don't use install if you are a developer. Use develop instead."
        )
        _rman = wbia_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py install'.format(pythoncmd=pythoncmd))

    if GET_ARGFLAG('--push'):
        wbia_rman.issue('git push')

    if GET_ARGFLAG('--branch'):
        wbia_rman.issue('git branch')
        sys.exit(0)

    if GET_ARGFLAG('--tag-status'):
        wbia_rman.issue('git tag')

    # Tag everything
    tag_name = GET_ARGVAL('--newtag', type_=str, default=None)
    if tag_name is not None:
        wbia_rman.issue(
            'git tag -a "{tag_name}" -m "super_setup autotag {tag_name}"'.
            format(**locals()))
        wbia_rman.issue('git push --tags')

    if GET_ARGFLAG('--bext'):
        wbia_rman.issue('{pythoncmd} setup.py build_ext --inplace'.format(
            pythoncmd=pythoncmd))

    commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
    if commit_msg is not None:
        wbia_rman.issue('git commit -am "{commit_msg}"'.format(**locals()))

    # Change Branch
    branch_name = GET_ARGVAL('--checkout', type_=str, default=None)
    if branch_name is not None:
        try:
            wbia_rman.issue('git checkout "{branch_name}"'.format(**locals()))
        except Exception:
            print('ERROR: Could not checkout branch: %r' % (branch_name, ))

    # Create a new branch and push it upstream
    newbranch_name = GET_ARGVAL('--newbranch', type_=str, default=None)
    if newbranch_name is not None:
        # rman.issue('git stash"'.format(**locals()))
        wbia_rman.issue(
            'git checkout -b "{newbranch_name}"'.format(**locals()))
        wbia_rman.issue(
            'git push --set-upstream origin {newbranch_name}'.format(
                **locals()))
        # rman.issue('git stash pop"'.format(**locals()))

    # Create a new local branch (without pushing upstream)
    newlocalbranch_name = GET_ARGVAL('--newlocalbranch',
                                     type_=str,
                                     default=None)
    if newlocalbranch_name is not None:
        # rman.issue('git stash"'.format(**locals()))
        wbia_rman.issue(
            'git checkout -b "{newlocalbranch_name}"'.format(**locals()))
        # rman.issue('git push --set-upstream origin {newlocalbranch_name}'.format(**locals()))
        # rman.issue('git stash pop"'.format(**locals()))

    # Merge the given branch into the current branch
    mergebranch_name = GET_ARGVAL('--merge', type_=str, default=None)
    if mergebranch_name is not None:
        wbia_rman.issue('git merge "{mergebranch_name}"'.format(**locals()))

    # Change permissions
    if GET_ARGFLAG('--serverchmod'):
        wbia_rman.issue('chmod -R 755 *')

    if GET_ARGFLAG('--chown'):
        # Fixes problems where repos are checked out as root
        username = os.environ.get('USERNAME', ut.get_argval('--username'))
        if username is None:
            username = os.environ.get('USER', None)
        if username is None:
            raise AssertionError(
                'cannot find username in commandline or environment vars')
        usergroup = username
        wbia_rman.issue('chown -R {username}:{usergroup} *'.format(**locals()),
                        sudo=True)

    upstream_branch = GET_ARGVAL('--set-upstream', type_=str, default=None)
    if upstream_branch is not None:
        # --set-upstream-to requires git >= 1.8
        wbia_rman.issue(
            'git branch --set-upstream-to=origin/{upstream_branch} {upstream_branch}'
            .format(**locals()))

    upstream_push = GET_ARGVAL('--upstream-push', type_=str, default=None)
    if upstream_push is not None:
        wbia_rman.issue(
            'git push --set-upstream origin {upstream_push}'.format(
                **locals()))

    if GET_ARGFLAG('--test'):
        failures = []
        for repo_dpath in wbia_rman.repo_dirs:
            mod_dpaths = ut.get_submodules_from_dpath(repo_dpath,
                                                      recursive=False,
                                                      only_packages=True)
            modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
            print('Checking modules = %r' % (modname_list, ))

            for modname in modname_list:
                try:
                    ut.import_modname(modname)
                    print(modname + ' success')
                except ImportError:
                    failures += [modname]
                    print(modname + ' failure')

        print('failures = %s' % (ut.repr3(failures), ))

    if False:  # NOTE: this global-git-command block is currently disabled
        try:
            from six.moves import input
        except ImportError:
            input = raw_input  # NOQA
        # General global git command
        gg_cmd = GET_ARGVAL('--gg', type_=str, default=None)  # global command
        if gg_cmd is not None:
            ans = ('yes' if GET_ARGFLAG('-y') else input(
                'Are you sure you want to run: %r on all directories? ' %
                (gg_cmd, )))
            if ans == 'yes':
                wbia_rman.issue(gg_cmd)
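
# GET_ARGFLAG and GET_ARGVAL are not defined in this snippet. As a minimal
# sketch, assuming they simply alias utool's command-line helpers
# (ut.get_argflag and ut.get_argval do exist in utool; the aliasing itself is
# a guess), they could look like:

import utool as ut

def GET_ARGFLAG(flag):
    # True if the flag (e.g. '--build') was passed on the command line
    return ut.get_argflag(flag)

def GET_ARGVAL(key, type_=None, default=None):
    # The value following the key (e.g. '--commit "msg"'), cast to type_,
    # or the default if the key was not passed
    return ut.get_argval(key, type_=type_, default=default)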
Example #46
def write_default_ipython_profile():
    """
    CommandLine:
        python ~/local/init/init_ipython_config.py

        python -c "import utool as ut; ut.vd(ut.unixpath('~/.ipython/profile_default'))"
        python -c "import utool as ut; ut.editfile(ut.unixpath('~/.ipython/profile_default/ipython_config.py'))"

    References:
        http://2sn.org/python/ipython_config.py
    """
    dpath = ut.unixpath('~/.ipython/profile_default')
    ut.ensuredir(dpath, info=True, verbose=True)
    ipy_config_fpath = ut.unixjoin(dpath, 'ipython_config.py')
    ipy_config_text = ut.codeblock(r'''
        # STARTBLOCK
        c = get_config()  # NOQA
        c.InteractiveShellApp.exec_lines = []
        import six
        if six.PY2:
            future_line = (
                'from __future__ import absolute_import, division, print_function, with_statement, unicode_literals')
            c.InteractiveShellApp.exec_lines.append(future_line)
            # Fix sip versions
            try:
                import sip
                # http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
                sip.setapi('QVariant', 2)
                sip.setapi('QString', 2)
                sip.setapi('QTextStream', 2)
                sip.setapi('QTime', 2)
                sip.setapi('QUrl', 2)
                sip.setapi('QDate', 2)
                sip.setapi('QDateTime', 2)
                if hasattr(sip, 'setdestroyonexit'):
                    sip.setdestroyonexit(False)  # This prevents a crash on windows
            except ImportError as ex:
                pass
            except ValueError as ex:
                print('Warning: Value Error: %s' % str(ex))
                pass
        c.InteractiveShellApp.exec_lines.append('%load_ext autoreload')
        c.InteractiveShellApp.exec_lines.append('%autoreload 2')
        #c.InteractiveShellApp.exec_lines.append('%pylab qt4')
        c.InteractiveShellApp.exec_lines.append('import numpy as np')
        c.InteractiveShellApp.exec_lines.append('import ubelt as ub')
        c.InteractiveShellApp.exec_lines.append('import utool as ut')
        c.InteractiveShellApp.exec_lines.append('import pandas as pd')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.max_columns = 40')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.width = 160')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.max_rows = 20')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.float_format = lambda x: \'%.4f\' % (x,)')
        c.InteractiveShellApp.exec_lines.append('import networkx as nx')
        #c.InteractiveShellApp.exec_lines.append('import plottool as pt')
        c.InteractiveShellApp.exec_lines.append('from os.path import *')
        c.InteractiveShellApp.exec_lines.append('from six.moves import cPickle as pickle')
        #c.InteractiveShellApp.exec_lines.append('if \'verbose\' not in vars():\\n    verbose = True')
        import utool as ut
        c.InteractiveShellApp.exec_lines.append(ut.codeblock(
            """
            class classproperty(property):
                def __get__(self, cls, owner):
                    return classmethod(self.fget).__get__(None, owner)()
            class vim(object):
                @classproperty
                def focus(cls):
                    import utool.util_ubuntu
                    utool.util_ubuntu.xctrl.do(('focus', 'GVIM'),)
                @classproperty
                def copy(cls):
                    import utool.util_ubuntu
                    utool.util_ubuntu.xctrl.do(('focus', 'GVIM'),)
                    import utool as ut
                    import IPython
                    ipy = IPython.get_ipython()
                    lastline = ipy.history_manager.input_hist_parsed[-2]
                    ut.copy_text_to_clipboard(lastline)
                    # import utool as ut
                    import utool.util_ubuntu
                    utool.util_ubuntu.xctrl.do(
                        ('focus', 'GVIM'),
                        ('key', 'ctrl+v'),
                        ('focus', 'x-terminal-emulator.X-terminal-emulator')
                    )
            """
        ))
        #c.InteractiveShell.autoindent = True
        #c.InteractiveShell.colors = 'LightBG'
        #c.InteractiveShell.confirm_exit = False
        #c.InteractiveShell.deep_reload = True
        c.InteractiveShell.editor = 'gvim'
        #c.InteractiveShell.xmode = 'Context'
        # ENDBLOCK
        ''')
    ut.write_to(ipy_config_fpath, ipy_config_text)
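
# The ut.codeblock call above is what lets the indented triple-quoted template
# be written out flush-left. A rough sketch of its behavior (a simplification;
# utool's actual implementation may differ):

import textwrap

def codeblock(text):
    # Strip the common leading indentation and the surrounding blank lines
    # introduced by the triple-quoted literal.
    return textwrap.dedent(text).strip('\n')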
Example #47
def test_reloading_metaclass():
    r"""
    CommandLine:
        python -m utool.util_class --test-test_reloading_metaclass

    References:
        http://stackoverflow.com/questions/8122734/pythons-imp-reload-function-is-not-working

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_class import *  # NOQA
        >>> result = test_reloading_metaclass()
        >>> print(result)
    """
    import utool as ut
    testdir = ut.ensure_app_resource_dir('utool', 'metaclass_tests')
    testfoo_fpath = ut.unixjoin(testdir, 'testfoo.py')
    # os.chdir(testdir)
    #with ut.ChdirContext(testdir, stay=ut.inIPython()):
    with ut.ChdirContext(testdir):
        foo_code1 = ut.codeblock(
            r'''
            # STARTBLOCK
            import utool as ut
            import six


            @six.add_metaclass(ut.ReloadingMetaclass)
            class Foo(object):
                def __init__(self):
                    pass

            spamattr = 'version1'
            # ENDBLOCK
            '''
        )
        foo_code2 = ut.codeblock(
            r'''
            # STARTBLOCK
            import utool as ut
            import six


            @six.add_metaclass(ut.ReloadingMetaclass)
            class Foo(object):
                def __init__(self):
                    pass

                def bar(self):
                    return 'spam'

            eggsattr = 'version2'
            # ENDBLOCK
            '''
        )
        # Write a testclass to disk
        ut.delete(testfoo_fpath)
        ut.write_to(testfoo_fpath, foo_code1, verbose=True)
        testfoo = ut.import_module_from_fpath(testfoo_fpath)
        #import testfoo
        foo = testfoo.Foo()
        print('foo = %r' % (foo,))
        assert not hasattr(foo, 'bar'), 'foo should not have a bar attr'
        # remove the .pyc; the rewritten source has the same timestamp, so
        # Python would otherwise keep using the stale bytecode
        ut.delete(testfoo_fpath + 'c')
        ut.write_to(testfoo_fpath, foo_code2, verbose=True)
        assert not hasattr(foo, 'bar'), 'foo should still not have a bar attr'
        foo.rrr()
        assert foo.bar() == 'spam'
        ut.delete(testfoo_fpath)
        print('Reloading worked nicely')
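
# The foo.rrr() call is what makes the newly added bar method appear on the
# existing instance. A minimal sketch of the underlying idea (not utool's
# actual ReloadingMetaclass, which handles many more edge cases), assuming
# Python 3's importlib and a class defined at module top level:

import importlib
import sys

class MiniReloadable(object):
    def rrr(self):
        # Re-import the module that defines this class, then rebind the
        # instance to the freshly reloaded class object, so methods added to
        # the source after instantiation become visible on `self`.
        module = importlib.reload(sys.modules[self.__class__.__module__])
        self.__class__ = getattr(module, type(self).__name__)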