Example #1
0
def on_pick(event, infr=None):
    """Matplotlib pick-event callback: log the plot data stored on the clicked artist."""
    import wbia.plottool as pt

    logger.info('ON PICK: %r' % (event, ))
    picked = event.artist
    data = pt.get_plotdat_dict(picked)
    if data:
        if 'node' in data:
            # Separate the visual attributes from the semantic node data
            sorted_data = ut.sort_dict(data['node_data'].copy())
            visual_node_data = ut.dict_subset(sorted_data,
                                              infr.visual_node_attrs, None)
            node_data = ut.delete_dict_keys(sorted_data,
                                            infr.visual_node_attrs)
            node = data['node']
            node_data['degree'] = infr.graph.degree(node)
            node_label = infr.pos_graph.node_label(node)
            logger.info('visual_node_data: ' +
                        ut.repr2(visual_node_data, nl=1))
            logger.info('node_data: ' + ut.repr2(node_data, nl=1))
            ut.cprint('node: ' + ut.repr2(data['node']), 'blue')
            logger.info('(pcc) node_label = %r' % (node_label, ))
            logger.info('artist = %r' % (picked, ))
        elif 'edge' in data:
            sorted_edge_data = ut.sort_dict(data['edge_data'].copy())
            logger.info(infr.repr_edge_data(sorted_edge_data))
            ut.cprint('edge: ' + ut.repr2(data['edge']), 'blue')
            logger.info('artist = %r' % (picked, ))
        else:
            logger.info('???: ' + ut.repr2(data))
    logger.info(ut.get_timestamp())
Example #2
0
def package_installer():
    """
    system dependent post pyinstaller step

    Builds the platform-appropriate installer artifact and moves it into
    the ``dist`` directory under a timestamped filename.
    """
    print('[installer] +--- PACKAGE_INSTALLER ---')
    cwd = get_setup_dpath()
    # Build the os-appropriate package
    if sys.platform.startswith('win32'):
        installer_src = build_win32_inno_installer()
        installer_fname_fmt = 'ibeis-win32-install-{timestamp}.exe'
    elif sys.platform.startswith('darwin'):
        installer_src = build_osx_dmg_installer()
        installer_fname_fmt = 'ibeis-osx-install-{timestamp}.dmg'
    elif sys.platform.startswith('linux'):
        installer_src = build_linux_zip_binaries()
        installer_fname_fmt = 'ibeis-linux-binary-{timestamp}.zip'
    else:
        # Fix: an unsupported platform used to fall through and crash with a
        # confusing NameError at the format() call below; fail loudly instead.
        raise NotImplementedError('unsupported platform: %r' % (sys.platform, ))
    # timestamp the installer name
    installer_fname = installer_fname_fmt.format(timestamp=ut.get_timestamp())
    installer_dst = join(cwd, 'dist', installer_fname)
    try:
        ut.move(installer_src, installer_dst)
    except Exception as ex:
        # Best-effort move: warn instead of aborting the packaging step
        ut.printex(ex, 'error moving setups', iswarning=True)
    print('[installer] L___ FINISH PACKAGE_INSTALLER ___')
Example #3
0
def package_installer():
    """
    system dependent post pyinstaller step

    Builds the platform-appropriate installer artifact and moves it into
    the ``dist`` directory under a timestamped filename.
    """
    print('[installer] +--- PACKAGE_INSTALLER ---')
    cwd = get_setup_dpath()
    # Build the os-appropriate package
    if sys.platform.startswith('win32'):
        installer_src = build_win32_inno_installer()
        installer_fname_fmt = 'ibeis-win32-install-{timestamp}.exe'
    elif sys.platform.startswith('darwin'):
        installer_src = build_osx_dmg_installer()
        installer_fname_fmt = 'ibeis-osx-install-{timestamp}.dmg'
    elif sys.platform.startswith('linux'):
        installer_src = build_linux_zip_binaries()
        installer_fname_fmt = 'ibeis-linux-binary-{timestamp}.zip'
    else:
        # Fix: an unsupported platform used to fall through and crash with a
        # confusing NameError at the format() call below; fail loudly instead.
        raise NotImplementedError('unsupported platform: %r' % (sys.platform, ))
    # timestamp the installer name
    installer_fname = installer_fname_fmt.format(timestamp=ut.get_timestamp())
    installer_dst = join(cwd, 'dist', installer_fname)
    try:
        ut.move(installer_src, installer_dst)
    except Exception as ex:
        # Best-effort move: warn instead of aborting the packaging step
        ut.printex(ex, 'error moving setups', iswarning=True)
    print('[installer] L___ FINISH PACKAGE_INSTALLER ___')
Example #4
0
def build_runbench_pyth_text(cy_bench_list):
    """Generate the source text of a script that runs every cyth benchmark.

    Args:
        cy_bench_list (list): file paths of benchmark modules

    Returns:
        str: python script text that imports each benchmark module, calls its
        ``run_all_benchmarks(iterations)``, and prints the sorted results
    """
    # write script to run all cyth benchmarks
    runbench_pytext_fmt_ = r'''
    #!/usr/bin/env python
    " Autogenerated by cyth on {timestamp} "
    from __future__ import absolute_import, division, print_function
    import utool
    {bench_import_text}

    SORTBY = utool.get_argval('--sortby', str, 'python')

    if __name__ == '__main__':
        all_results = []
        iterations = utool.get_argval(('--iterations', '-n'), type_=int, default=100)

        # Run the benchmarks
        {bench_runline_text}
        # Sort by chosen field
        sortable_fields = ['python', 'cython']
        sortx = sortable_fields.index(SORTBY)
        sorted_allresults = sorted(all_results, key=lambda tup: tup[sortx])
        sorted_lines = [tup[2] for tup in sorted_allresults]
        # Report sorted results
        print('\n\n')
        print('==================================')
        print('Aggregating all benchmarks results')
        print('==================================')
        print('\n')
        print('sorting by %s' % sortable_fields[sortx])
        print('\n'.join(utool.flatten(sorted_lines)))
    '''
    runbench_pytext_fmt = utool.unindent(runbench_pytext_fmt_).strip('\n')
    from os.path import relpath, splitext
    import os

    def bench_fpath_to_modname(bench):
        # Convert a benchmark file path into a dotted module name relative to
        # the current working directory
        bench_upath = utool.unixpath(bench)
        bench_relpath = relpath(bench_upath, os.getcwd())
        bench_relname, _ = splitext(bench_relpath)
        bench_modname = bench_relname.replace('\\', '/').replace('/', '.')
        return bench_modname

    bench_modnames = list(map(bench_fpath_to_modname, cy_bench_list))

    # One import line and one run line per benchmark module
    bench_imports = [
        'import ' + bench_modname for bench_modname in bench_modnames
    ]
    runline_fmt = 'all_results.extend({bench_modname}.run_all_benchmarks(iterations))'
    bench_runlines = [
        runline_fmt.format(bench_modname=bench_modname)
        for bench_modname in bench_modnames
    ]
    bench_import_text = '\n'.join(bench_imports)
    bench_runline_text = '\n    '.join(bench_runlines)
    timestamp = utool.get_timestamp()  # NOQA
    runbench_pytext = runbench_pytext_fmt.format(
        timestamp=timestamp,
        bench_runline_text=bench_runline_text,
        bench_import_text=bench_import_text)
    return runbench_pytext
Example #5
0
def build_matrix_str(allres):
    """Build a csv-style score-matrix report and store it on ``allres.matrix_str``.

    Header comment lines describe the row/column labels; the body contains one
    comma-separated row of '%5.2f'-formatted scores per query.
    """
    ibs = allres.ibs
    cx2_gx = ibs.tables.cx2_gx
    gx2_gname = ibs.tables.gx2_gname

    def cx2_gname(rid):
        # NOTE(review): ``rid`` is never used here, so row and column labels
        # come out identical; possibly should index via ``cx2_gx[rid]`` --
        # confirm intent against callers
        return [os.path.splitext(gname)[0] for gname in gx2_gname[cx2_gx]]

    # Image-name labels (extension stripped) for columns and rows
    col_label_gname = cx2_gname(allres.col_label_rid)
    row_label_gname = cx2_gname(allres.row_label_rid)
    timestamp = utool.get_timestamp(format_='comment') + '\n'
    header = '\n'.join([
        '# Result score matrix', '# Generated on: ' + timestamp,
        '# Format: rows separated by newlines, cols separated by commas',
        '# num_queries  / rows = ' + repr(len(row_label_gname)),
        '# num_indexed  / cols = ' + repr(len(col_label_gname)),
        '# row_labels = ' + repr(row_label_gname),
        '# col_labels = ' + repr(col_label_gname)
    ])
    row_strings = []
    for row in allres.score_matrix:
        row_str = map(lambda x: '%5.2f' % x, row)
        row_strings.append(', '.join(row_str))
    body = '\n'.join(row_strings)
    matrix_str = '\n'.join([header, body])
    allres.matrix_str = matrix_str
Example #6
0
def build_matrix_str(allres):
    """Construct the score-matrix csv text and attach it to ``allres.matrix_str``."""
    ibs = allres.ibs
    cx2_gx = ibs.tables.cx2_gx
    gx2_gname = ibs.tables.gx2_gname

    def cx2_gname(rid):
        # NOTE: ``rid`` is unused (mirrors the original behavior)
        return [os.path.splitext(gname)[0] for gname in gx2_gname[cx2_gx]]

    col_label_gname = cx2_gname(allres.col_label_rid)
    row_label_gname = cx2_gname(allres.row_label_rid)
    timestamp = utool.get_timestamp(format_='comment') + '\n'
    # Commented header describing the matrix layout and its labels
    header_lines = [
        '# Result score matrix',
        '# Generated on: ' + timestamp,
        '# Format: rows separated by newlines, cols separated by commas',
        '# num_queries  / rows = ' + repr(len(row_label_gname)),
        '# num_indexed  / cols = ' + repr(len(col_label_gname)),
        '# row_labels = ' + repr(row_label_gname),
        '# col_labels = ' + repr(col_label_gname),
    ]
    header = '\n'.join(header_lines)
    # One comma-separated line of formatted scores per row
    body = '\n'.join(
        ', '.join('%5.2f' % val for val in row)
        for row in allres.score_matrix
    )
    allres.matrix_str = '\n'.join([header, body])
Example #7
0
    def review(oracle, edge, truth, infr, accuracy=None):
        """Simulate a manual review of ``edge``, possibly answering wrongly.

        Produces a feedback dict mimicking a human reviewer whose error rate
        is governed by ``accuracy`` (defaulting to the oracle's recovery or
        normal accuracy depending on the inference state).
        """
        feedback = {
            'user_id': 'user:oracle',
            'confidence': 'absolutely_sure',
            'evidence_decision': None,
            'meta_decision': NULL,
            'timestamp_s1': ut.get_timestamp('int', isutc=True),
            'timestamp_c1': ut.get_timestamp('int', isutc=True),
            'timestamp_c2': ut.get_timestamp('int', isutc=True),
            'tags': [],
        }
        is_recovering = infr.is_recovering()

        if accuracy is None:
            accuracy = (oracle.recover_accuracy if is_recovering
                        else oracle.normal_accuracy)

        # The oracle answers correctly whenever the sampled hardness falls
        # below its accuracy
        hardness = oracle.rng.random()
        error = accuracy < hardness

        if error:
            wrong_states = list(oracle.states - {truth} - {INCMP})
            observed = oracle.rng.choice(list(wrong_states))
        else:
            observed = truth

        # Lower accuracy implies a lower reported confidence
        if accuracy < 0.5:
            feedback['confidence'] = 'guessing'
        elif accuracy < 1.0:
            feedback['confidence'] = 'pretty_sure'
        feedback['evidence_decision'] = observed

        if error:
            infr.print(
                'ORACLE ERROR real={} pred={} acc={:.2f} hard={:.2f}'.format(
                    truth, observed, accuracy, hardness),
                2,
                color='red',
            )
        return feedback
Example #8
0
def dump_profile_text():
    """Print the utool profiler results and write them to timestamped files.

    Writes ``profile_output.txt`` plus a timestamped copy; degrades to a
    message when profiling was never enabled.
    """
    import utool as ut
    print("Dumping Profile Information")
    profile = ut.PROFILE_FUNC
    try:
        output_text, summary_text = get_profile_text(profile)
    except AttributeError:
        # Fix: consistent with the robust variant of this function -- when
        # profiling is off, ut.PROFILE_FUNC lacks the expected stats and this
        # used to crash instead of reporting the situation.
        print('profile is not on')
    else:
        print(summary_text)
        ut.writeto('profile_output.txt', output_text + '\n' + summary_text)
        ut.writeto('profile_output.%s.txt' % (ut.get_timestamp()),
                   output_text + '\n' + summary_text)
Example #9
0
def make_bench_text(benchmark_codes, benchmark_names, py_modname):
    """Assemble the full source text of an autogenerated benchmark module."""
    # TODO: let each function individually specify number
    codes = '\n\n\n'.join(benchmark_codes)  # NOQA
    # One 'results.extend(<benchfunc>(iterations))' line per benchmark function
    list_ = [utool.quasiquote('results.extend({benchfunc}(iterations))')
             for benchfunc in benchmark_names]
    all_benchmarks = utool.indent('\n'.join(list_), ' ' * 8).strip()  # NOQA
    timestamp = utool.get_timestamp()  # NOQA
    bench_text_fmt = get_bench_text_fmt()
    # quasiquote appears to interpolate the surrounding local variables into
    # the template, which is why the "unused" locals above carry NOQA markers
    # -- do not rename them; TODO confirm against utool.quasiquote
    bench_text = utool.quasiquote(bench_text_fmt)
    return bench_text
Example #10
0
def build_runbench_pyth_text(cy_bench_list):
    """Generate a python script that imports and runs every cyth benchmark."""
    # Template for the autogenerated runner script
    runbench_pytext_fmt_ = r'''
    #!/usr/bin/env python
    " Autogenerated by cyth on {timestamp} "
    from __future__ import absolute_import, division, print_function
    import utool
    {bench_import_text}

    SORTBY = utool.get_argval('--sortby', str, 'python')

    if __name__ == '__main__':
        all_results = []
        iterations = utool.get_argval(('--iterations', '-n'), type_=int, default=100)

        # Run the benchmarks
        {bench_runline_text}
        # Sort by chosen field
        sortable_fields = ['python', 'cython']
        sortx = sortable_fields.index(SORTBY)
        sorted_allresults = sorted(all_results, key=lambda tup: tup[sortx])
        sorted_lines = [tup[2] for tup in sorted_allresults]
        # Report sorted results
        print('\n\n')
        print('==================================')
        print('Aggregating all benchmarks results')
        print('==================================')
        print('\n')
        print('sorting by %s' % sortable_fields[sortx])
        print('\n'.join(utool.flatten(sorted_lines)))
    '''
    script_fmt = utool.unindent(runbench_pytext_fmt_).strip('\n')
    from os.path import relpath, splitext
    import os

    def _to_modname(bench_fpath):
        # Turn a benchmark file path into a dotted module name relative to cwd
        rel = relpath(utool.unixpath(bench_fpath), os.getcwd())
        base, _ = splitext(rel)
        return base.replace('\\', '/').replace('/', '.')

    modnames = [_to_modname(bench) for bench in cy_bench_list]

    import_lines = ['import ' + modname for modname in modnames]
    run_lines = [
        'all_results.extend({}.run_all_benchmarks(iterations))'.format(modname)
        for modname in modnames
    ]
    timestamp = utool.get_timestamp()
    return script_fmt.format(
        timestamp=timestamp,
        bench_runline_text='\n    '.join(run_lines),
        bench_import_text='\n'.join(import_lines),
    )
Example #11
0
def make_bench_text(benchmark_codes, benchmark_names, py_modname):
    """Assemble the full source text of an autogenerated benchmark module."""
    # TODO: let each function individually specify number
    codes = '\n\n\n'.join(benchmark_codes)  # NOQA
    # One 'results.extend(<benchfunc>(iterations))' line per benchmark function
    list_ = [
        utool.quasiquote('results.extend({benchfunc}(iterations))')
        for benchfunc in benchmark_names
    ]
    all_benchmarks = utool.indent('\n'.join(list_), ' ' * 8).strip()  # NOQA
    timestamp = utool.get_timestamp()  # NOQA
    bench_text_fmt = get_bench_text_fmt()
    # quasiquote appears to interpolate the surrounding local variables into
    # the template, which is why the "unused" locals above carry NOQA markers
    # -- do not rename them; TODO confirm against utool.quasiquote
    bench_text = utool.quasiquote(bench_text_fmt)
    return bench_text
Example #12
0
def inject_python_code(fpath, patch_code, tag=None,
                       inject_location='after_imports'):
    """
    DEPRICATE
    puts code into files on disk

    Splices ``patch_code`` into ``fpath`` between the marker comments
    ``# <util_inject:tag>`` / ``# </util_inject:tag>``, creating them after
    the import block on first injection. A timestamped ``.bak`` copy of the
    original text is written before the file is overwritten.
    """
    import utool as ut
    assert tag is not None, 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT'
    text = ut.read_from(fpath)
    comment_start_tag = '# <util_inject:%s>' % tag
    comment_end_tag = '# </util_inject:%s>' % tag

    tagstart_txtpos = text.find(comment_start_tag)
    tagend_txtpos = text.find(comment_end_tag)

    text_lines = ut.split_python_text_into_lines(text)

    # split the file into two parts and inject code between them
    if tagstart_txtpos != -1 or tagend_txtpos != -1:
        assert tagstart_txtpos != -1, 'both tags must not be found'
        assert tagend_txtpos != -1, 'both tags must not be found'

        tagstart_pos = tagend_pos = None
        for pos, line in enumerate(text_lines):
            if line.startswith(comment_start_tag):
                tagstart_pos = pos
            if line.startswith(comment_end_tag):
                tagend_pos = pos
        # Fix: the tags were located somewhere in the text by find(), but may
        # not start a line; previously the positions stayed unbound and the
        # slicing below crashed with a confusing NameError.
        assert tagstart_pos is not None and tagend_pos is not None, (
            'inject tags must appear at the start of a line')
        part1 = text_lines[0:tagstart_pos]
        part2 = text_lines[tagend_pos + 1:]
    else:
        if inject_location == 'after_imports':
            # Skip past leading imports, comments, blanks, and continuations
            first_nonimport_pos = 0
            for line in text_lines:
                list_ = ['import ', 'from ', '#', ' ']
                isvalid = (len(line) == 0 or
                           any(line.startswith(str_) for str_ in list_))
                if not isvalid:
                    break
                first_nonimport_pos += 1
            part1 = text_lines[0:first_nonimport_pos]
            part2 = text_lines[first_nonimport_pos:]
        else:
            raise AssertionError('Unknown inject location')

    newtext = (
        '\n'.join(part1 + [comment_start_tag]) +
        '\n' + patch_code + '\n' +
        '\n'.join([comment_end_tag] + part2)
    )
    # Back up the original text before overwriting the file
    text_backup_fname = fpath + '.' + ut.get_timestamp() + '.bak'
    ut.write_to(text_backup_fname, text)
    ut.write_to(fpath, newtext)
Example #13
0
def inject_python_code(fpath,
                       patch_code,
                       tag=None,
                       inject_location='after_imports'):
    """
    DEPRICATE
    puts code into files on disk
    """
    import utool as ut
    assert tag is not None, 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT'
    text = ut.read_from(fpath)
    start_tag = '# <util_inject:%s>' % tag
    end_tag = '# </util_inject:%s>' % tag

    start_found = text.find(start_tag) != -1
    end_found = text.find(end_tag) != -1

    lines = ut.split_python_text_into_lines(text)

    # Split the file into a head and a tail; the patch goes in between
    if start_found or end_found:
        assert start_found, 'both tags must not be found'
        assert end_found, 'both tags must not be found'

        for lineno, line in enumerate(lines):
            if line.startswith(start_tag):
                start_lineno = lineno
            if line.startswith(end_tag):
                end_lineno = lineno
        head = lines[0:start_lineno]
        tail = lines[end_lineno + 1:]
    else:
        if inject_location != 'after_imports':
            raise AssertionError('Unknown inject location')
        # Skip past leading imports, comments, blanks, and continuations
        cut = 0
        prefixes = ('import ', 'from ', '#', ' ')
        for line in lines:
            if line and not line.startswith(prefixes):
                break
            cut += 1
        head = lines[0:cut]
        tail = lines[cut:]

    newtext = ('\n'.join(head + [start_tag]) + '\n' + patch_code +
               '\n' + '\n'.join([end_tag] + tail))
    # Keep a timestamped backup of the original file contents
    backup_fpath = fpath + '.' + ut.get_timestamp() + '.bak'
    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
Example #14
0
def dump_profile_text():
    """Print the utool profiler summary and write it to timestamped files."""
    import utool as ut
    print("Dumping Profile Information")
    profile = ut.PROFILE_FUNC
    try:
        output_text, summary_text = get_profile_text(profile)
    except AttributeError:
        # Profiling was never enabled on this run
        print('profile is not on')
        return
    print(summary_text)
    combined = output_text + '\n' + summary_text
    ut.writeto('profile_output.txt', combined)
    ut.writeto('profile_output.%s.txt' % (ut.get_timestamp()), combined)
Example #15
0
def _detect_remote_push_imageset(ibs, image_uuid_list):
    """PUT an imageset-text assignment for the given images to the remote API."""
    route_url = _construct_route_url('/api/image/imageset/text/json/')

    # Label every image with a sync marker naming this database and time
    db_name = ibs.get_dbname()
    db_uuid = ibs.get_db_init_uuid()
    time_str = ut.get_timestamp()
    imageset_text = 'Sync from %s (%s) at %s' % (db_name, db_uuid, time_str)

    payload = {
        'image_uuid_list': image_uuid_list,
        'imageset_text_list': [imageset_text] * len(image_uuid_list),
    }
    # JSON-encode every value before sending
    data_dict = {key: ut.to_json(value) for key, value in payload.items()}
    response = requests.put(route_url, data=data_dict)
    _verify_response(response)
Example #16
0
def __dump_text_report(allres, report_type=None):
    """Write the named report string from ``allres`` to a csv in the result
    directory plus a timestamped copy under ``timestamped_results``.

    Args:
        allres: results object holding the report string and directories
        report_type (str): attribute name of the report on ``allres``;
            defaults to 'rankres_str'
    """
    # Fix: the old guard ``if not 'report_type' in vars()`` could never fire
    # because the parameter is always a local name; use an explicit,
    # backward-compatible None default instead.
    if report_type is None:
        report_type = 'rankres_str'
    print('[rr2] Dumping textfile: ' + report_type)
    report_str = allres.__dict__[report_type]
    # Get directories
    result_dir = allres.ibs.dirs.result_dir
    timestamp_dir = join(result_dir, 'timestamped_results')
    utool.ensurepath(timestamp_dir)
    utool.ensurepath(result_dir)
    # Write to timestamp and result dir
    timestamp = utool.get_timestamp()
    csv_timestamp_fname = report_type + allres.title_suffix + timestamp + '.csv'
    csv_timestamp_fpath = join(timestamp_dir, csv_timestamp_fname)
    csv_fname = report_type + allres.title_suffix + '.csv'
    csv_fpath = join(result_dir, csv_fname)
    utool.write_to(csv_fpath, report_str)
    utool.write_to(csv_timestamp_fpath, report_str)
Example #17
0
def __dump_text_report(allres, report_type):
    """Dump the named report string from ``allres`` into the result directory
    and a timestamped copy alongside it.
    """
    # NOTE: this guard can never fire (``report_type`` is a parameter and is
    # therefore always in vars()); kept verbatim for behavioral parity
    if 'report_type' not in vars():
        report_type = 'rankres_str'
    print('[rr2] Dumping textfile: ' + report_type)
    report_str = allres.__dict__[report_type]
    # Resolve output directories
    result_dir = allres.ibs.dirs.result_dir
    timestamp_dir = join(result_dir, 'timestamped_results')
    utool.ensurepath(timestamp_dir)
    utool.ensurepath(result_dir)
    # Write the stable csv plus a timestamped copy
    timestamp = utool.get_timestamp()
    base_fname = report_type + allres.title_suffix
    csv_timestamp_fpath = join(timestamp_dir, base_fname + timestamp + '.csv')
    csv_fpath = join(result_dir, base_fname + '.csv')
    utool.write_to(csv_fpath, report_str)
    utool.write_to(csv_timestamp_fpath, report_str)
Example #18
0
def make_setup(repodir):
    """Autogenerate the text of a minimal ``setup.py`` for ``repodir``."""
    template_ = '''
    # autogenerated setup.py on {timestamp} for {repodir}
    from __future__ import absolute_import, division, print_function
    import setuptools
    #from utool import util_setup

    INSTALL_REQUIRES = [
    ]

    if __name__ == '__main__':
        setuptools.setup(
            name='{pkgname}',
            packages={packages},
            #packages=util_setup.find_packages(),
            #version='0.0.0.autogen',
            #description='short description',
            #url='',
            #ext_modules=util_setup.find_ext_modules(),
            #cmdclass=util_setup.get_cmdclass(),
            #author=''
            #author_email='',
            #keywords='',
            #install_requires=INSTALL_REQUIRES,
            package_data={{}},
            scripts=[
            ],
            classifiers=[],
        )
    '''
    template = utool.unindent(template_)
    timestamp = utool.get_timestamp()
    # Package name is the repository directory's basename
    pkgname = basename(repodir)
    packages = utool.ls_moduledirs(repodir, full=False)
    print(pkgname)
    return template.format(
        packages=packages,
        repodir=repodir,
        timestamp=timestamp,
        pkgname=pkgname,
    )
Example #19
0
File: makesetup.py  Project: animalus/utool
def make_setup(repodir):
    """Autogenerate the text of a minimal ``setup.py`` for ``repodir``.

    Returns:
        str: setup.py source with the package name, package list, repo path,
        and timestamp substituted into the template below
    """
    setup_text_fmt_ = '''
    # autogenerated setup.py on {timestamp} for {repodir}
    from __future__ import absolute_import, division, print_function
    import setuptools
    #from utool import util_setup

    INSTALL_REQUIRES = [
    ]

    if __name__ == '__main__':
        setuptools.setup(
            name='{pkgname}',
            packages={packages},
            #packages=util_setup.find_packages(),
            #version='0.0.0.autogen',
            #description='short description',
            #url='',
            #ext_modules=util_setup.find_ext_modules(),
            #cmdclass=util_setup.get_cmdclass(),
            #author=''
            #author_email='',
            #keywords='',
            #install_requires=INSTALL_REQUIRES,
            package_data={{}},
            scripts=[
            ],
            classifiers=[],
        )
    '''
    setup_text_fmt = utool.unindent(setup_text_fmt_)
    timestamp = utool.get_timestamp()
    # Package name is the repository directory's basename
    pkgname = basename(repodir)
    packages = utool.ls_moduledirs(repodir, full=False)
    print(pkgname)
    setup_text = setup_text_fmt.format(
        packages=packages,
        repodir=repodir,
        timestamp=timestamp,
        pkgname=pkgname,
    )
    return setup_text
Example #20
0
    def __init__(
        qres_wgt,
        ibs,
        cm_list,
        parent=None,
        callback=None,
        qreq_=None,
        query_title='',
        review_cfg=None,
    ):
        """Initialize the query results review widget.

        Args:
            ibs: database controller
            cm_list (list): ChipMatch result objects to review (a dict is rejected)
            parent: optional parent Qt widget
            callback: invoked when review completes; defaults to a no-op
            qreq_: query request object (required)
            query_title (str): title text for this query
            review_cfg (dict): overrides for keys in REVIEW_CFG_DEFAULTS
        """
        # Fix: replace the shared mutable default argument (was review_cfg={});
        # the dict is only read below, so this is behavior-preserving.
        if review_cfg is None:
            review_cfg = {}
        if ut.VERBOSE:
            logger.info('[qres_wgt] Init QueryResultsWidget')

        assert not isinstance(cm_list, dict)
        assert qreq_ is not None, 'must specify qreq_'

        if USE_FILTER_PROXY:
            super(QueryResultsWidget,
                  qres_wgt).__init__(parent=parent,
                                     model_class=CustomFilterModel)
        else:
            super(QueryResultsWidget, qres_wgt).__init__(parent=parent)

        qres_wgt.cm_list = cm_list
        qres_wgt.ibs = ibs
        qres_wgt.qreq_ = qreq_
        qres_wgt.query_title = query_title
        qres_wgt.qaid2_cm = dict([(cm.qaid, cm) for cm in cm_list])

        # Start from the default review config and overlay caller overrides;
        # assert_exists rejects unknown override keys
        qres_wgt.review_cfg = id_review_api.REVIEW_CFG_DEFAULTS.copy()
        qres_wgt.review_cfg = ut.update_existing(qres_wgt.review_cfg,
                                                 review_cfg,
                                                 assert_exists=True)

        qres_wgt.button_list = None
        qres_wgt.show_new = True
        qres_wgt.show_join = True
        qres_wgt.show_split = True
        qres_wgt.tt = ut.tic()
        # Set results data
        if USE_FILTER_PROXY:
            qres_wgt.add_checkboxes(qres_wgt.show_new, qres_wgt.show_join,
                                    qres_wgt.show_split)

        # Read-only instruction banner shown below the results table
        lbl = gt.newLineEdit(
            qres_wgt,
            text=
            "'T' marks as correct match. 'F' marks as incorrect match. Alt brings up context menu. Double click a row to inspect matches.",
            editable=False,
            enabled=False,
        )
        # Zero out spacing/margins (setMargin only exists on older Qt bindings)
        qres_wgt.layout().setSpacing(0)
        qres_wgt_layout = qres_wgt.layout()
        if hasattr(qres_wgt_layout, 'setMargin'):
            qres_wgt_layout.setMargin(0)
        else:
            qres_wgt_layout.setContentsMargins(0, 0, 0, 0)
        bottom_bar = gt.newWidget(qres_wgt,
                                  orientation=Qt.Horizontal,
                                  spacing=0,
                                  margin=0)
        bottom_bar.layout().setSpacing(0)
        bottom_bar_layout = bottom_bar.layout()
        if hasattr(bottom_bar_layout, 'setMargin'):
            bottom_bar_layout.setMargin(0)
        else:
            bottom_bar_layout.setContentsMargins(0, 0, 0, 0)
        lbl.setMinimumSize(0, 0)
        lbl.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                          QtWidgets.QSizePolicy.Ignored)

        qres_wgt.layout().addWidget(bottom_bar)
        bottom_bar.addWidget(lbl)
        bottom_bar.addNewButton(
            'Mark unreviewed with higher scores as correct',
            pressed=qres_wgt.mark_unreviewed_above_score_as_correct,
        )
        bottom_bar.addNewButton('Repopulate', pressed=qres_wgt.repopulate)
        bottom_bar.addNewButton('Edit Filters', pressed=qres_wgt.edit_filters)

        qres_wgt.setSizePolicy(gt.newSizePolicy())
        qres_wgt.repopulate()
        qres_wgt.connect_signals_and_slots()
        if callback is None:
            callback = partial(ut.identity, None)
        qres_wgt.callback = callback
        qres_wgt.view.setColumnHidden(0, False)
        qres_wgt.view.setColumnHidden(1, False)
        qres_wgt.view.connect_single_key_to_slot(gt.ALT_KEY,
                                                 qres_wgt.on_alt_pressed)
        qres_wgt.view.connect_keypress_to_slot(qres_wgt.on_special_key_pressed)
        if parent is None:
            # Register parentless QWidgets
            fig_presenter.register_qt4_win(qres_wgt)

        # LOG ALL CHANGES MADE TO NAMES: file + console handlers writing to a
        # timestamped json log under the database directory
        dbdir = qres_wgt.qreq_.ibs.get_dbdir()
        expt_dir = ut.ensuredir(ut.unixjoin(dbdir, 'SPECIAL_GGR_EXPT_LOGS'))
        review_log_dir = ut.ensuredir(ut.unixjoin(expt_dir, 'review_logs'))

        ts = ut.get_timestamp(isutc=True, timezone=True)
        log_fpath = ut.unixjoin(
            review_log_dir,
            'review_log_%s_%s.json' % (qres_wgt.qreq_.ibs.dbname, ts))

        import logging

        logger_ = logging.getLogger('query_review')
        logger_.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # File handler logs even debug messages
        fh = logging.FileHandler(log_fpath)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger_.addHandler(fh)

        # Console handler with a higher log level
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger_.addHandler(ch)

        # NOTE(review): this stores the module-level ``logger``, not the
        # ``logger_`` configured above -- confirm which was intended
        qres_wgt.logger = logger
        logger_.info('START QUERY_RESULT_REVIEW')
        logger_.info('NUM CHIP_MATCH OBJECTS (len(cm_list)=%d)' %
                     (len(cm_list), ))
        logger_.info('NUM PAIRS TO EVIDENCE_DECISION (nRows=%d)' %
                     (qres_wgt.review_api.nRows, ))
        logger_.info('PARENT QUERY REQUEST (cfgstr=%s)' %
                     (qres_wgt.qreq_.get_cfgstr(with_input=True), ))
예제 #21
0
def build_rankres_str(allres):
    """Build a csv-formatted report of the rids/scores/ranks of query results.

    Args:
        allres: aggregated query-result container; must provide ``ibs``,
            ``qrid_list``, ``title_suffix``, the ``*_qcx_arrays`` tuples and
            the ``problem_true`` / ``problem_false`` pair groups.

    Side effects (results are attached to ``allres``):
        ``rankres_str``, ``scalar_summary``, ``greater1_rids``,
        ``greater5_rids``, ``problem_true_pairs``, ``problem_false_pairs``.
    """
    ibs = allres.ibs
    cx2_cid = ibs.tables.cx2_cid
    test_samp = allres.qrid_list
    train_samp = ibs.train_sample_rid
    indx_samp = ibs.indexed_sample_rid
    # Get organized data for csv file
    (qcx2_top_true_rank, qcx2_top_true_score,
     qcx2_top_true_rid) = allres.top_true_qcx_arrays

    (qcx2_bot_true_rank, qcx2_bot_true_score,
     qcx2_bot_true_rid) = allres.bot_true_qcx_arrays

    (qcx2_top_false_rank, qcx2_top_false_score,
     qcx2_top_false_rid) = allres.top_false_qcx_arrays
    # Number of groundtruth per query; -2 marks chips that were never queried
    qcx2_numgt = np.zeros(len(cx2_cid)) - 2
    for qrid in test_samp:
        qcx2_numgt[qrid] = len(ibs.get_other_indexed_rids(qrid))
    # Easy to digest results
    num_chips = len(test_samp)
    num_nonquery = len(np.setdiff1d(indx_samp, test_samp))
    # Find the test samples WITH ground truth
    test_samp_with_gt = np.array(test_samp)[qcx2_numgt[test_samp] > 0]
    if len(test_samp_with_gt) == 0:
        warnings.warn('[rr2] there were no queries with ground truth')
    flag_cxs_fn = ibs.flag_cxs_with_name_in_sample

    def ranks_less_than_(thresh, intrain=None):
        """Count queries whose top-true rank is below ``thresh``.

        ``intrain=None`` reports over all queries with ground truth;
        True/False restricts to queries whose name is (not) in the
        training sample.
        """
        # Get statistics with respect to the training set
        if len(test_samp_with_gt) == 0:
            test_cxs_ = np.array([])
        elif intrain is None:  # report all
            test_cxs_ = test_samp_with_gt
        else:  # report either or
            in_train_flag = flag_cxs_fn(test_samp_with_gt, train_samp)
            if intrain is False:
                # FIX: `True - flag` boolean subtraction raises TypeError on
                # modern numpy; use an explicit logical negation instead.
                in_train_flag = np.logical_not(in_train_flag)
            test_cxs_ = test_samp_with_gt[in_train_flag]
        # number of test samples with ground truth
        num_with_gt = len(test_cxs_)
        if num_with_gt == 0:
            return [], ('NoGT', 'NoGT', -1, 'NoGT')
        # find tests with ranks greater and less than thresh
        testcx2_ttr = qcx2_top_true_rank[test_cxs_]
        greater_rids = test_cxs_[np.where(testcx2_ttr >= thresh)[0]]
        num_greater = len(greater_rids)
        num_less = num_with_gt - num_greater
        num_greater = num_with_gt - num_less
        frac_less = 100.0 * num_less / num_with_gt
        fmt_tup = (num_less, num_with_gt, frac_less, num_greater)
        return greater_rids, fmt_tup

    greater5_rids, fmt5_tup = ranks_less_than_(5)
    greater1_rids, fmt1_tup = ranks_less_than_(1)
    #
    gt5_intrain_rids, fmt5_in_tup = ranks_less_than_(5, intrain=True)
    gt1_intrain_rids, fmt1_in_tup = ranks_less_than_(1, intrain=True)
    #
    gt5_outtrain_rids, fmt5_out_tup = ranks_less_than_(5, intrain=False)
    gt1_outtrain_rids, fmt1_out_tup = ranks_less_than_(1, intrain=False)
    #
    allres.greater1_rids = greater1_rids
    allres.greater5_rids = greater5_rids
    # CSV Metadata
    header = '# Experiment allres.title_suffix = ' + allres.title_suffix + '\n'
    header += utool.get_timestamp(format_='comment') + '\n'
    # Scalar summary
    scalar_summary = '# Num Query Chips: %d \n' % num_chips
    scalar_summary += '# Num Query Chips with at least one match: %d \n' % len(
        test_samp_with_gt)
    scalar_summary += '# Num NonQuery Chips: %d \n' % num_nonquery
    scalar_summary += '# Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_tup)
    scalar_summary += '# Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (
        fmt1_tup)

    scalar_summary += '# InTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (
        fmt5_in_tup)
    scalar_summary += '# InTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (
        fmt1_in_tup)

    scalar_summary += '# OutTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (
        fmt5_out_tup)
    scalar_summary += '# OutTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (
        fmt1_out_tup)
    header += scalar_summary
    # More Metadata
    header += textwrap.dedent('''
    # Rank Result Metadata:
    #   QCX  = Query chip-index
    # QGNAME = Query images name
    # NUMGT  = Num ground truth matches
    #    TT  = top true
    #    BT  = bottom true
    #    TF  = top false''').strip()
    # Build the CSV table
    test_sample_gx = ibs.tables.cx2_gx[test_samp]
    test_sample_gname = ibs.tables.gx2_gname[test_sample_gx]
    test_sample_gname = [g.replace('.jpg', '') for g in test_sample_gname]
    column_labels = [
        'QCX',
        'NUM GT',
        'TT CX',
        'BT CX',
        'TF CX',
        'TT SCORE',
        'BT SCORE',
        'TF SCORE',
        'TT RANK',
        'BT RANK',
        'TF RANK',
        'QGNAME',
    ]
    column_list = [
        test_samp,
        qcx2_numgt[test_samp],
        qcx2_top_true_rid[test_samp],
        qcx2_bot_true_rid[test_samp],
        qcx2_top_false_rid[test_samp],
        qcx2_top_true_score[test_samp],
        qcx2_bot_true_score[test_samp],
        qcx2_top_false_score[test_samp],
        qcx2_top_true_rank[test_samp],
        qcx2_bot_true_rank[test_samp],
        qcx2_top_false_rank[test_samp],
        test_sample_gname,
    ]
    column_type = [
        int,
        int,
        int,
        int,
        int,
        float,
        float,
        float,
        int,
        int,
        int,
        str,
    ]
    rankres_str = utool.util_csv.make_csv_table(column_labels, column_list,
                                                header, column_type)
    # Put some more data at the end
    # FIX: materialize the pairs -- under Python 3 ``zip`` is lazy and its
    # repr would embed '<zip object ...>' in the generated report.
    problem_true_pairs = list(zip(allres.problem_true.qrids,
                                  allres.problem_true.rids))
    problem_false_pairs = list(zip(allres.problem_false.qrids,
                                   allres.problem_false.rids))
    problem_str = '\n'.join([
        '#Problem Cases: ',
        '# problem_true_pairs = ' + repr(problem_true_pairs),
        '# problem_false_pairs = ' + repr(problem_false_pairs)
    ])
    rankres_str += '\n' + problem_str
    # Attach results to allres structure (duplicate assignments removed)
    allres.rankres_str = rankres_str
    allres.scalar_summary = scalar_summary
    allres.problem_false_pairs = problem_false_pairs
    allres.problem_true_pairs = problem_true_pairs
예제 #22
0
def autogen_explicit_injectable_metaclass(classname, regen_command=None,
                                          conditional_imports=None):
    r"""
    Autogenerate source for a static class that explicitly redirects every
    function registered (via class injection) for ``classname``.

    Args:
        classname (str): key under which functions were registered into
            ``__CLASSTYPE_ATTRIBUTES__``
        regen_command (str): command recorded in the generated header so the
            file can be regenerated later (default: placeholder text)
        conditional_imports (list): forwarded to ``autogen_import_list`` for
            optional module imports

    Returns:
        str: pep8-autoformatted python source for the explicit-inject class

    CommandLine:
        python -m utool.util_class --exec-autogen_explicit_injectable_metaclass

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_class import *  # NOQA
        >>> from utool.util_class import  __CLASSTYPE_ATTRIBUTES__  # NOQA
        >>> import ibeis
        >>> import ibeis.control.IBEISControl
        >>> classname = ibeis.control.controller_inject.CONTROLLER_CLASSNAME
        >>> result = autogen_explicit_injectable_metaclass(classname)
        >>> print(result)
    """
    import utool as ut
    vals_list = []

    def make_redirect(func):
        # Build source for a thin wrapper preserving the original signature
        # that forwards the call to the source module's function.
        src_fmt = r'''
        def {funcname}{defsig}:
            """ {orig_docstr}"""
            return {orig_funcname}{callsig}
        '''
        from utool._internal import meta_util_six
        orig_docstr = meta_util_six.get_funcdoc(func)
        funcname = meta_util_six.get_funcname(func)
        # `modname` is bound by the enclosing loop (late-binding closure)
        orig_funcname = modname.split('.')[-1] + '.' + funcname
        orig_docstr = '' if orig_docstr is None else orig_docstr
        import textwrap
        # Put wrapped function into a scope
        import inspect
        try:
            # FIX: getargspec fails on functions with keyword-only args and
            # was removed in Python 3.11; prefer getfullargspec.
            argspec = inspect.getfullargspec(func)
        except AttributeError:
            # Python 2 fallback
            argspec = inspect.getargspec(func)
        # NOTE(review): inspect.formatargspec was also removed in Python
        # 3.11; this still needs a port (e.g. inspect.signature) there.
        defsig = inspect.formatargspec(*argspec)
        callsig = inspect.formatargspec(*argspec[0:3])
        src_fmtdict = dict(funcname=funcname, orig_funcname=orig_funcname,
                           defsig=defsig, callsig=callsig,
                           orig_docstr=orig_docstr)
        src = textwrap.dedent(src_fmt).format(**src_fmtdict)
        return src

    src_list = []

    # Collect one redirect stub per registered function for this class
    for classkey, vals in __CLASSTYPE_ATTRIBUTES__.items():
        modname = classkey[1]
        if classkey[0] == classname:
            vals_list.append(vals)
            for func in vals:
                src = make_redirect(func)
                src = ut.indent(src)
                src = '\n'.join([_.rstrip() for _ in src.split('\n')])
                src_list.append(src)

    if regen_command is None:
        regen_command = 'FIXME None given'

    module_header = ut.codeblock(
        """
        # -*- coding: utf-8 -*-
        """ + ut.TRIPLE_DOUBLE_QUOTE + """
        Static file containing autogenerated functions for {classname}
        Autogenerated on {autogen_time}

        RegenCommand:
            {regen_command}
        """ + ut.TRIPLE_DOUBLE_QUOTE + """

        from __future__ import absolute_import, division, print_function
        import utool as ut

        """).format(
            autogen_time=ut.get_timestamp(),
            regen_command=regen_command,
            classname=classname)

    depends_module_block = autogen_import_list(classname, conditional_imports)
    inject_statement_fmt = ("print, rrr, profile = "
                            "ut.inject2(__name__, '[autogen_explicit_inject_{classname}]')")
    inject_statement = inject_statement_fmt.format(classname=classname)

    source_block_lines = [
        module_header,
        depends_module_block,
        inject_statement,
        '\n',
        'class ExplicitInject' + classname + '(object):',
    ] + src_list
    source_block = '\n'.join(source_block_lines)

    source_block = ut.autoformat_pep8(source_block, aggressive=2)
    return source_block
예제 #23
0
def makeinit(mod_dpath, exclude_modnames=None, use_star=False):
    r"""
    Generate the ``__init__.py`` code block for a module directory.

    Args:
        mod_dpath (str): path to the module directory
        exclude_modnames (list): submodule names to skip
            (default = None, meaning exclude nothing)
        use_star (bool): also emit the dynamic import-star block
            (default = False)

    Returns:
        str: init_codeblock

    CommandLine:
        python -m utool.util_autogen makeinit --modname=ibeis.algo

    Example:
        >>> # SCRIPT
        >>> from utool.util_autogen import *  # NOQA
        >>> import utool as ut
        >>> modname = ut.get_argval('--modname', str, default=None)
        >>> mod_dpath = (os.getcwd() if modname is None else
        >>>              ut.get_modpath(modname, prefer_pkg=True))
        >>> mod_dpath = ut.unixpath(mod_dpath)
        >>> mod_fpath = join(mod_dpath, '__init__.py')
        >>> exclude_modnames = ut.get_argval(('--exclude', '-x'), list, default=[])
        >>> use_star = ut.get_argflag('--star')
        >>> init_codeblock = makeinit(mod_dpath, exclude_modnames, use_star)
        >>> ut.dump_autogen_code(mod_fpath, init_codeblock)
    """
    from utool._internal import util_importer
    import utool as ut
    # FIX: avoid the shared mutable default argument ([] was the default)
    if exclude_modnames is None:
        exclude_modnames = []
    module_name = ut.get_modname_from_modpath(mod_dpath)
    IMPORT_TUPLES = util_importer.make_import_tuples(mod_dpath, exclude_modnames=exclude_modnames)
    initstr = util_importer.make_initstr(module_name, IMPORT_TUPLES)
    # Record the command needed to regenerate this file
    regen_command = 'cd %s\n' % (mod_dpath)
    regen_command += '    makeinit.py'
    regen_command += ' --modname={modname}'.format(modname=module_name)
    if use_star:
        regen_command += ' --star'
    if exclude_modnames:
        regen_command += ' -x ' + ' '.join(exclude_modnames)

    regen_block = (ut.codeblock('''
    """
    Regen Command:
        {regen_command}
    """
    ''').format(regen_command=regen_command))

    importstar_codeblock = ut.codeblock(
        '''
        """
        python -c "import {module_name}" --dump-{module_name}-init
        python -c "import {module_name}" --update-{module_name}-init
        """
        __DYNAMIC__ = True
        if __DYNAMIC__:
            # TODO: import all utool external prereqs. Then the imports will not import
            # anything that has already in a toplevel namespace
            # COMMENTED OUT FOR FROZEN __INIT__
            # Dynamically import listed util libraries and their members.
            from utool._internal import util_importer
            # FIXME: this might actually work with rrrr, but things arent being
            # reimported because they are already in the modules list
            import_execstr = util_importer.dynamic_import(__name__, IMPORT_TUPLES)
            exec(import_execstr)
            DOELSE = False
        else:
            # Do the nonexec import (can force it to happen no matter what if alwyas set
            # to True)
            DOELSE = True

        if DOELSE:
            # <AUTOGEN_INIT>
            pass
            # </AUTOGEN_INIT>
        '''.format(module_name=module_name)
    )

    ts_line = '# Autogenerated on {ts}'.format(ts=ut.get_timestamp('printable'))

    # Assemble the final init file: coding line, timestamp, imports,
    # IMPORT_TUPLES literal, optional import-star machinery, regen comment
    init_codeblock_list = ['# -*- coding: utf-8 -*-', ts_line]
    init_codeblock_list.append(initstr)
    init_codeblock_list.append('\nIMPORT_TUPLES = ' + ut.list_str(IMPORT_TUPLES))
    if use_star:
        init_codeblock_list.append(importstar_codeblock)
    init_codeblock_list.append(regen_block)

    init_codeblock = '\n'.join(init_codeblock_list)
    return init_codeblock
예제 #24
0
def build_rankres_str(allres):
    """Build a csv-formatted report of the rids/scores/ranks of query results.

    Args:
        allres: aggregated query-result container; must provide ``ibs``,
            ``qrid_list``, ``title_suffix``, the ``*_qcx_arrays`` tuples and
            the ``problem_true`` / ``problem_false`` pair groups.

    Side effects (results are attached to ``allres``):
        ``rankres_str``, ``scalar_summary``, ``greater1_rids``,
        ``greater5_rids``, ``problem_true_pairs``, ``problem_false_pairs``.
    """
    ibs = allres.ibs
    cx2_cid = ibs.tables.cx2_cid
    test_samp = allres.qrid_list
    train_samp = ibs.train_sample_rid
    indx_samp = ibs.indexed_sample_rid
    # Get organized data for csv file
    (qcx2_top_true_rank,
     qcx2_top_true_score,
     qcx2_top_true_rid)  = allres.top_true_qcx_arrays

    (qcx2_bot_true_rank,
     qcx2_bot_true_score,
     qcx2_bot_true_rid)  = allres.bot_true_qcx_arrays

    (qcx2_top_false_rank,
     qcx2_top_false_score,
     qcx2_top_false_rid) = allres.top_false_qcx_arrays
    # Number of groundtruth per query; -2 marks chips that were never queried
    qcx2_numgt = np.zeros(len(cx2_cid)) - 2
    for qrid in test_samp:
        qcx2_numgt[qrid] = len(ibs.get_other_indexed_rids(qrid))
    # Easy to digest results
    num_chips = len(test_samp)
    num_nonquery = len(np.setdiff1d(indx_samp, test_samp))
    # Find the test samples WITH ground truth
    test_samp_with_gt = np.array(test_samp)[qcx2_numgt[test_samp] > 0]
    if len(test_samp_with_gt) == 0:
        warnings.warn('[rr2] there were no queries with ground truth')
    flag_cxs_fn = ibs.flag_cxs_with_name_in_sample

    def ranks_less_than_(thresh, intrain=None):
        """Count queries whose top-true rank is below ``thresh``.

        ``intrain=None`` reports over all queries with ground truth;
        True/False restricts to queries whose name is (not) in the
        training sample.
        """
        # Get statistics with respect to the training set
        if len(test_samp_with_gt) == 0:
            test_cxs_ = np.array([])
        elif intrain is None:  # report all
            test_cxs_ = test_samp_with_gt
        else:  # report either or
            in_train_flag = flag_cxs_fn(test_samp_with_gt, train_samp)
            if intrain is False:
                # FIX: `True - flag` boolean subtraction raises TypeError on
                # modern numpy; use an explicit logical negation instead.
                in_train_flag = np.logical_not(in_train_flag)
            test_cxs_ = test_samp_with_gt[in_train_flag]
        # number of test samples with ground truth
        num_with_gt = len(test_cxs_)
        if num_with_gt == 0:
            return [], ('NoGT', 'NoGT', -1, 'NoGT')
        # find tests with ranks greater and less than thresh
        testcx2_ttr = qcx2_top_true_rank[test_cxs_]
        greater_rids = test_cxs_[np.where(testcx2_ttr >= thresh)[0]]
        num_greater = len(greater_rids)
        num_less    = num_with_gt - num_greater
        num_greater = num_with_gt - num_less
        frac_less   = 100.0 * num_less / num_with_gt
        fmt_tup     = (num_less, num_with_gt, frac_less, num_greater)
        return greater_rids, fmt_tup

    greater5_rids, fmt5_tup = ranks_less_than_(5)
    greater1_rids, fmt1_tup = ranks_less_than_(1)
    #
    gt5_intrain_rids, fmt5_in_tup = ranks_less_than_(5, intrain=True)
    gt1_intrain_rids, fmt1_in_tup = ranks_less_than_(1, intrain=True)
    #
    gt5_outtrain_rids, fmt5_out_tup = ranks_less_than_(5, intrain=False)
    gt1_outtrain_rids, fmt1_out_tup = ranks_less_than_(1, intrain=False)
    #
    allres.greater1_rids = greater1_rids
    allres.greater5_rids = greater5_rids
    # CSV Metadata
    header = '# Experiment allres.title_suffix = ' + allres.title_suffix + '\n'
    header +=  utool.get_timestamp(format_='comment') + '\n'
    # Scalar summary
    scalar_summary  = '# Num Query Chips: %d \n' % num_chips
    scalar_summary += '# Num Query Chips with at least one match: %d \n' % len(test_samp_with_gt)
    scalar_summary += '# Num NonQuery Chips: %d \n' % num_nonquery
    scalar_summary += '# Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_tup)
    scalar_summary += '# Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_tup)

    scalar_summary += '# InTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_in_tup)
    scalar_summary += '# InTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_in_tup)

    scalar_summary += '# OutTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_out_tup)
    scalar_summary += '# OutTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_out_tup)
    header += scalar_summary
    # More Metadata
    header += textwrap.dedent('''
    # Rank Result Metadata:
    #   QCX  = Query chip-index
    # QGNAME = Query images name
    # NUMGT  = Num ground truth matches
    #    TT  = top true
    #    BT  = bottom true
    #    TF  = top false''').strip()
    # Build the CSV table
    test_sample_gx = ibs.tables.cx2_gx[test_samp]
    test_sample_gname = ibs.tables.gx2_gname[test_sample_gx]
    test_sample_gname = [g.replace('.jpg', '') for g in test_sample_gname]
    column_labels = ['QCX', 'NUM GT',
                     'TT CX', 'BT CX', 'TF CX',
                     'TT SCORE', 'BT SCORE', 'TF SCORE',
                     'TT RANK', 'BT RANK', 'TF RANK',
                     'QGNAME', ]
    column_list = [
        test_samp, qcx2_numgt[test_samp],
        qcx2_top_true_rid[test_samp], qcx2_bot_true_rid[test_samp],
        qcx2_top_false_rid[test_samp], qcx2_top_true_score[test_samp],
        qcx2_bot_true_score[test_samp], qcx2_top_false_score[test_samp],
        qcx2_top_true_rank[test_samp], qcx2_bot_true_rank[test_samp],
        qcx2_top_false_rank[test_samp], test_sample_gname, ]
    column_type = [int, int, int, int, int,
                   float, float, float, int, int, int, str, ]
    rankres_str = utool.util_csv.make_csv_table(column_labels, column_list, header, column_type)
    # Put some more data at the end
    # FIX: materialize the pairs -- under Python 3 ``zip`` is lazy and its
    # repr would embed '<zip object ...>' in the generated report.
    problem_true_pairs = list(zip(allres.problem_true.qrids, allres.problem_true.rids))
    problem_false_pairs = list(zip(allres.problem_false.qrids, allres.problem_false.rids))
    problem_str = '\n'.join( [
        '#Problem Cases: ',
        '# problem_true_pairs = ' + repr(problem_true_pairs),
        '# problem_false_pairs = ' + repr(problem_false_pairs)])
    rankres_str += '\n' + problem_str
    # Attach results to allres structure (duplicate assignments removed)
    allres.rankres_str = rankres_str
    allres.scalar_summary = scalar_summary
    allres.problem_false_pairs = problem_false_pairs
    allres.problem_true_pairs = problem_true_pairs
예제 #25
0
 def _onresize(event):
     """Debug resize handler: print a marker followed by the current timestamp."""
     stamp = ut.get_timestamp()
     print('foo' + stamp)
예제 #26
0
def autogen_explicit_injectable_metaclass(classname, regen_command=None,
                                          conditional_imports=None):
    r"""
    Autogenerate source for a static class that explicitly redirects every
    function registered (via class injection) for ``classname``.

    Args:
        classname (str): key under which functions were registered into
            ``__CLASSTYPE_ATTRIBUTES__``
        regen_command (str): command recorded in the generated header so the
            file can be regenerated later (default: placeholder text)
        conditional_imports (list): forwarded to ``autogen_import_list`` for
            optional module imports

    Returns:
        str: pep8-autoformatted python source for the explicit-inject class

    CommandLine:
        python -m utool.util_class --exec-autogen_explicit_injectable_metaclass

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_class import *  # NOQA
        >>> from utool.util_class import  __CLASSTYPE_ATTRIBUTES__  # NOQA
        >>> import ibeis
        >>> import ibeis.control.IBEISControl
        >>> classname = ibeis.control.controller_inject.CONTROLLER_CLASSNAME
        >>> result = autogen_explicit_injectable_metaclass(classname)
        >>> print(result)
    """
    import utool as ut
    vals_list = []

    def make_redirect(func):
        # Build source for a thin wrapper that preserves the original
        # signature and forwards the call to the source module's function.
        # PRESERVES ALL SIGNATURES WITH EXECS
        src_fmt = r'''
        def {funcname}{defsig}:
            """ {orig_docstr}"""
            return {orig_funcname}{callsig}
        '''
        from utool._internal import meta_util_six
        orig_docstr = meta_util_six.get_funcdoc(func)
        funcname = meta_util_six.get_funcname(func)
        # `modname` comes from the enclosing loop (late-binding closure)
        orig_funcname = modname.split('.')[-1] + '.' + funcname
        orig_docstr = '' if orig_docstr is None else orig_docstr
        import textwrap
        # Put wrapped function into a scope
        import inspect
        # NOTE(review): inspect.getargspec / formatargspec were removed in
        # Python 3.11 -- confirm the supported interpreter range before use.
        argspec = inspect.getargspec(func)
        (args, varargs, varkw, defaults) = argspec
        # defsig: full parameter list for the stub; callsig: positional
        # forwarding call (args, varargs, varkw only -- no defaults)
        defsig = inspect.formatargspec(*argspec)
        callsig = inspect.formatargspec(*argspec[0:3])
        src_fmtdict = dict(funcname=funcname, orig_funcname=orig_funcname,
                           defsig=defsig, callsig=callsig,
                           orig_docstr=orig_docstr)
        src = textwrap.dedent(src_fmt).format(**src_fmtdict)
        return src

    src_list = []

    # Collect one redirect stub per registered function for this class
    for classkey, vals in __CLASSTYPE_ATTRIBUTES__.items():
        modname = classkey[1]
        if classkey[0] == classname:
            vals_list.append(vals)
            for func in vals:
                src = make_redirect(func)
                src = ut.indent(src)
                src = '\n'.join([_.rstrip() for _ in src.split('\n')])
                src_list.append(src)

    if regen_command is None:
        regen_command = 'FIXME None given'

    # Module header: coding line, module docstring with regen metadata, and
    # the baseline imports of the generated file
    module_header = ut.codeblock(
        """
        # -*- coding: utf-8 -*-
        """ + ut.TRIPLE_DOUBLE_QUOTE + """
        Static file containing autogenerated functions for {classname}
        Autogenerated on {autogen_time}

        RegenCommand:
            {regen_command}
        """ + ut.TRIPLE_DOUBLE_QUOTE + """

        from __future__ import absolute_import, division, print_function
        import utool as ut

        """).format(
            autogen_time=ut.get_timestamp(),
            regen_command=regen_command,
            classname=classname)

    depends_module_block = autogen_import_list(classname, conditional_imports)
    inject_statement_fmt = ("print, rrr, profile = "
                            "ut.inject2(__name__, '[autogen_explicit_inject_{classname}]')")
    inject_statement = inject_statement_fmt.format(classname=classname)

    # Assemble: header, dependency imports, inject line, then the class
    # definition followed by the indented redirect stubs
    source_block_lines = [
        module_header,
        depends_module_block,
        inject_statement,
        '\n',
        'class ExplicitInject' + classname + '(object):',
    ] + src_list
    source_block = '\n'.join(source_block_lines)

    source_block = ut.autoformat_pep8(source_block, aggressive=2)
    return source_block