Example 1
    def update_registry(drive):
        import utool as ut
        from os.path import exists
        print('Updating registered files in %r' % (drive,))
        # Update existing files
        fpath_exists_list = list(map(exists, ut.ProgIter(drive.fpath_list, 'checkexist fpath', freq=1000)))
        dpath_exists_list = list(map(exists, ut.ProgIter(drive.dpath_list, 'checkexist dpath', freq=1000)))
        if all(fpath_exists_list):
            print('No change in file structure')
        else:
            print('%d/%d files no longer exist' % (
                len(drive.fpath_list) - sum(fpath_exists_list),
                len(drive.fpath_list)))
            removed_fpaths = ut.compress(drive.fpath_list, ut.not_list(fpath_exists_list))
            print('removed_fpaths = %s' % (ut.list_str(removed_fpaths),))
        if all(dpath_exists_list):
            print('No change in dpath structure')
        else:
            print('%d/%d dirs no longer exist' % (
                len(drive.dpath_list) - sum(dpath_exists_list),
                len(drive.dpath_list)))
            removed_dpaths = ut.compress(
                drive.dpath_list,
                ut.not_list(dpath_exists_list))
            print('removed_dpaths = %s' % (ut.list_str(removed_dpaths),))

        drive.fpath_list = ut.compress(drive.fpath_list, fpath_exists_list)
        drive.dpath_list = ut.compress(drive.dpath_list, dpath_exists_list)
        drive.cache.save('fpath_list', drive.fpath_list)
        drive.cache.save('dpath_list', drive.dpath_list)
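The snippet leans on utool's boolean-mask helpers. Below is a minimal pure-Python sketch of that filtering pattern, assuming ut.compress and ut.not_list behave like itertools.compress and element-wise negation (hypothetical stand-ins, not the utool source):

from itertools import compress

def not_list(flags):
    # element-wise logical negation, mirroring the assumed ut.not_list contract
    return [not flag for flag in flags]

fpath_list = ['/data/a.txt', '/data/b.txt', '/data/c.txt']
exists_flags = [True, False, True]
removed_fpaths = list(compress(fpath_list, not_list(exists_flags)))
kept_fpaths = list(compress(fpath_list, exists_flags))
print(removed_fpaths)  # ['/data/b.txt']
print(kept_fpaths)     # ['/data/a.txt', '/data/c.txt']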
Example 2
def test_sver_wrapper2():
    r"""
    CommandLine:
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper2
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper2 --no-c --quiet
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper2 --rebuild-sver

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.sver_c_wrapper import *  # NOQA
        >>> result = test_sver_wrapper2()
        >>> print(result)

    Ignore:
        C (Serial):
            unique cases affine inliers: [
                '[ 4 25 33 36 37 53]',
            ]
            unique cases homog inliers: [
                '[]',
            ]

        C (Parallel)
            unique cases affine inliers: [
                '[ 4 25 33 36 37 53]',
                '[10 19 25 29 36 39 53]',
            ]
            unique cases homog inliers: [
                '[10 43 53]',
                '[]',
            ]

        Python:
            unique cases affine inliers: [
                '[10 19 25 29 36 39 53]',
            ]
            unique cases homog inliers: [
                '[10 43 53]',
            ]
    """
    import utool as ut
    import vtool
    import vtool.tests.testdata_nondeterm_sver
    (kpts1, kpts2, fm, xy_thresh, scale_thresh, ori_thresh, dlen_sqrd2,
     min_nInliers, match_weights,
     full_homog_checks) = vtool.tests.testdata_nondeterm_sver.testdata_nondeterm_sver()
    inliers_list = []
    homog_inliers_list = []

    for x in range(10):
        sv_tup = vtool.spatially_verify_kpts(
            kpts1, kpts2, fm, xy_thresh, scale_thresh, ori_thresh,
            dlen_sqrd2, min_nInliers, match_weights=match_weights,
            full_homog_checks=full_homog_checks, returnAff=True)
        aff_inliers = sv_tup[3]
        inliers_list.append(str(aff_inliers))
        homog_inliers_list.append(str(sv_tup[0]))

        #print(sv_tup[0])
        #print(sv_tup[3])
    print('unique cases affine inliers: ' + ut.list_str(list(set(inliers_list))))
    print('unique cases homog inliers: ' + ut.list_str(list(set(homog_inliers_list))))
Example 3
def print_database_structure(cur):
    import utool as ut
    tablename_list = ut.get_tablenames(cur)
    colinfos_list = [ut.get_table_columninfo_list(cur, tablename) for tablename in tablename_list]
    numrows_list = [ut.get_table_num_rows(cur, tablename) for tablename in tablename_list]
    for tablename, colinfo_list, num_rows in ut.sortedby(list(zip(tablename_list, colinfos_list, numrows_list)), numrows_list):
        print('+-------------')
        print('tablename = %r' % (tablename,))
        print('num_rows = %r' % (num_rows,))
        #print(ut.list_str(colinfo_list))
        print(ut.list_str(ut.get_primary_columninfo(cur, tablename)))
        print(ut.list_str(ut.get_nonprimary_columninfo(cur, tablename)))
        print('+-------------')
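Everything here goes through utool's SQL helpers. A self-contained sketch of the queries they are assumed to wrap, using only the standard sqlite3 module:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE annotations (id INTEGER PRIMARY KEY, name TEXT)')
# table names, roughly what ut.get_tablenames is assumed to return
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
print([row[0] for row in cur.fetchall()])  # ['annotations']
# per-column metadata, roughly what ut.get_table_columninfo_list is assumed to wrap
print(cur.execute('PRAGMA table_info(annotations)').fetchall())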
Example 4
def show_function_usage(fname, funcname_list, dpath_list):
    import utool as ut
    # Check for usage of each function
    funcname_list = [r'\b%s\b' % (funcname.strip(),) for funcname in funcname_list if len(funcname) > 0]
    flagged_funcnames = []
    for funcname in funcname_list:
        found_filestr_list, found_lines_list, found_lxs_list = ut.grep([funcname], dpath_list=dpath_list)
        total = 0
        for lines in found_lines_list:
            total += len(lines)
        funcname_ = funcname.replace('\\b', '')
        print(funcname_ + ' ' + str(total))
        if total == 1:
            flagged_funcnames.append(funcname_)
        # See where external usage is (files other than the defining one)
        isexternal_list = [fname != fname_ for fname_ in found_filestr_list]
        external_filestr_list = ut.compress(found_filestr_list, isexternal_list)
        external_lines_list = ut.compress(found_lines_list, isexternal_list)
        #external_lxs_list = ut.compress(found_lxs_list, isexternal_list)
        if len(external_filestr_list) == 0:
            print(' no external usage')
        else:
            for filename, lines in zip(external_filestr_list, external_lines_list):
                print(' * filename=%r' % (filename,))
                print(ut.list_str(lines))
            #print(ut.list_str(list(zip(external_filestr_list, external_lines_list))))
    print('----------')
    print('flagged:')
    print('\n'.join(flagged_funcnames))
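The heavy lifting is ut.grep over a directory tree. A hedged stand-in showing the same word-boundary search on in-memory text (grep_text is a hypothetical helper, not the utool API):

import re

def grep_text(funcname, text):
    # word-boundary search, mirroring the r'\b%s\b' patterns built above
    pattern = r'\b%s\b' % (funcname,)
    return [line for line in text.splitlines() if re.search(pattern, line)]

source_text = 'foo()\nfoobar()\nx = foo'
print(grep_text('foo', source_text))  # ['foo()', 'x = foo']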
Example 5
def parse_latex_comments_for_commmands():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> parse_latex_comments_for_commmands()
    """
    import utool as ut
    fname = ut.get_argval('--fname', type_=str, default='figdefexpt.tex')
    text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/' + fname))
    #text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/figdefindiv.tex'))
    lines = text.split('\n')
    cmd_list = ['']
    in_comment = True
    for line in lines:
        if line.startswith('% ---'):
            # Keep separators
            toadd = line.replace('%', '#')
            if not (len(cmd_list) > 1 and cmd_list[-1].startswith('# ---')):
                cmd_list[-1] += (toadd)
            else:
                cmd_list.append(toadd)
            cmd_list.append('')

        if line.strip().startswith(r'\begin{comment}'):
            in_comment = True
            continue
        if in_comment:
            line = line.strip()
            if line == '' or line.startswith('#') or line.startswith('%'):
                in_comment = False
            else:
                cmd_list[-1] = cmd_list[-1] + line
                if not line.strip().endswith('\\'):
                    cmd_list[-1] = cmd_list[-1] + ' $@'
                    #cmd_list.append('')
                    #cmd_list.append('#--')
                    cmd_list.append('')
                    in_comment = False
                else:
                    cmd_list[-1] = cmd_list[-1] + '\n'

    cmd_list = [cmd.replace('--render', '').replace('--diskshow', '') for cmd in cmd_list]

    # formatting
    cmd_list2 = []
    for cmd in cmd_list:
        #cmd = cmd.replace(' -t ', ' \\\n    -t ')
        #cmd = cmd.replace('--db', '\\\n    --db')
        #cmd = cmd.replace('python -m ibeis.dev', './dev.py')
        cmd = cmd.replace('python -m ibeis.dev -e', 'ibeis -e')
        cmd_list2.append(cmd)
    cmd_list = cmd_list2

    print('cmd_list = %s' % (ut.list_str(cmd_list),))
    from os.path import splitext
    script_fname = 'regen_' + splitext(fname)[0] + '.sh'
    fname, script, line_list = write_script_lines(cmd_list, script_fname)
Example 6
def biggest_files(drive):
    import utool as ut
    print('Biggest Files in %r' % (drive,))
    sortx = ut.list_argsort(drive.fpath_bytes_list)[::-1]
    sel = sortx[0:10]
    biggest_nbytes = ut.take(drive.fpath_bytes_list, sel)
    biggest_files = ut.take(drive.fpath_list, sel)
    biginfo_list = list(zip(map(ut.byte_str2, biggest_nbytes), biggest_files))
    print(ut.list_str(biginfo_list, strvals=True))
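The top-10 selection is an argsort-and-take pattern. A minimal sketch on plain lists, assuming ut.list_argsort and ut.take behave like the stand-ins below:

fpath_bytes_list = [120, 5, 900, 64]
fpath_list = ['a.bin', 'b.bin', 'c.bin', 'd.bin']
# indices that sort by size, descending (assumed ut.list_argsort semantics)
sortx = sorted(range(len(fpath_bytes_list)), key=fpath_bytes_list.__getitem__)[::-1]
sel = sortx[0:2]
print([(fpath_bytes_list[i], fpath_list[i]) for i in sel])
# [(900, 'c.bin'), (120, 'a.bin')]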
Example 7
    def get_csv_results(gridsearch, max_lines=None, score_lbl='score_diff'):
        """
        Make csv text describing results

        Args:
            max_lines (int): add top num lines to the csv. No limit if None.
            score_lbl (str): score label to sort by

        Returns:
            str: result data in csv format

        CommandLine:
            python -m utool.util_gridsearch --test-get_csv_results

        Example:
            >>> # DISABLE_DOCTEST
            >>> from utool.util_gridsearch import *  # NOQA
            >>> import utool as ut
            >>> import plottool as pt
            >>> # build test data
            >>> score_lbl = 'score_diff'
            >>> gridsearch = testdata_grid_search()
            >>> csvtext = gridsearch.get_csv_results(10, score_lbl)
            >>> print(csvtext)
            >>> result = ut.hashstr(csvtext)
            >>> print(result)
            60yptleiwo@lk@24
        """
        import utool as ut
        collbl_tup = gridsearch.get_sorted_columns_and_labels(score_lbl)
        (score_name_sorted, param_name_sorted,
         score_list_sorted, param_vals_sorted) = collbl_tup

        # Build CSV
        column_lbls = score_name_sorted + param_name_sorted
        column_list = score_list_sorted + param_vals_sorted

        if max_lines is not None:
            column_list = [ut.listclip(col, max_lines) for col in column_list]
        header_raw_fmtstr = ut.codeblock(
            '''
            import utool as ut
            from utool import DimensionBasis
            title = 'Grid Search Results CSV'
            label = {label}
            grid_basis = {grid_basis_str}
            ''')
        fmtdict = dict(
            grid_basis_str=ut.list_str(gridsearch.grid_basis),
            label=gridsearch.label
        )
        header_raw = header_raw_fmtstr.format(**fmtdict)
        header     = ut.indent(header_raw, '# >>> ')
        precision = 3
        csvtext = ut.make_csv_table(column_list, column_lbls, header, precision=precision)
        return csvtext
Example 8
def read_csv(csv_fpath):
    import utool as ut
    csv_text = ut.read_from(csv_fpath)
    csv_lines = csv_text.split('\n')
    print(ut.list_str(csv_lines[0:2]))
    csv_data = [[field.strip('"').strip('\r') for field in line.split(',')]
                for line in csv_lines if len(line) > 0]
    csv_header = csv_data[0]
    csv_data = csv_data[1:]
    return csv_data, csv_header
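Note that the manual split(',') above breaks on quoted fields that contain commas; the standard csv module handles that case. A small sketch of the safer parse on the same kind of input:

import csv
import io

csv_text = 'name,size\n"a,b",1\n"c",2\n'
rows = list(csv.reader(io.StringIO(csv_text)))
csv_header, csv_data = rows[0], rows[1:]
print(csv_header)  # ['name', 'size']
print(csv_data)    # [['a,b', '1'], ['c', '2']]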
Example 9
def jagged_stats_info(arr_, lbl, col_lbls):
    import numpy as np
    import utool as ut
    # Treat infinite values as nan
    arr = ut.recursive_replace(arr_, np.inf, np.nan)
    stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
    sel_stat_dict, sel_indices = ut.find_interesting_stats(stat_dict, col_lbls)
    sel_col_lbls = ut.take(col_lbls, sel_indices)
    statstr_kw   = dict(precision=3, newlines=True, lbl=lbl, align=True)
    stat_str     = ut.get_stats_str(stat_dict=stat_dict, **statstr_kw)
    sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw)
    sel_stat_str = 'sel_col_lbls = %s' % (ut.list_str(sel_col_lbls),) + '\n' + sel_stat_str
    return stat_str, sel_stat_str
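jagged_stats_info treats infinities as missing values before computing stats. A self-contained numpy sketch of that inf-to-nan replacement and a nan-aware statistic (ut.get_jagged_stats is assumed to do something like this per ragged row):

import numpy as np

jagged = [np.array([1.0, np.inf]), np.array([2.0, 3.0, np.nan])]
cleaned = [np.where(np.isinf(row), np.nan, row) for row in jagged]
print([float(np.nanmean(row)) for row in cleaned])  # [1.0, 2.5]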
Example 10
def autogen_argparse_block(extra_args=[]):
    """
    SHOULD TURN ANY REGISTERED ARGS INTO A NEW PARSING CONFIG
    FILE FOR BETTER --help COMMANDS

    import utool as ut
    __REGISTERED_ARGS__ = ut.util_arg.__REGISTERED_ARGS__

    Args:
        extra_args (list): (default = [])

    CommandLine:
        python -m utool.util_arg --test-autogen_argparse_block

    Example:
        >>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> extra_args = []
        >>> result = ut.autogen_argparse_block(extra_args)
        >>> print(result)
    """
    #import utool as ut  # NOQA
    #__REGISTERED_ARGS__
    # TODO FINISHME

    grouped_args = []
    # Group similar args
    for argtup in __REGISTERED_ARGS__:
        argstr_list, type_, default, help_ = argtup
        argstr_set = set(argstr_list)
        # <MULTIKEY_SETATTR>
        # hack in multikey setattr; naive O(n**2) grouping
        found = False
        for index, (keyset, vals) in enumerate(grouped_args):
            if len(keyset.intersection(argstr_set)) > 0:
                # update
                keyset.update(argstr_set)
                vals.append(argtup)
                found = True
                break
        if not found:
            new_keyset = argstr_set
            new_vals = [argtup]
            grouped_args.append((new_keyset, new_vals))
        # </MULTIKEY_SETATTR>
    # DEBUG
    multi_groups = []
    for keyset, vals in grouped_args:
        if len(vals) > 1:
            multi_groups.append(vals)
    if len(multi_groups) > 0:
        import utool as ut
        print('The following args were specified multiple times')
        print(ut.list_str(multi_groups, newlines=2))
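The MULTIKEY_SETATTR block merges alias sets that share any flag. A minimal sketch of that grouping loop on plain data, using the same "grow the first intersecting group" strategy (quadratic, as the comment admits):

grouped = []
for argstr_set in [{'--foo', '-f'}, {'--bar'}, {'-f', '--force'}]:
    for keyset, vals in grouped:
        if keyset & argstr_set:
            keyset.update(argstr_set)
            vals.append(argstr_set)
            break
    else:
        grouped.append((set(argstr_set), [argstr_set]))
print([sorted(keyset) for keyset, _ in grouped])
# [['--foo', '--force', '-f'], ['--bar']]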
Example 11
def on_contextMenuRequested(widget, index, pos):
    import utool as ut
    print('context request')
    if widget.api is not None:
        print(ut.list_str(widget.api.get_available_colnames()))
        # HACK test
        #widget.api.add_column_names(['qx2_gt_rank', 'qx2_gf_rank', 'qx2_gt_raw_score', 'qx2_gf_raw_score'])
        widget.refresh_headers()
        #widget.change_headers(widget.api.make_headers())
    if VERBOSE_ITEM_WIDGET:
        print('context request')
Example 12
def jagged_stats_info(arr_, lbl, col_lbls):
    import numpy as np
    import utool as ut
    # Treat infinite values as nan
    arr = ut.recursive_replace(arr_, np.inf, np.nan)
    stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
    sel_stat_dict, sel_indices = ut.find_interesting_stats(
        stat_dict, col_lbls)
    sel_col_lbls = ut.take(col_lbls, sel_indices)
    statstr_kw = dict(precision=3, newlines=True, lbl=lbl, align=True)
    stat_str = ut.get_stats_str(stat_dict=stat_dict, **statstr_kw)
    sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw)
    sel_stat_str = 'sel_col_lbls = %s' % (
        ut.list_str(sel_col_lbls), ) + '\n' + sel_stat_str
    return stat_str, sel_stat_str
Example 13
    def biggest_dirs(drive):
        import utool as ut
        print('Biggest Dirs in %r' % (drive,))
        dpath_list = drive.dpath_list
        fidxs_list = ut.dict_take(drive.dpath_to_fidx, dpath_list)
        unflat_dpath_bytes_list = ut.list_unflat_take(drive.fpath_bytes_list, fidxs_list)
        dpath_nbytes_list = list(map(sum, unflat_dpath_bytes_list))

        sortx = ut.list_argsort(dpath_nbytes_list)[::-1]
        sel = sortx[0:10]
        biggest_nbytes = ut.take(dpath_nbytes_list, sel)
        biggest_dpaths = ut.take(dpath_list, sel)
        biginfo_list = list(zip(map(ut.byte_str2, biggest_nbytes), biggest_dpaths))
        print(ut.list_str(biginfo_list, strvals=True))
Example 14
def drive_test_script(ibs):
    r"""
    Test script where we drive around and take pictures of animals
    both in a given database and not in a given database to make sure
    the system works.

    CommandLine:
        python -m ibeis.viz.viz_image --test-drive_test_script
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_MTEST --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db GIR_Tanya --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db GIR_Master0 --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_Master0 --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show

        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m ibeis.viz.viz_image --test-drive_test_script --dbdir /raid/work2/Turk/GIR_Master --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_image import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb()
        >>> drive_test_script(ibs)
    """
    import ibeis
    import utool as ut
    import plottool as pt
    aid_list = ibeis.testdata_aids(a='default:pername=1')
    print('Running with (annot) aid_list = %r' % (aid_list))
    gid_list = ibs.get_annot_gids(aid_list)
    print('Running with (image) gid_list = %r' % (gid_list))
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    guuid_list = ibs.get_image_uuids(gid_list)
    print('Running with annot_visual_uuid_list = %s' % (ut.list_str(list(zip(aid_list, avuuid_list)))))
    print('Running with image_uuid_list = %s' % (ut.list_str(list(zip(gid_list, guuid_list)))))
    for gid, aid in ut.ProgressIter(zip(gid_list, aid_list), lbl='progress '):
        print('\ngid, aid, nid = %r, %r, %r' % (gid, aid, ibs.get_annot_nids(aid),))
        show_image(ibs, gid, annote=False, rich_title=True)
        pt.show_if_requested()
Example 15
def filter_duplicate_acfgs(expanded_aids_list,
                           acfg_list,
                           acfg_name_list,
                           verbose=ut.NOT_QUIET):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        ibeis -e print_acfg -a timectrl timectrl:view=left --db PZ_MTEST

    """
    from ibeis.expt import annotation_configs
    acfg_list_ = []
    expanded_aids_list_ = []
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = (ut.hashstr_arr27(qaids,
                                'qaids'), ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            seen_[key].append(acfg)
            continue
        else:
            seen_[key].append(acfg)
            expanded_aids_list_.append((qaids, daids))
            acfg_list_.append(acfg)
    if verbose:
        duplicate_configs = dict([(key_, val_) for key_, val_ in seen_.items()
                                  if len(val_) > 1])
        if len(duplicate_configs) > 0:
            print(
                'The following configs produced duplicate annotation configs')
            for key, val in duplicate_configs.items():
                # Print the semantic difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                print('+--')
                print('key = %r' % (key, ))
                print('duplicate_varied_cfgs = %s' %
                      (ut.list_str(varied_compressed_dict_list), ))
                print('duplicate_nonvaried_cfgs = %s' %
                      (ut.dict_str(nonvaried_compressed_dict), ))
                print('L__')

        print('[harn.help] parsed %d / %d unique annot configs from: %r' %
              (len(acfg_list_), len(acfg_list), acfg_name_list))
    return expanded_aids_list_, acfg_list_
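The dedup logic keys each config by a hash of its query/database annotation ids. A small sketch of the same keep-first-occurrence pattern, with plain tuples standing in for the ut.hashstr_arr27 keys:

from collections import defaultdict

pairs = [([1, 2], [9]), ([1, 2], [9]), ([3], [9])]
seen_ = defaultdict(list)
unique_pairs = []
for idx, (qaids, daids) in enumerate(pairs):
    key = (tuple(qaids), tuple(daids))
    if not seen_[key]:
        unique_pairs.append((qaids, daids))
    seen_[key].append(idx)
print(len(unique_pairs))  # 2
print({k: v for k, v in seen_.items() if len(v) > 1})  # the duplicate group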
Example 16
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile       = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths       = utool.list_images(test_image_dir, fullpath=True)   # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results,))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Example 17
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir,
                                 fullpath=True)  # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results, ))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Example 18
def draw_precision_recall_curve_(recall_range_, p_interp_curve, title_pref=None, fnum=1):
    import numpy as np
    import plottool as pt
    import utool as ut
    if recall_range_ is None:
        recall_range_ = np.array([])
        p_interp_curve = np.array([])
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)  # NOQA

    if p_interp_curve.size == 0:
        # avoid dividing by zero on an empty curve
        ave_p = np.nan
    else:
        ave_p = p_interp_curve.sum() / p_interp_curve.size

    pt.plot2(recall_range_, p_interp_curve, marker='o--',
             x_label='recall', y_label='precision', unitbox=True,
             flipx=False, color='r',
             title='Interpolated Precision Vs Recall\n' + 'avep = %r' % ave_p)
    print('Interpolated Precision')
    print(ut.list_str(list(zip(recall_range_, p_interp_curve))))
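The average precision here is just the mean of the interpolated curve. A tiny numeric sketch of that computation (curve values are made up for illustration):

import numpy as np

p_interp_curve = np.array([1.0, 1.0, 0.8, 0.6, 0.5])
ave_p = p_interp_curve.sum() / p_interp_curve.size
print(round(float(ave_p), 2))  # 0.78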
Example 19
def inspect_deck(deck):
    import utool as ut
    from six import text_type
    def get_card_tags(card, deck):
        tags = []
        stats = card.mana_source_stats(deck)
        if stats is not None:
            tags.append("land")
            if len(stats[1]) > 0:
                tags.append("tapland")
            else:
                tags.append("untapland")
        return tags

    # ------------
    print("len(deck) = %r" % (len(deck),))
    tags_list = [get_card_tags(card, deck) for card in deck.card_list]
    print("Deck Counts:")
    print(ut.repr2(ut.dict_hist(ut.flatten(tags_list)), nl=True))

    hand = deck.sample_hand()
    manastats_list = [card.mana_source_stats(deck) for card in hand]
    print(ut.list_str([card.name + ": " + text_type(stats) for card, stats in zip(hand, manastats_list)]))
    tags_list = [get_card_tags(card, deck) for card in hand]
    print("Hand Counts")
    print(ut.repr2(ut.dict_hist(ut.flatten(tags_list)), nl=True))

    valid_tags = ["land", "tapland", "untapland"]
    x = {tag: [] for tag in valid_tags}

    for _ in range(500):
        hand = deck.sample_hand()
        tags_list = [get_card_tags(card, deck) for card in hand]
        taghist = ut.dict_hist(ut.flatten(tags_list))
        for key, val in x.items():
            val.append(taghist.get(key, 0))

    print("Monte Stats:")
    for key, val in list(x.items()):
        print("%15s: %s" % (key, ut.repr2(ut.get_stats(val), precision=2)))

    def hand_stats():
        # [card.types for card in hand]
        # [card.rrr() for card in hand]
        [card.mana_source_stats(deck) for card in hand]
        card.types
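The tag histograms rely on ut.dict_hist over a flattened tag list. A stdlib sketch of the same counting, assuming ut.dict_hist and ut.flatten behave like collections.Counter and a one-level flatten:

from collections import Counter

tags_list = [['land', 'tapland'], ['land'], []]
flat_tags = [tag for tags in tags_list for tag in tags]
print(Counter(flat_tags))  # Counter({'land': 2, 'tapland': 1})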
Example 20
def filter_duplicate_acfgs(expanded_aids_list, acfg_list, acfg_name_list, verbose=ut.NOT_QUIET):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        ibeis -e print_acfg -a timectrl timectrl:view=left --db PZ_MTEST

    """
    from ibeis.expt import annotation_configs
    acfg_list_ = []
    expanded_aids_list_ = []
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = (ut.hashstr_arr27(qaids, 'qaids'), ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            seen_[key].append(acfg)
            continue
        else:
            seen_[key].append(acfg)
            expanded_aids_list_.append((qaids, daids))
            acfg_list_.append(acfg)
    if verbose:
        duplicate_configs = dict(
            [(key_, val_) for key_, val_ in seen_.items() if len(val_) > 1])
        if len(duplicate_configs) > 0:
            print('The following configs produced duplicate annotation configs')
            for key, val in duplicate_configs.items():
                # Print the semantic difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                print('+--')
                print('key = %r' % (key,))
                print('duplicate_varied_cfgs = %s' % (
                    ut.list_str(varied_compressed_dict_list),))
                print('duplicate_nonvaried_cfgs = %s' % (
                    ut.dict_str(nonvaried_compressed_dict),))
                print('L__')

        print('[harn.help] parsed %d / %d unique annot configs from: %r' % (
            len(acfg_list_), len(acfg_list), acfg_name_list))
    return expanded_aids_list_, acfg_list_
Example 21
def makeinit(module_path, exclude_modnames=[]):
    import utool as ut
    from utool._internal import util_importer
    #module_name = basename(module_path)
    module_name = ut.get_modname_from_modpath(module_path)
    IMPORT_TUPLES = util_importer.make_import_tuples(module_path, exclude_modnames=exclude_modnames)
    initstr = util_importer.make_initstr(module_name, IMPORT_TUPLES)
    regen_command = 'cd %s\n' % (module_path)
    regen_command += '    makeinit.py'
    if len(exclude_modnames) > 0:
        regen_command += ' -x ' + ' '.join(exclude_modnames)
    regen_block = (ut.codeblock('''
    """
    Regen Command:
        {regen_command}
    """
    ''').format(regen_command=regen_command))

    print('### __init__.py ###')
    print(initstr)
    print('\nIMPORT_TUPLES = ' + ut.list_str(IMPORT_TUPLES))
    print(regen_block)
Example 22
def precfg_dbs(db_list):
    r"""
    Helper to precompute information. Runs precfg on multiple databases.

    Args:
        db_list (list):

    CommandLine:
        python -m ibeis.expt.precomputer --exec-precfg_dbs
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist testdb1 PZ_MTEST
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist testdb1 PZ_MTEST --preload -t custom
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=PZ_MTEST,NNP_MasterGIRM_core,PZ_Master0,NNP_Master3,GZ_ALL,PZ_FlankHack --preload --delete-nn-cache

        #python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=PZ_Master0 -t candidacy1 --preload-chip --controlled --species=primary
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy --preload

        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy -t candidacy --preload-chip --species=primary --controlled
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy -t candidacy --preload-chip --species=primary --allgt
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy -t candidacy --preload-feat
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy -t candidacy --preload-featweight
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy -t candidacy --preload
        python -m ibeis.expt.precomputer --exec-precfg_dbs --dblist=candidacy --delete-nn-cache

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.precomputer import *  # NOQA
        >>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
        >>> result = precfg_dbs(db_list)
        >>> print(result)
    """
    import ibeis.init.main_helpers
    import ibeis
    import utool as ut
    if db_list == ['candidacy']:
        from ibeis.expt import experiment_configs
        db_list = experiment_configs.get_candidacy_dbnames()  # HACK
    print('db_list = %s' % (ut.list_str(db_list),))
    test_cfg_name_list = ut.get_argval('-t', type_=list, default=[])
    for db in db_list:
        ibs = ibeis.opendb(db=db)
        ibs, qaids, daids = ibeis.init.main_helpers.testdata_expanded_aids(verbose=False, ibs=ibs)
        precfg(ibs, qaids, daids, test_cfg_name_list)
Example 23
def get_text_between_lines(lnum1, lnum2, col1=0, col2=sys.maxsize - 1):
    import vim
    lines = vim.eval('getline({}, {})'.format(lnum1, lnum2))
    import utool as ut
    lines = ut.ensure_unicode_strlist(lines)
    try:
        if len(lines) == 0:
            pass
        elif len(lines) == 1:
            lines[0] = lines[0][col1:col2 + 1]
        else:
            lines[0] = lines[0][col1:]
            lines[-1] = lines[-1][:col2 + 1]
        text = '\n'.join(lines)
    except Exception:
        import utool as ut
        ut.ENABLE_COLORS = False
        ut.util_str.ENABLE_COLORS = False
        ut.util_dbg.COLORED_EXCEPTIONS = False
        print(ut.list_str(lines))
        raise
    return text
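The column clipping in the try block is independent of vim. A self-contained sketch of that slicing on plain strings, using the same first-line/last-line rule as above:

lines = ['alpha beta', 'gamma delta', 'epsilon']
col1, col2 = 2, 4
if len(lines) == 1:
    lines[0] = lines[0][col1:col2 + 1]
else:
    lines[0] = lines[0][col1:]
    lines[-1] = lines[-1][:col2 + 1]
print('\n'.join(lines))  # pha beta / gamma delta / epsil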
Example 24
def print_system_users():
    r"""

    prints users on the system

    On unix looks for /bin/bash users in /etc/passwd

    CommandLine:
        python -m utool.util_cplat --test-print_system_users

    Example:
        >>> # SCRIPT
        >>> from utool.util_cplat import *  # NOQA
        >>> result = print_system_users()
        >>> print(result)
    """
    import utool as ut
    text = ut.read_from('/etc/passwd')
    userinfo_text_list = text.splitlines()
    userinfo_list = [uitext.split(':') for uitext in userinfo_text_list]
    #print(ut.list_str(sorted(userinfo_list)))
    bash_users = [tup for tup in userinfo_list if tup[-1] == '/bin/bash']
    print(ut.list_str(sorted(bash_users)))
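A self-contained variant of the same /etc/passwd filtering, run on a hard-coded sample so it works on any platform:

passwd_text = (
    'root:x:0:0:root:/root:/bin/bash\n'
    'daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin\n'
    'alice:x:1000:1000::/home/alice:/bin/bash'
)
userinfo_list = [line.split(':') for line in passwd_text.splitlines()]
bash_users = [tup for tup in userinfo_list if tup[-1] == '/bin/bash']
print(sorted(tup[0] for tup in bash_users))  # ['alice', 'root']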
Example 25
def make_individual_latex_figures(ibs, fpaths_list, flat_case_labels,
                                  cfgx2_shortlbl, case_figdir,
                                  analysis_fpath_list):
    import re
    import utool as ut
    from os.path import join
    # HACK MAKE LATEX CONVENIENCE STUFF
    #print('LATEX HACK')
    if len(fpaths_list) == 0:
        print('nothing to render')
        return
    RENDER = ut.get_argflag('--render')
    DUMP_FIGDEF = ut.get_argflag(('--figdump', '--dump-figdef', '--figdef'))

    if not (DUMP_FIGDEF or RENDER):  # HACK
        return

    latex_code_blocks = []
    latex_block_keys = []

    caption_prefix = ut.get_argval('--cappref', type_=str, default='')
    caption_suffix = ut.get_argval('--capsuf', type_=str, default='')
    cmdaug = ut.get_argval('--cmdaug', type_=str, default='custom')

    selected = None

    for case_idx, (fpaths, labels) in enumerate(zip(fpaths_list, flat_case_labels)):
        if labels is None:
            labels = [cmdaug]
        if len(fpaths) < 4:
            nCols = len(fpaths)
        else:
            nCols = 2

        _cmdname = ibs.get_dbname() + ' Case ' + ' '.join(labels) + '_' + str(case_idx)
        #print('_cmdname = %r' % (_cmdname,))
        cmdname = ut.latex_sanitize_command_name(_cmdname)
        label_str = cmdname
        if len(caption_prefix) == 0:
            caption_str = ut.escape_latex('Casetags: ' +
                                          ut.list_str(labels, nl=False, strvals=True) +
                                          ', db=' + ibs.get_dbname() + '. ')
        else:
            caption_str = ''

        use_sublbls = len(cfgx2_shortlbl) > 1
        if use_sublbls:
            caption_str += ut.escape_latex('Each figure shows a different configuration: ')
            sublbls = ['(' + chr(97 + count) + ') ' for count in range(len(cfgx2_shortlbl))]
        else:
            #caption_str += ut.escape_latex('This figure depicts correct and
            #incorrect matches from configuration: ')
            sublbls = [''] * len(cfgx2_shortlbl)
        def wrap_tt(text):
            return r'{\tt ' + text + '}'
        _shortlbls = cfgx2_shortlbl
        _shortlbls = list(map(ut.escape_latex, _shortlbls))
        # Adjust spacing for breaks
        #tex_small_space = r''
        tex_small_space = r'\hspace{0pt}'
        # Remove query specific config flags in individual results
        _shortlbls = [re.sub('\\bq[^,]*,?', '', shortlbl) for shortlbl in _shortlbls]
        # Let config strings be broken over newlines
        _shortlbls = [re.sub('\\+', tex_small_space + '+' + tex_small_space, shortlbl)
                      for shortlbl in _shortlbls]
        _shortlbls = [re.sub(', *', ',' + tex_small_space, shortlbl)
                      for shortlbl in _shortlbls]
        _shortlbls = list(map(wrap_tt, _shortlbls))
        cfgx2_texshortlbl = ['\n    ' + lbl + shortlbl
                             for lbl, shortlbl in zip(sublbls, _shortlbls)]

        caption_str += ut.conj_phrase(cfgx2_texshortlbl, 'and') + '.\n    '
        caption_str = '\n    ' + caption_prefix + caption_str + caption_suffix
        caption_str = caption_str.rstrip()
        figure_str  = ut.get_latex_figure_str(fpaths,
                                              nCols=nCols,
                                              label_str=label_str,
                                              caption_str=caption_str,
                                              use_sublbls=None,
                                              use_frame=True)
        latex_block = ut.latex_newcommand(cmdname, figure_str)
        latex_block = '\n%----------\n' + latex_block
        latex_code_blocks.append(latex_block)
        latex_block_keys.append(cmdname)

    # HACK
    remove_fpath = ut.truepath('~/latex/crall-candidacy-2015') + '/'

    latex_fpath = join(case_figdir, 'latex_cases.tex')

    if selected is not None:
        selected_keys = selected
    else:
        selected_keys = latex_block_keys

    selected_blocks = ut.dict_take(dict(zip(latex_block_keys, latex_code_blocks)), selected_keys)

    figdef_block = '\n'.join(selected_blocks)
    figcmd_block = '\n'.join(['\\' + key for key in latex_block_keys])

    selected_block = figdef_block + '\n\n' + figcmd_block

    # HACK: need full paths to render
    selected_block_renderable = selected_block
    selected_block = selected_block.replace(remove_fpath, '')
    if RENDER:
        ut.render_latex_text(selected_block_renderable)

    if DUMP_FIGDEF:
        ut.writeto(latex_fpath, selected_block)

    #if NOT DUMP AND NOT RENDER:
    #    print('STANDARD LATEX RESULTS')
    #    cmdname = ibs.get_dbname() + 'Results'
    #    latex_block  = ut.get_latex_figure_str2(analysis_fpath_list, cmdname, nCols=1)
    #    ut.print_code(latex_block, 'latex')
    if DUMP_FIGDEF or RENDER:
        ut.print_code(selected_block, 'latex')
Example 26
def makeinit(mod_dpath, exclude_modnames=[], use_star=False):
    r"""
    Args:
        mod_dpath (str):
        exclude_modnames (list): (default = [])
        use_star (bool): (default = False)

    Returns:
        str: init_codeblock

    CommandLine:
        python -m utool.util_autogen makeinit --modname=ibeis.algo

    Example:
        >>> # SCRIPT
        >>> from utool.util_autogen import *  # NOQA
        >>> import utool as ut
        >>> modname = ut.get_argval('--modname', str, default=None)
        >>> mod_dpath = (os.getcwd() if modname is None else
        >>>              ut.get_modpath(modname, prefer_pkg=True))
        >>> mod_dpath = ut.unixpath(mod_dpath)
        >>> mod_fpath = join(mod_dpath, '__init__.py')
        >>> exclude_modnames = ut.get_argval(('--exclude', '-x'), list, default=[])
        >>> use_star = ut.get_argflag('--star')
        >>> init_codeblock = makeinit(mod_dpath, exclude_modnames, use_star)
        >>> ut.dump_autogen_code(mod_fpath, init_codeblock)
    """
    from utool._internal import util_importer
    import utool as ut
    module_name = ut.get_modname_from_modpath(mod_dpath)
    IMPORT_TUPLES = util_importer.make_import_tuples(mod_dpath, exclude_modnames=exclude_modnames)
    initstr = util_importer.make_initstr(module_name, IMPORT_TUPLES)
    regen_command = 'cd %s\n' % (mod_dpath)
    regen_command += '    makeinit.py'
    regen_command += ' --modname={modname}'.format(modname=module_name)
    if use_star:
        regen_command += ' --star'
    if len(exclude_modnames) > 0:
        regen_command += ' -x ' + ' '.join(exclude_modnames)

    regen_block = (ut.codeblock('''
    """
    Regen Command:
        {regen_command}
    """
    ''').format(regen_command=regen_command))

    importstar_codeblock = ut.codeblock(
        '''
        """
        python -c "import {module_name}" --dump-{module_name}-init
        python -c "import {module_name}" --update-{module_name}-init
        """
        __DYNAMIC__ = True
        if __DYNAMIC__:
            # TODO: import all utool external prereqs. Then the imports will not import
            # anything that is already in a toplevel namespace
            # COMMENTED OUT FOR FROZEN __INIT__
            # Dynamically import listed util libraries and their members.
            from utool._internal import util_importer
            # FIXME: this might actually work with rrrr, but things aren't being
            # reimported because they are already in the modules list
            import_execstr = util_importer.dynamic_import(__name__, IMPORT_TUPLES)
            exec(import_execstr)
            DOELSE = False
        else:
            # Do the nonexec import (can force it to happen no matter what if always
            # set to True)
            DOELSE = True

        if DOELSE:
            # <AUTOGEN_INIT>
            pass
            # </AUTOGEN_INIT>
        '''.format(module_name=module_name)
    )

    ts_line = '# Autogenerated on {ts}'.format(ts=ut.get_timestamp('printable'))

    init_codeblock_list = ['# -*- coding: utf-8 -*-', ts_line]
    init_codeblock_list.append(initstr)
    init_codeblock_list.append('\nIMPORT_TUPLES = ' + ut.list_str(IMPORT_TUPLES))
    if use_star:
        init_codeblock_list.append(importstar_codeblock)
    init_codeblock_list.append(regen_block)

    init_codeblock = '\n'.join(init_codeblock_list)
    return init_codeblock
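ut.codeblock keeps the triple-quoted templates above readable. A stdlib sketch of what it is assumed to do (dedent plus trimming the surrounding blank lines):

import textwrap

block = textwrap.dedent(
    '''
    """
    Regen Command:
        makeinit.py --modname=some.module
    """
    ''').strip('\n')
print(block)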
Example 27
def fix_conference_title_names(clean_text, key_list=None):
    """
    mass bibtex fixes

    CommandLine:
        ./fix_bib.py
    """

    # Find citations from the tex documents
    if key_list is None:
        key_list = find_used_citations(testdata_fpaths())
        key_list = list(set(key_list))
        ignore = ['JP', '?']
        for item in ignore:
            try:
                key_list.remove(item)
            except ValueError:
                pass

    unknown_confkeys = []

    conference_keys = [
        'journal',
        'booktitle',
    ]

    ignore_confkey = []

    bib_database = bibtexparser.loads(clean_text)

    bibtex_dict = bib_database.get_entry_dict()

    isect = set(ignore_confkey).intersection(
        set(constants_tex_fixes.CONFERENCE_TITLE_MAPS.keys()))
    assert len(isect) == 0, repr(isect)

    #ut.embed()
    #conftitle_to_types_hist = ut.ddict(list)

    type_key = 'ENTRYTYPE'

    debug_author = ut.get_argval('--debug-author', type_=str, default=None)
    # ./fix_bib.py --debug_author=Kappes

    for key in bibtex_dict.keys():
        entry = bibtex_dict[key]

        if debug_author is not None:
            debug = debug_author in entry.get('author', '')
        else:
            debug = False

        if debug:
            print(' --- ENTRY ---')
            print(ut.repr3(entry))

        #if type_key not in entry:
        #    #entry[type_key] = entry['ENTRYTYPE']
        #    ut.embed()

        # Clip the abstract to its first few words
        if 'abstract' in entry:
            entry['abstract'] = ' '.join(entry['abstract'].split(' ')[0:7])

        # Remove Keys
        remove_keys = [
            'note',
            'urldate',
            'series',
            'publisher',
            'isbn',
            'editor',
            'shorttitle',
            'copyright',
            'language',
            'month',
            # These will be put back in
            #'number',
            #'pages',
            #'volume',
        ]
        entry = ut.delete_dict_keys(entry, remove_keys)

        # Fix conference names
        confkeys = list(set(entry.keys()).intersection(set(conference_keys)))
        #entry = ut.delete_dict_keys(entry, ['abstract'])
        # TODO: FIX THESE IF NEEDBE
        #if len(confkeys) == 0:
        #    print(ut.dict_str(entry))
        #    print(entry.keys())
        if len(confkeys) == 1:
            confkey = confkeys[0]
            old_confval = entry[confkey]
            # Remove curly braces
            old_confval = old_confval.replace('{', '').replace('}', '')
            if old_confval in ignore_confkey:
                print(ut.dict_str(entry))
                continue

            new_confval_candiates = []
            if old_confval.startswith('arXiv'):
                continue

            # for conf_title, patterns in constants_tex_fixes.CONFERENCE_TITLE_MAPS.items():
            for conf in constants_tex_fixes.CONFERENCES:
                if conf.matches(old_confval):
                    conf_title = conf.accro()
                    if debug:
                        print('old_confval = %r' % (old_confval, ))
                        print('conf_title = %r' % (conf_title, ))
                    new_confval = conf_title
                    new_confval_candiates.append(new_confval)

            if len(new_confval_candiates) == 0:
                new_confval = None
            elif len(new_confval_candiates) == 1:
                new_confval = new_confval_candiates[0]
            else:
                assert False, 'double match'

            if new_confval is None:
                if key in key_list:
                    unknown_confkeys.append(old_confval)
                #print(old_confval)
            else:
                # Overwrite old confval
                entry[confkey] = new_confval

            # Record info about types of conferences
            true_confval = entry[confkey].replace('{', '').replace('}', '')

            # FIX ENTRIES THAT SHOULD BE CONFERENCES
            if true_confval in constants_tex_fixes.CONFERENCE_LIST:
                if entry[type_key] == 'inproceedings':
                    pass
                    #print(confkey)
                    #print(ut.dict_str(entry))
                elif entry[type_key] == 'article':
                    entry['booktitle'] = entry['journal']
                    del entry['journal']
                    #print(ut.dict_str(entry))
                elif entry[type_key] == 'incollection':
                    pass
                else:
                    raise AssertionError('UNKNOWN TYPE: %r' %
                                         (entry[type_key], ))

                if 'booktitle' not in entry:
                    print('DOES NOT HAVE CORRECT CONFERENCE KEY')
                    print(ut.dict_str(entry))

                assert 'journal' not in entry, 'should not have journal'

                #print(entry['type'])
                entry[type_key] = 'inproceedings'

            # FIX ENTRIES THAT SHOULD BE JOURNALS
            if true_confval in constants_tex_fixes.JOURNAL_LIST:

                if entry[type_key] == 'article':
                    pass
                elif entry[type_key] == 'inproceedings':
                    pass
                    #print(ut.dict_str(entry))
                elif entry[type_key] == 'incollection':
                    pass
                else:
                    raise AssertionError('UNKNOWN TYPE: %r' %
                                         (entry['type'], ))

                if 'journal' not in entry:
                    print('DOES NOT HAVE CORRECT CONFERENCE KEY')
                    print(ut.dict_str(entry))

                assert 'booktitle' not in entry, 'should not have booktitle'
                #print(entry['type'])
                #entry['type'] = 'article'

            #conftitle_to_types_hist[true_confval].append(entry['type'])

        elif len(confkeys) > 1:
            raise AssertionError('more than one confkey=%r' % (confkeys, ))

        # Fix Authors
        if 'author' in entry:
            authors = six.text_type(entry['author'])
            for truename, alias_list in constants_tex_fixes.AUTHOR_NAME_MAPS.items():
                pattern = six.text_type(
                    ut.regex_or([
                        ut.util_regex.whole_word(alias) for alias in alias_list
                    ]))
                authors = re.sub(pattern,
                                 six.text_type(truename),
                                 authors,
                                 flags=re.UNICODE)
            entry['author'] = authors
    """
    article = journal
    inprocedings = converence paper

    """

    #conftitle_to_types_set_hist = {key: set(val) for key, val in conftitle_to_types_hist.items()}
    #print(ut.dict_str(conftitle_to_types_set_hist))

    print(ut.list_str(sorted(unknown_confkeys)))
    print('len(unknown_confkeys) = %r' % (len(unknown_confkeys), ))

    writer = BibTexWriter()
    writer.contents = ['comments', 'entries']
    writer.indent = '  '
    writer.order_entries_by = ('type', 'author', 'year')

    new_bibtex_str = bibtexparser.dumps(bib_database, writer)
    return new_bibtex_str
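The author-normalization pass builds a whole-word alternation per alias list. A hedged regex sketch of that substitution (ut.regex_or and ut.util_regex.whole_word are assumed to produce a pattern like the one built here):

import re

alias_list = ['J. Smith', 'John Smyth']
pattern = '|'.join(r'\b%s\b' % re.escape(alias) for alias in alias_list)
authors = 'J. Smith and K. Lee and John Smyth'
print(re.sub(pattern, 'John Smith', authors))
# John Smith and K. Lee and John Smith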
Example 28
def print_acfg_list(acfg_list,
                    expanded_aids_list=None,
                    ibs=None,
                    combined=False,
                    **kwargs):
    r"""
    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  ibeis controller object (default = None)
        combined (bool): (default = False)

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    #print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(
            ut.argparse_dict(dict(zip(kwkeys, [None] * len(kwkeys))),
                             only_specified=True))

    hashid_list = []
    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] + ' d_cfgname=' +
                 acfg['dcfg']['_cfgname'])

        ut.colorprint(
            '+--- acfg %d / %d -- %s ---- ' %
            (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' +
              ut.dict_str(varied_compressed_dict_list[acfgx], strvals=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            key = (ut.hashstr_arr27(qaids,
                                    'qaids'), ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    seen_[key].append(acfgx)
                    stats_, locals_ = ibs.get_annotconfig_stats(
                        qaids,
                        daids,
                        verbose=False,
                        combined=combined,
                        **annotstats_kw)
                    hashids = (stats_['qaid_stats']['qhashid'],
                               stats_['daid_stats']['dhashid'])
                    hashid_list.append(hashids)
                    stats_str2 = ut.dict_str(stats_,
                                             strvals=True,
                                             newlines=True,
                                             explicit=False,
                                             nobraces=False)
                    print('annot_config_stats = ' + stats_str2)
            else:
                dupindex = seen_[key]
                print('DUPLICATE of index %r' % (dupindex, ))
                dupdict = varied_compressed_dict_list[dupindex[0]]
                print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
    print('hashid summary = ' + ut.list_str(hashid_list, nl=1))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
Example 29
def list_distinctivness_cache():
    import utool as ut
    from ibeis.init import sysres
    global_distinctdir = sysres.get_global_distinctiveness_modeldir()
    print(ut.list_str(ut.ls(global_distinctdir)))
Example 30
def inject_instance(self, classtype=None, allow_override=False,
                    verbose=VERBOSE_CLASS, strict=True):
    """
    Injects an instance (self) of type (classtype)
    with all functions registered to (classtype)

    call this in the __init__ class function

    Args:
        self: the class instance
        classtype: key for a class, preferably the class type itself, but it
            doesn't have to be

    SeeAlso:
        make_class_method_decorator

    Example:
        >>> # DOCTEST_DISABLE
        >>> utool.make_class_method_decorator(InvertedIndex)(smk_debug.invindex_dbgstr)
        >>> utool.inject_instance(invindex)
    """
    import utool as ut
    if verbose:
        print('[util_class] begin inject_instance')
    try:
        if classtype is None:
            # Probably should deprecate this block of code
            # It tries to do too much
            classtype = self.__class__
            if classtype == 'ibeis.gui.models_and_views.IBEISTableView':
                # HACK HACK HACK
                from guitool.__PYQT__ import QtGui
                classtype = QtGui.QAbstractItemView
            if len(__CLASSTYPE_ATTRIBUTES__[classtype]) == 0:
                print('[utool] Warning: no classes of type %r are registered' % (classtype,))
                print('[utool] type(self)=%r, self=%r' % (type(self), self))
                print('[utool] Checking to see if anybody else was registered...')
                print('[utool] __CLASSTYPE_ATTRIBUTES__ = ' + ut.list_str(__CLASSTYPE_ATTRIBUTES__.keys()))
                for classtype_, _ in six.iteritems(__CLASSTYPE_ATTRIBUTES__):
                    isinstance(self, classtype_)
                    classtype = classtype_
                    print('[utool] Warning: using subclass=%r' % (classtype_,))
                    break
        func_list = __CLASSTYPE_ATTRIBUTES__[classtype]
        if verbose or util_arg.VERBOSE:
            print('[util_class] injecting %d methods\n   with classtype=%r\n   into %r' % (len(func_list), classtype, self,))
        for func in func_list:
            if VERBOSE_CLASS:
                print('[util_class] * injecting %r' % (func,))
            method_name = None
            # Allow user to register tuples for aliases
            if isinstance(func, tuple):
                func, method_name = func
            inject_func_as_method(self, func, method_name=method_name, allow_override=allow_override)
        if verbose:
            print('[util_class] Running postinject functions on %r' % (self,))
        for func in __CLASSTYPE_POSTINJECT_FUNCS__[classtype]:
            func(self)
        if verbose:
            print('[util_class] Finished injecting instance self=%r' % (self,))
    except Exception as ex:
        ut.printex(ex, 'ISSUE WHEN INJECTING %r' % (classtype,),
                      iswarning=not strict)
        if strict:
            raise
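inject_func_as_method is the primitive this loop depends on. A minimal stdlib sketch of binding a plain function onto a single instance (types.MethodType stands in for the assumed utool behavior):

import types

class Toy(object):
    pass

def describe(self):
    return 'Toy instance: %r' % (self,)

toy = Toy()
# bind describe to this one instance, roughly what inject_func_as_method does
toy.describe = types.MethodType(describe, toy)
print(toy.describe())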
Example 31
def test_sver_wrapper2():
    r"""
    CommandLine:
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper2
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper2 --no-c --quiet
        python -m vtool.sver_c_wrapper --test-test_sver_wrapper2 --rebuild-sver

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.sver_c_wrapper import *  # NOQA
        >>> result = test_sver_wrapper2()
        >>> print(result)

    Ignore:
        C (Serial):
            unique cases affine inliers: [
                '[ 4 25 33 36 37 53]',
            ]
            unique cases homog inliers: [
                '[]',
            ]

        C (Parallel)
            unique cases affine inliers: [
                '[ 4 25 33 36 37 53]',
                '[10 19 25 29 36 39 53]',
            ]
            unique cases homog inliers: [
                '[10 43 53]',
                '[]',
            ]

        Python:
            unique cases affine inliers: [
                '[10 19 25 29 36 39 53]',
            ]
            unique cases homog inliers: [
                '[10 43 53]',
            ]
    """
    import utool as ut
    import vtool
    import vtool.tests.testdata_nondeterm_sver
    (kpts1, kpts2, fm, xy_thresh, scale_thresh, ori_thresh, dlen_sqrd2,
     min_nInliers, match_weights,
     full_homog_checks) = vtool.tests.testdata_nondeterm_sver.testdata_nondeterm_sver()
    inliers_list = []
    homog_inliers_list = []

    for x in range(10):
        sv_tup = vtool.spatially_verify_kpts(
            kpts1,
            kpts2,
            fm,
            xy_thresh,
            scale_thresh,
            ori_thresh,
            dlen_sqrd2,
            min_nInliers,
            match_weights=match_weights,
            full_homog_checks=full_homog_checks,
            returnAff=True)
        aff_inliers = sv_tup[3]
        inliers_list.append(str(aff_inliers))
        homog_inliers_list.append(str(sv_tup[0]))

        #print(sv_tup[0])
        #print(sv_tup[3])
    print('unique cases affine inliers: ' +
          ut.list_str(list(set(inliers_list))))
    print('unique cases homog inliers: ' +
          ut.list_str(list(set(homog_inliers_list))))
Example 32
def augment_nnindexer_experiment():
    """

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show


        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6


    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()

    """
    import ibeis
    # build test data
    #ibs = ibeis.opendb('PZ_MTEST')
    ibs = ibeis.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        #addition_stride = 64
        #addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        #max_ceiling = 4000
        #max_ceiling = 2000
        #max_ceiling = 600
    else:
        raise AssertionError('unknown dbname %r' % (ibs.get_dbname(),))
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed
    #ibs.get_annot_vecs(all_randomize_daids_, ensure=True)
    #ibs.get_annot_fgweights(all_randomize_daids_, ensure=True)

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(ut.ProgressIter(_addition_iter, lbl=addition_lbl,
                                         freq=1, autoadjust=False))
    time_list_addition = []
    #time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []

    #for _ in range(80):
    #    next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            print('===============\n\n')
        print(ut.list_str(time_list_addition))
        print(ut.list_str(list(map(id, nnindexer_list))))
        print(ut.list_str(tmp_cfgstr_list))
        print(ut.list_str(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        reindex_label = 'Reindex'
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        time_list_reindex = []
        #time_list_reindex = []
        reindex_count_list = []

        for count in reindex_iter:
            print('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            #count = max_num // 16 + ((x % 6) * 1)
            #x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            print('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' % (
                len(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),))
            print('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            #import cv2
            #import matplotlib as mpl
            #print(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #     ))
            print('L___________________\n\n\n')
        print(ut.list_str(time_list_reindex))
        if IS_SMALL:
            print(ut.list_str(list(map(id, nnindexer_list))))
            print(ut.list_str(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        print('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input
        while not resolution.isdigit():
            print('\n[train] What do you want to do?')
            print('[train]     0 - Continue')
            print('[train]     1 - Embed')
            print('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            print('resuming training...')
        elif resolution == 1:
            ut.embed()

    import plottool as pt

    import functools
    next_fnum = functools.partial(next, iter(range(0, 1)))  # py2/py3 compatible
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(addition_count_list, time_list_addition, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=addition_lbl + ' Time')

    if len(reindex_count_list) > 0:
        pt.plot2(reindex_count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_label + ' Time')

    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
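The function above follows a standard benchmarking pattern: time incremental augmentation against a full rebuild at increasing input sizes. A self-contained toy version of that pattern, with a plain list standing in for the nearest-neighbor indexer (all names here are illustrative):

# Toy version of the addition-vs-reindex timing loop, using a list as a
# stand-in for the indexer.
import time


def run_experiment(max_num=10000, stride=2000):
    add_times, rebuild_times = [], []
    counts = list(range(stride, max_num + 1, stride))
    index = []
    for count in counts:
        # Incremental addition: extend the existing structure.
        t0 = time.time()
        index.extend(range(len(index), count))
        add_times.append(time.time() - t0)
        # Full rebuild: reconstruct from scratch at the same size.
        t0 = time.time()
        rebuilt = list(range(count))  # NOQA
        rebuild_times.append(time.time() - t0)
    return counts, add_times, rebuild_times


counts, add_times, rebuild_times = run_experiment()
for row in zip(counts, add_times, rebuild_times):
    print('count=%d add=%.6fs rebuild=%.6fs' % row)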
Example no. 35
0
def get_data_list():
    r"""
    CommandLine:
        python ~/code/ibeis/_installers/ibeis_pyinstaller_data_helper.py --test-get_data_list

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_pyinstaller_data_helper import *  # NOQA
        >>> result = get_data_list()
        >>> DATATUP_LIST, BINARYTUP_LIST, iconfile = result
        >>> print('DATATUP_LIST = ' + ut.list_str(DATATUP_LIST))
        >>> print('BINARYTUP_LIST = ' + ut.list_str(BINARYTUP_LIST))
        >>> print(len(DATATUP_LIST))
        >>> print(len(BINARYTUP_LIST))
        >>> print(iconfile)

    """
    # Build data before running analysis for quick debugging
    DATATUP_LIST = []
    BINARYTUP_LIST = []

    #import pyhesaff
    #pyhesaff.HESAFF_CLIB.__LIB_FPATH__
    #import pyrf
    #pyrf.RF_CLIB.__LIB_FPATH__
    # Hesaff
    libhesaff_fname = 'libhesaff' + LIB_EXT
    libhesaff_src = realpath(
        join(root_dir, '..', 'hesaff', 'pyhesaff', libhesaff_fname))
    libhesaff_dst = join(ibsbuild, 'pyhesaff', 'lib', libhesaff_fname)
    DATATUP_LIST.append((libhesaff_dst, libhesaff_src))

    # PyRF
    libpyrf_fname = 'libpyrf' + LIB_EXT
    libpyrf_src = realpath(join(root_dir, '..', 'pyrf', 'pyrf', libpyrf_fname))
    libpyrf_dst = join(ibsbuild, 'pyrf', 'lib', libpyrf_fname)
    DATATUP_LIST.append((libpyrf_dst, libpyrf_src))

    # FLANN
    libflann_fname = 'libflann' + LIB_EXT
    #try:
    #    #import pyflann
    #    #pyflann.__file__
    #    #join(dirname(dirname(pyflann.__file__)), 'build')
    #except ImportError as ex:
    #    print('PYFLANN IS NOT IMPORTABLE')
    #    raise
    #if WIN32 or LINUX:
    # FLANN
    #libflann_src = join_SITE_PACKAGES('pyflann', 'lib', libflann_fname)
    #libflann_dst = join(ibsbuild, libflann_fname)
    #elif APPLE:
    #    # libflann_src = '/pyflann/lib/libflann.dylib'
    #    # libflann_dst = join(ibsbuild, libflann_fname)
    #    libflann_src = join_SITE_PACKAGES('pyflann', 'lib', libflann_fname)
    #    libflann_dst = join(ibsbuild, libflann_fname)
    # This path is when pyflann was built using setup.py develop
    libflann_src = realpath(
        join(root_dir, '..', 'flann', 'build', 'lib', libflann_fname))
    libflann_dst = join(ibsbuild, 'pyflann', 'lib', libflann_fname)
    DATATUP_LIST.append((libflann_dst, libflann_src))

    # VTool
    vtool_libs = ['libsver']
    for libname in vtool_libs:
        lib_fname = libname + LIB_EXT
        vtlib_src = realpath(join(root_dir, '..', 'vtool', 'vtool', lib_fname))
        vtlib_dst = join(ibsbuild, 'vtool', lib_fname)
        DATATUP_LIST.append((vtlib_dst, vtlib_src))

    linux_lib_dpaths = [
        '/usr/lib/x86_64-linux-gnu', '/usr/lib', '/usr/local/lib'
    ]

    # OpenMP
    if APPLE:
        # BSDDB, Fix for the modules that PyInstaller needs and (for some reason)
        # are not being added by PyInstaller
        libbsddb_src = '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload/_bsddb.so'
        libbsddb_dst = join(ibsbuild, '_bsddb.so')
        DATATUP_LIST.append((libbsddb_dst, libbsddb_src))
        #libgomp_src = '/opt/local/lib/libgomp.dylib'
        libgomp_src = '/opt/local/lib/gcc48/libgomp.dylib'
        BINARYTUP_LIST.append(('libgomp.1.dylib', libgomp_src, 'BINARY'))

        # very hack
        libiomp_src = '/Users/bluemellophone/code/libomp_oss/exports/mac_32e/lib.thin/libiomp5.dylib'
        BINARYTUP_LIST.append(('libiomp5.dylib', libiomp_src, 'BINARY'))

    if LINUX:
        libgomp_src = ut.search_in_dirs('libgomp.so.1', linux_lib_dpaths)
        ut.assertpath(libgomp_src)
        BINARYTUP_LIST.append(('libgomp.so.1', libgomp_src, 'BINARY'))

    # MinGW
    if WIN32:
        mingw_root = r'C:\MinGW\bin'
        mingw_dlls = [
            'libgcc_s_dw2-1.dll', 'libstdc++-6.dll', 'libgomp-1.dll',
            'pthreadGC2.dll'
        ]
        for lib_fname in mingw_dlls:
            lib_src = join(mingw_root, lib_fname)
            lib_dst = join(ibsbuild, lib_fname)
            DATATUP_LIST.append((lib_dst, lib_src))

    # We need to add these 4 opencv libraries because pyinstaller does not find them.
    #OPENCV_EXT = {'win32': '248.dll',
    #              'darwin': '.2.4.dylib',
    #              'linux2': '.so.2.4'}[PLATFORM]

    target_cv_version = '3.0.0'

    OPENCV_EXT = {
        'win32': target_cv_version.replace('.', '') + '.dll',
        'darwin': '.' + target_cv_version + '.dylib',
        'linux2': '.so.' + target_cv_version
    }[PLATFORM]

    missing_cv_name_list = [
        'libopencv_videostab',
        'libopencv_superres',
        'libopencv_stitching',
        #'libopencv_gpu',
        'libopencv_core',
        'libopencv_highgui',
        'libopencv_imgproc',
    ]
    # Hack to find the appropriate opencv libs
    for name in missing_cv_name_list:
        fname = name + OPENCV_EXT
        src = ''
        dst = ''
        if APPLE:
            src = join('/opt/local/lib', fname)
        elif LINUX:
            #src = join('/usr/lib', fname)
            src, tried = ut.search_in_dirs(fname,
                                           linux_lib_dpaths,
                                           strict=True,
                                           return_tried=True)
        elif WIN32:
            if ut.get_computer_name() == 'Ooo':
                src = join(r'C:/Program Files (x86)/OpenCV/x86/mingw/bin',
                           fname)
            else:
                src = join(root_dir, '../opencv/build/bin', fname)
        dst = join(ibsbuild, fname)
        # ut.assertpath(src)
        DATATUP_LIST.append((dst, src))

    ##################################
    # QT Gui dependencies
    ##################################
    if APPLE:
        walk_path = '/opt/local/Library/Frameworks/QtGui.framework/Versions/4/Resources/qt_menu.nib'
        for root, dirs, files in os.walk(walk_path):
            for lib_fname in files:
                toc_src = join(walk_path, lib_fname)
                toc_dst = join('qt_menu.nib', lib_fname)
                DATATUP_LIST.append((toc_dst, toc_src))

    ##################################
    # Documentation, Icons, and Web Assets
    ##################################
    # Documentation
    #userguide_dst = join('.', '_docs', 'IBEISUserGuide.pdf')
    #userguide_src = join(root_dir, '_docs', 'IBEISUserGuide.pdf')
    #DATATUP_LIST.append((userguide_dst, userguide_src))

    # Icon File
    ICON_EXT = {'darwin': '.icns', 'win32': '.ico', 'linux2': '.ico'}[PLATFORM]
    iconfile = join('_installers', 'ibsicon' + ICON_EXT)
    icon_src = join(root_dir, iconfile)
    icon_dst = join(ibsbuild, iconfile)
    DATATUP_LIST.append((icon_dst, icon_src))

    print('[installer] Checking Data (preweb)')
    try:
        for (dst, src) in DATATUP_LIST:
            assert ut.checkpath(
                src, verbose=True), 'checkpath for src=%r failed' % (src, )
    except Exception as ex:
        ut.printex(
            ex,
            'Checking data failed DATATUP_LIST=' + ut.list_str(DATATUP_LIST))
        raise

    # Web Assets
    INSTALL_WEB = True and not ut.get_argflag('--noweb')
    if INSTALL_WEB:
        web_root = join('ibeis', 'web/')
        #walk_path = join(web_root, 'static')
        #static_data = []
        #for root, dirs, files in os.walk(walk_path):
        #    root2 = root.replace(web_root, '')
        #    for icon_fname in files:
        #        if '.DS_Store' not in icon_fname:
        #            toc_src = join(abspath(root), icon_fname)
        #            toc_dst = join(root2, icon_fname)
        #            static_data.append((toc_dst, toc_src))
        #ut.get_list_column(static_data, 1) == ut.glob(walk_path, '*', recursive=True, with_dirs=False, exclude_dirs=['.DS_Store'])
        static_src_list = ut.glob(join(web_root, 'static'),
                                  '*',
                                  recursive=True,
                                  with_dirs=False,
                                  exclude_dirs=['.DS_Store'])
        static_dst_list = [
            relpath(src, join(root_dir, 'ibeis')) for src in static_src_list
        ]
        static_data = zip(static_dst_list, static_src_list)
        DATATUP_LIST.extend(static_data)

        #walk_path = join(web_root, 'templates')
        #template_data = []
        #for root, dirs, files in os.walk(walk_path):
        #    root2 = root.replace(web_root, '')
        #    for icon_fname in files:
        #        if '.DS_Store' not in icon_fname:
        #            toc_src = join(abspath(root), icon_fname)
        #            toc_dst = join(root2, icon_fname)
        #            template_data.append((toc_dst, toc_src))
        template_src_list = ut.glob(join(web_root, 'templates'),
                                    '*',
                                    recursive=True,
                                    with_dirs=False,
                                    exclude_dirs=['.DS_Store'])
        template_dst_list = [
            relpath(src, join(root_dir, 'ibeis')) for src in template_src_list
        ]
        template_data = zip(template_dst_list, template_src_list)
        DATATUP_LIST.extend(template_data)

    print('[installer] Checking Data (postweb)')
    try:
        for (dst, src) in DATATUP_LIST:
            assert ut.checkpath(
                src, verbose=False), 'checkpath for src=%r failed' % (src, )
    except Exception as ex:
        ut.printex(
            ex,
            'Checking data failed DATATUP_LIST=' + ut.list_str(DATATUP_LIST))
        raise

    return DATATUP_LIST, BINARYTUP_LIST, iconfile
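The `(dst, src)` pairs built here are typically handed to a PyInstaller spec file. A hedged sketch of that hand-off, assuming the classic TOC triple format `(dest_name, source_path, typecode)` used by older PyInstaller releases (verify against the PyInstaller version in use; the helper name `to_toc` is illustrative):

# Sketch: turning the (dst, src) pairs into PyInstaller TOC-style triples.
# 'DATA' / 'BINARY' typecodes follow the classic TOC convention.
def to_toc(datatup_list, binarytup_list):
    datas = [(dst, src, 'DATA') for (dst, src) in datatup_list]
    # BINARYTUP_LIST entries are already (name, path, 'BINARY') triples.
    return datas + list(binarytup_list)


datas = to_toc([('pyrf/lib/libpyrf.so', '/abs/path/libpyrf.so')],
               [('libgomp.so.1', '/usr/lib/libgomp.so.1', 'BINARY')])
print(datas)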
Example no. 36
0
def ensure_pz_mtest_mergesplit_test():
    r"""
    Make a test database for MERGE and SPLIT cases

    CommandLine:
        python -m ibeis.init.sysres --test-ensure_pz_mtest_mergesplit_test

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest_mergesplit_test()
    """
    import ibeis
    ibeis.ensure_pz_mtest()
    workdir = ibeis.sysres.get_workdir()
    mtest_dbpath = join(workdir, 'PZ_MTEST')

    source_dbdir = mtest_dbpath
    dest_dbdir = join(workdir, 'PZ_MERGESPLIT_MTEST')

    if ut.get_argflag('--reset'):
        ut.delete(dest_dbdir)
    if ut.checkpath(dest_dbdir):
        return

    copy_ibeisdb(source_dbdir, dest_dbdir)

    ibs = ibeis.opendb('PZ_MERGESPLIT_MTEST')
    assert len(ibs.get_valid_aids()) == 119
    assert len(ibs.get_valid_nids()) == 41

    aid_list = ibs.get_valid_aids()
    aids_list, nid_list = ibs.group_annots_by_name(aid_list)
    num_aids = list(map(len, aids_list))

    # num cases wanted
    num_merge = 3
    num_split = 1
    num_combo = 1

    # num inputs needed
    num_merge_names = num_merge
    num_split_names = num_split * 2
    num_combo_names = num_combo * 3

    total_names = num_merge_names + num_split_names + num_combo_names

    modify_aids = ut.take(
        aids_list,
        ut.list_argsort(num_aids, reverse=True)[0:total_names])

    merge_nids1 = ibs.make_next_nids(num_merge, location_text='XMERGE')
    merge_nids2 = ibs.make_next_nids(num_merge, location_text='XMERGE')
    split_nid = ibs.make_next_nids(num_split, location_text='XSPLIT')[0]
    combo_nids = ibs.make_next_nids(num_combo * 2, location_text='XCOMBO')

    # the first 3 become merge cases
    #left = 0
    #right = left + num_merge
    for aids, nid1, nid2 in zip(modify_aids[0:3], merge_nids1, merge_nids2):
        #ibs.get_annot_nids(aids)
        aids_ = aids[::2]
        ibs.set_annot_name_rowids(aids_, [nid1] * len(aids_))
        ibs.set_annot_name_rowids(aids_, [nid2] * len(aids_))

    # the next 2 become split cases
    #left = right
    #right = left + num_split_names
    for aids in modify_aids[3:5]:
        ibs.set_annot_name_rowids(aids, [split_nid] * len(aids))

    #left = right
    #right = left + num_combo_names
    # The final 3 are a combination case
    for aids in modify_aids[5:8]:
        aids_even = aids[::2]
        aids_odd = aids[1::2]
        ibs.set_annot_name_rowids(aids_even, [combo_nids[0]] * len(aids_even))
        ibs.set_annot_name_rowids(aids_odd, [combo_nids[1]] * len(aids_odd))

    final_result = ibs.unflat_map(ibs.get_annot_nids, modify_aids)
    print('final_result = %s' % (ut.list_str(final_result), ))
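The merge and combo cases above rely on interleaved slicing to partition one annotation group into two disjoint halves; a tiny standalone illustration of the idiom:

# Interleaved slicing splits one group into two disjoint halves, which is
# how the merge and combo cases assign two name ids to one group.
aids = [11, 12, 13, 14, 15, 16, 17]
aids_even = aids[::2]   # -> [11, 13, 15, 17]
aids_odd = aids[1::2]   # -> [12, 14, 16]
assert sorted(aids_even + aids_odd) == aids
print(aids_even, aids_odd)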
Example no. 37
0
def build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list):
    """ helper
    these list comprehensions replace the previous for loop;
    they still need to be optimized a little bit (and made clearer),
    and the list comprehensions could probably be unnested as well
    """

    """
    IGNORE
    Legacy::
        def old_build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list):
            fm_nestlist_ = []
            fs_nestlist_ = []
            daid_nestlist_ = []
            for scores, qfxs, dfxs, daids in zip(sparse_list, qfxs_list, dfxs_list, daids_list):
                for rx, cx, score in zip(scores.row, scores.col, scores.data):
                    _fm = tuple(product(qfxs[rx], dfxs[cx]))
                    _fs = [score / len(_fm)] * len(_fm)
                    _daid = [daids[cx]] * len(_fm)
                    fm_nestlist_.append(_fm)
                    fs_nestlist_.append(_fs)
                    daid_nestlist_.append(_daid)
            return fm_nestlist_, fs_nestlist_, daid_nestlist_

        oldtup_ = old_build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)
        fm_nestlist_, fs_nestlist_, daid_nestlist_ = oldtup_
        newtup_ = build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)
        fm_nestlist, fs_nestlist, daid_nestlist = newtup_

        assert fm_nestlist == fm_nestlist_
        assert fs_nestlist == fs_nestlist_
        assert daid_nestlist == daid_nestlist_

        47ms
        %timeit build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)

        59ms
        %timeit old_build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)
    IGNORE
    """
    # FIXME: rewrite double comprehension as a flat comprehension

    # Build nested feature matches (a single match might have many members)
    fm_nestlist = [
        tuple(product(qfxs[rx], dfxs[cx]))
        for scores, qfxs, dfxs in zip(sparse_list, qfxs_list, dfxs_list)
        for rx, cx in zip(scores.row, scores.col)
    ]
    nFm_list = [len(fm) for fm in fm_nestlist]
    #fs_unsplit = (score
    #              for scores in sparse_list
    #              for score in scores.data)
    #daid_unsplit = (daids[cx]
    #                for scores, daids in zip(sparse_list, daids_list)
    #                for cx in scores.col)
    # Build nested feature scores
    fs_unsplit = utool.iflatten(
        (scores.data for scores in sparse_list))
    # Build nested feature matches (a single match might have many members)
    daid_unsplit = utool.iflatten(
        (daids.take(scores.col)
         for scores, daids in zip(sparse_list, daids_list)))
    # Expand feature scores and daids splitting scores amongst match members
    fs_nestlist = [
        [score / nFm] * nFm
        for score, nFm in zip(fs_unsplit, nFm_list)
    ]
    daid_nestlist = [
        [daid] * nFm
        for daid, nFm in zip(daid_unsplit, nFm_list)
    ]

    if DEBUG_SMK:
        assert len(fm_nestlist) == len(fs_nestlist), 'inconsistent len'
        assert len(fm_nestlist) == len(nFm_list), 'inconsistent len'
        assert len(daid_nestlist) == len(fs_nestlist), 'inconsistent len'
        min_ = min(2, len(nFm_list))
        max_ = min(15, len(nFm_list))
        print('nFm_list[_min:_max]      = ' + utool.list_str(nFm_list[min_:max_]))
        print('fm_nestlist[_min:_max]   = ' + utool.list_str(fm_nestlist[min_:max_]))
        print('fs_nestlist[_min:_max]   = ' + utool.list_str(fs_nestlist[min_:max_]))
        print('daid_nestlist[_min:_max] = ' + utool.list_str(daid_nestlist[min_:max_]))
        for fm_, fs_, daid_ in zip(fm_nestlist, fs_nestlist, daid_nestlist):
            assert len(fm_) == len(fs_), 'inconsistent len'
            assert len(fm_) == len(daid_), 'inconsistent len'
        print('[smk_core] checked build_chipmatch correspondence ...ok')
    return fm_nestlist, fs_nestlist, daid_nestlist
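To make the comprehensions above concrete: a single coarse match between query features and database features expands into member pairs, with the score split evenly among them (toy data only):

# Toy expansion: one visual-word match between query features [0, 1] and
# database feature [7] yields two member pairs, each taking half the score.
from itertools import product

qfxs, dfxs, score = [0, 1], [7], 1.0
fm = tuple(product(qfxs, dfxs))        # ((0, 7), (1, 7))
fs = [score / len(fm)] * len(fm)       # [0.5, 0.5]
print(fm, fs)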
Example no. 38
0
def grep_projects(tofind_list,
                  user_profile=None,
                  verbose=True,
                  new=False,
                  **kwargs):
    r"""
    Greps the projects defined in the current UserProfile

    Args:
        tofind_list (list):
        user_profile (None): (default = None)

    Kwargs:
        user_profile

    CommandLine:
        python -m utool --tf grep_projects grep_projects

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_project import *  # NOQA
        >>> import utool as ut
        >>> import sys
        >>> tofind_list = ut.get_argval('--find', type_=list,
        >>>                             default=[sys.argv[-1]])
        >>> grep_projects(tofind_list)
    """
    import utool as ut
    user_profile = ensure_user_profile(user_profile)

    kwargs = kwargs.copy()
    colored = kwargs.pop('colored', True)

    grepkw = {}
    grepkw['greater_exclude_dirs'] = user_profile.project_exclude_dirs
    grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
    grepkw['dpath_list'] = user_profile.project_dpaths
    grepkw['include_patterns'] = user_profile.project_include_patterns
    grepkw['exclude_patterns'] = user_profile.project_exclude_patterns
    grepkw.update(kwargs)

    msg_list1 = []
    msg_list2 = []

    print_ = msg_list1.append
    print_('Grepping Projects')
    print_('tofind_list = %s' % (ut.list_str(tofind_list, nl=True), ))
    #print_('grepkw = %s' % ut.dict_str(grepkw, nl=True))
    if verbose:
        print('\n'.join(msg_list1))
    #with ut.Timer('greping', verbose=True):
    grep_result = ut.grep(tofind_list, **grepkw)
    found_fpath_list, found_lines_list, found_lxs_list = grep_result

    # HACK, duplicate behavior. TODO: write grep print result function
    reflags = grepkw.get('reflags', 0)
    _exprs_flags = [ut.extend_regex2(expr, reflags) for expr in tofind_list]
    extended_regex_list = ut.take_column(_exprs_flags, 0)
    reflags_list = ut.take_column(_exprs_flags, 1)
    # HACK
    # pat = ut.util_regex.regex_or(extended_regex_list)
    reflags = reflags_list[0]

    # from utool import util_regex
    resultstr = ut.make_grep_resultstr(grep_result,
                                       extended_regex_list,
                                       reflags,
                                       colored=colored)
    msg_list2.append(resultstr)
    print_ = msg_list2.append
    #for fpath, lines, lxs in zip(found_fpath_list, found_lines_list,
    #                             found_lxs_list):
    #    print_('----------------------')
    #    print_('found %d line(s) in %r: ' % (len(lines), fpath))
    #    name = split(fpath)[1]
    #    max_line = len(lines)
    #    ndigits = str(len(str(max_line)))
    #    for (lx, line) in zip(lxs, lines):
    #        line = line.replace('\n', '')
    #        print_(('%s : %' + ndigits + 'd |%s') % (name, lx, line))
    # iter_ = zip(found_fpath_list, found_lines_list, found_lxs_list)
    # for fpath, lines, lxs in iter_:
    #     print_('----------------------')
    #     print_('found %d line(s) in %r: ' % (len(lines), fpath))
    #     name = split(fpath)[1]
    #     max_line = len(lines)
    #     ndigits = str(len(str(max_line)))
    #     for (lx, line) in zip(lxs, lines):
    #         line = line.replace('\n', '')
    #         colored_line = ut.highlight_regex(
    #             line.rstrip('\n'), pat, reflags=reflags)
    #         print_(('%s : %' + ndigits + 'd |%s') % (name, lx, colored_line))

    print_('====================')
    print_('found_fpath_list = ' + ut.list_str(found_fpath_list))
    print_('')
    #print_('gvim -o ' + ' '.join(found_fpath_list))
    if verbose:
        print('\n'.join(msg_list2))
    msg_list = msg_list1 + msg_list2

    if new:
        return GrepResult(found_fpath_list, found_lines_list, found_lxs_list,
                          extended_regex_list, reflags)
    else:
        return msg_list
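`ut.grep` returns the `(found_fpath_list, found_lines_list, found_lxs_list)` triple unpacked above. A bare-bones, standalone analogue of that step, assuming nothing beyond the standard library (the helper name `mini_grep` is illustrative; the real utool version adds include/exclude patterns, progress, and coloring):

# Bare-bones project grep: walk directories, regex-match each line, and
# collect (fpath, lines, line-numbers) like ut.grep's result triple.
import io
import os
import re


def mini_grep(pattern, dpath_list, exts=('.py',)):
    regex = re.compile(pattern)
    found_fpaths, found_lines, found_lxs = [], [], []
    for dpath in dpath_list:
        for root, _, fnames in os.walk(dpath):
            for fname in fnames:
                if not fname.endswith(exts):
                    continue
                fpath = os.path.join(root, fname)
                with io.open(fpath, 'r', errors='ignore') as file_:
                    hits = [(lx, line.rstrip('\n'))
                            for lx, line in enumerate(file_)
                            if regex.search(line)]
                if hits:
                    found_fpaths.append(fpath)
                    found_lxs.append([lx for lx, _ in hits])
                    found_lines.append([line for _, line in hits])
    return found_fpaths, found_lines, found_lxs


print(mini_grep(r'def\s+grep', ['.']))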
Example no. 39
0
def findcite():
    """
    prints info about used and unused citations
    """
    tex_fpath_list = testdata_fpaths()
    citekey_list = find_used_citations(tex_fpath_list)

    # Find uncited entries
    #bibtexparser = ut.tryimport('bibtexparser')
    bib_fpath = 'My_Library_clean.bib'
    bibtex_str = ut.read_from(bib_fpath)
    bib_database = bibtexparser.loads(bibtex_str)
    bibtex_dict = bib_database.get_entry_dict()

    for key in bibtex_dict.keys():
        entry = bibtex_dict[key]
        entry = ut.map_dict_keys(six.text_type, entry)
        entry = ut.map_dict_keys(six.text_type.lower, entry)
        bibtex_dict[key] = entry

    print('ALL')
    ignore = ['JP', '?']
    citekey_list = ut.setdiff_ordered(sorted(ut.unique(citekey_list)), ignore)
    #print(ut.indentjoin(citekey_list))
    print('len(citekey_list) = %r' % (len(citekey_list), ))

    unknown_keys = list(set(citekey_list) - set(bibtex_dict.keys()))
    unused_keys = list(set(bibtex_dict.keys()) - set(citekey_list))

    try:
        if len(unknown_keys) != 0:
            print('\nUNKNOWN KEYS:')
            print(ut.list_str(unknown_keys))
            raise AssertionError('unknown keys')
    except AssertionError as ex:
        ut.printex(ex, iswarning=True, keys=['unknown_keys'])

    @ut.argv_flag_dec(indent='    ')
    def close_keys():
        if len(unknown_keys) > 0:
            print('\nDid you mean:')
            for key in unknown_keys:
                print('---')
                print(key)
                print(ut.closet_words(key, bibtex_dict.keys(), 3))
            print('L___')
        else:
            print('no unknown keys')

    close_keys()

    @ut.argv_flag_dec(indent='    ')
    def print_unused():
        print(ut.indentjoin(ut.sortedby(unused_keys, map(len, unused_keys))))

        print('len(unused_keys) = %r' % (len(unused_keys), ))

    print_unused()

    all_authors = []
    for key in bibtex_dict.keys():
        entry = bibtex_dict[key]
        toremove = ['author', '{', '}', r'\\textbackslash']
        author = ut.multi_replace(entry.get('author', ''), toremove, '')
        authors = author.split(' and ')
        all_authors.extend(authors)

    @ut.argv_flag_dec(indent='    ')
    def author_hist():
        #print(all_authors)
        hist_ = ut.dict_hist(all_authors, ordered=True)
        hist_[''] = None
        del hist_['']
        print('Author histogram')
        print(ut.dict_str(hist_)[-1000:])

    author_hist()

    @ut.argv_flag_dec(indent='    ')
    def unused_important():
        important_authors = [
            'hinton',
            'chum',
            'Jegou',
            'zisserman',
            'schmid',
            'sivic',
            'matas',
            'lowe',
            'perronnin',
            'douze',
        ]

        for key in unused_keys:
            entry = bibtex_dict[key]
            author = entry.get('author', '')
            #authors = author.split(' and ')
            hasimportant = any(auth in author.lower()
                               for auth in important_authors)
            if hasimportant or 'smk' in str(entry).lower():
                toremove = [
                    'note', 'month', 'type', 'pages', 'urldate', 'language',
                    'volume', 'number', 'publisher'
                ]
                entry = ut.delete_dict_keys(entry, toremove)
                print(
                    ut.dict_str(entry,
                                strvals=True,
                                key_order=['title', 'author', 'id']))

    unused_important()
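The unknown/unused bookkeeping above is plain set arithmetic; a toy version with illustrative keys:

# Toy used/unused citation check mirroring the set logic in findcite.
citekey_list = ['Lowe2004', 'Sivic2003', 'Fake2099']
bibtex_keys = {'Lowe2004', 'Sivic2003', 'Jegou2010'}
unknown_keys = sorted(set(citekey_list) - bibtex_keys)   # cited, not in .bib
unused_keys = sorted(bibtex_keys - set(citekey_list))    # in .bib, not cited
print(unknown_keys, unused_keys)  # ['Fake2099'] ['Jegou2010']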
Example no. 40
0
    show_diff = ut.get_argflag('--diff')
    do_write = ut.get_argflag('--write')

    need_encoding_fpaths = []

    for pat in pattern_items:
        print('Checking for pattern: %r' % (pat, ))
        for fpath in fpath_list:
            pattern = re.escape(pat)
            found_lines, found_lxs = ut.grepfile(fpath, pattern)
            # DID NOT FIND ENCODING LINE
            if len(found_lines) == 0:
                need_encoding_fpaths.append(fpath)

    print('The following fpaths need encoding lines: ' +
          ut.list_str(need_encoding_fpaths, strvals=True))

    if do_write or show_diff:
        for fpath in need_encoding_fpaths:
            print('\n-----------------\nFound file without encoding line: ' +
                  fpath)
            line_list = ut.read_lines_from(fpath)
            linenum = find_encoding_insert_position(line_list)
            if linenum is not None:
                #print(' * linenum = %r' % (linenum,))
                new_lines = (line_list[:linenum] + [encoding_line + '\n'] +
                             line_list[linenum:])
                new_text = ''.join(new_lines)
                if show_diff:
                    old_text = ''.join(line_list)
                    textdiff = ut.get_textdiff(old_text, new_text)
                    print(textdiff)
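`find_encoding_insert_position` is referenced but not shown in this fragment. A plausible stand-in (an assumption, not the original helper): PEP 263 requires the coding line on line 1 or 2, so insert after a shebang when one is present.

# Hypothetical stand-in for find_encoding_insert_position.
def find_encoding_insert_position(line_list):
    if line_list and line_list[0].startswith('#!'):
        return 1
    return 0


print(find_encoding_insert_position(['#!/usr/bin/env python\n', 'pass\n']))  # 1
print(find_encoding_insert_position(['import os\n']))  # 0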
Example no. 41
0
def assert_modules():
    """
    checkinfo functions return info_dict
    checkinfo_func

    CommandLine:
        python -m ibeis.tests.assert_modules --test-assert_modules

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.tests.assert_modules import *   # NOQA
        >>> detailed_msg = assert_modules()
        >>> print(detailed_msg)
    """

    MACHINE_NAME = ut.get_computer_name()

    machine_info_lines = []

    machine_info_lines.append('sys.version = %r ' % (sys.version))
    machine_info_lines.append('PATH = ' + ut.list_str(ut.get_path_dirs()))
    machine_info_lines.append('\n\n\n============================')
    machine_info_lines.append('Beginning assert modules main')
    machine_info_lines.append('* MACHINE_NAME = %r' % MACHINE_NAME)
    machine_info_text = '\n'.join(machine_info_lines)
    print(machine_info_text)

    statustext_list = []
    failed_list = []
    fix_list = []

    SHOW_STATUS = not ut.get_argflag(('--nostatus', '--nostat'))

    for checkinfo_wrapper in ASSERT_FUNCS:
        passed, current_version, target, infodict, statustext, suggested_fix = checkinfo_wrapper()
        funcname = get_funcname(checkinfo_wrapper)
        if SHOW_STATUS:
            statustext_list.append(statustext)
        if passed:
            statustext_list.append(funcname + ' ' + str(infodict['__version__']) + ' passed')
            #statustext_list.append('')
        else:
            failed_list.append(funcname + ' FAILED!!!')
            fix_list.append(suggested_fix)
            statustext_list.append(funcname + ' FAILED!!!')
        if SHOW_STATUS:
            statustext_list.append('')

    output_text = '\n'.join(statustext_list)

    failed_text = '\n'.join(failed_list)
    print(output_text)
    print(failed_text)
    check_exist_text = check_modules_exists()
    print(check_exist_text)
    fix_text = ''
    if len(fix_list) > 0:
        fix_text += ('suggested fixes:\n')
        fix_text += ('\n'.join(fix_list) + '\n')
        print(fix_text)

    detailed_msg = '\n'.join([
        machine_info_text,
        output_text,
        failed_text,
        check_exist_text,
        fix_text,
    ])

    return detailed_msg
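Each entry in `ASSERT_FUNCS` is expected to return a six-tuple. A hedged sketch of what one such checkinfo-style wrapper might look like, with the shape inferred from the unpacking in the loop above (an assumption, not the original module):

# Illustrative checkinfo-style wrapper matching the six-tuple:
# (passed, current_version, target, infodict, statustext, suggested_fix)
def check_numpy(target='1.9.0'):
    import numpy
    current = numpy.__version__

    def _key(ver):
        return tuple(int(p) for p in ver.split('.')[:2])

    passed = _key(current) >= _key(target)
    infodict = {'__version__': current}
    statustext = 'numpy %s (target %s)' % (current, target)
    suggested_fix = '' if passed else 'pip install -U numpy'
    return passed, current, target, infodict, statustext, suggested_fix


print(check_numpy()[4])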
Example no. 43
0
def test_siamese_performance(model, data, labels, flat_metadata, dataname=''):
    r"""
    CommandLine:
        utprof.py -m ibeis_cnn --tf pz_patchmatch --db liberty --test --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test  --ensure
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test  --ensure --weights=new
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --train --weights=new
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 --test  # NOQA
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128
    """
    import vtool as vt
    import plottool as pt

    # TODO: save in model.training_dpath/diagnostics/figures
    ut.colorprint('\n[siam_perf] Testing Siamese Performance', 'white')
    #epoch_dpath = model.get_epoch_diagnostic_dpath()
    epoch_dpath = model.arch_dpath
    ut.vd(epoch_dpath)

    dataname += ' ' + model.get_history_hashid() + '\n'

    history_text = ut.list_str(model.era_history, newlines=True)

    ut.write_to(ut.unixjoin(epoch_dpath, 'era_history.txt'), history_text)

    #if True:
    #    import matplotlib as mpl
    #    mpl.rcParams['agg.path.chunksize'] = 100000

    #data   = data[::50]
    #labels = labels[::50]
    #from ibeis_cnn import utils
    #data, labels = utils.random_xy_sample(data, labels, 10000, model.data_per_label_input)

    FULL = not ut.get_argflag('--quick')

    fnum_gen = pt.make_fnum_nextgen()

    ut.colorprint('[siam_perf] Show era history', 'white')
    fig = model.show_era_loss(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)

    # hack
    ut.colorprint('[siam_perf] Show weights image', 'white')
    fig = model.show_weights_image(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
    #model.draw_all_conv_layer_weights(fnum=fnum_gen())
    #model.imwrite_weights(1)
    #model.imwrite_weights(2)

    # Compute each type of score
    ut.colorprint('[siam_perf] Building Scores', 'white')
    test_outputs = model.predict2(model, data)
    network_output = test_outputs['network_output_determ']
    # hack converting network output to distances for non-descriptor networks
    if len(network_output.shape) == 2 and network_output.shape[1] == 1:
        cnn_scores = network_output.T[0]
    elif len(network_output.shape) == 1:
        cnn_scores = network_output
    elif len(network_output.shape) == 2 and network_output.shape[1] > 1:
        assert model.data_per_label_output == 2
        vecs1 = network_output[0::2]
        vecs2 = network_output[1::2]
        cnn_scores = vt.L2(vecs1, vecs2)
    else:
        raise AssertionError('unexpected network_output shape %r' % (
            network_output.shape,))
    cnn_scores = cnn_scores.astype(np.float64)

    # Segfaults when the data passed in is large (AND MEMMAPPED, apparently)
    # Fixed in hesaff implementation
    SIFT = FULL
    if SIFT:
        sift_scores, sift_list = test_sift_patchmatch_scores(data, labels)
        sift_scores = sift_scores.astype(np.float64)

    ut.colorprint('[siam_perf] Learning Encoders', 'white')
    # Learn encoders
    encoder_kw = {
        #'monotonize': False,
        'monotonize': True,
    }
    cnn_encoder = vt.ScoreNormalizer(**encoder_kw)
    cnn_encoder.fit(cnn_scores, labels)

    if SIFT:
        sift_encoder = vt.ScoreNormalizer(**encoder_kw)
        sift_encoder.fit(sift_scores, labels)

    # Visualize
    ut.colorprint('[siam_perf] Visualize Encoders', 'white')
    viz_kw = dict(
        with_scores=False,
        with_postbayes=False,
        with_prebayes=False,
        target_tpr=.95,
    )
    inter_cnn = cnn_encoder.visualize(
        figtitle=dataname + ' CNN scores. #data=' + str(len(data)),
        fnum=fnum_gen(), **viz_kw)
    if SIFT:
        inter_sift = sift_encoder.visualize(
            figtitle=dataname + ' SIFT scores. #data=' + str(len(data)),
            fnum=fnum_gen(), **viz_kw)

    # Save
    pt.save_figure(fig=inter_cnn.fig, dpath=epoch_dpath)
    if SIFT:
        pt.save_figure(fig=inter_sift.fig, dpath=epoch_dpath)

    # Save out examples of hard errors
    #cnn_fp_label_indicies, cnn_fn_label_indicies =
    #cnn_encoder.get_error_indicies(cnn_scores, labels)
    #sift_fp_label_indicies, sift_fn_label_indicies =
    #sift_encoder.get_error_indicies(sift_scores, labels)

    with_patch_examples = FULL
    if with_patch_examples:
        ut.colorprint('[siam_perf] Visualize Confusion Examples', 'white')
        cnn_indicies = cnn_encoder.get_confusion_indicies(cnn_scores, labels)
        if SIFT:
            sift_indicies = sift_encoder.get_confusion_indicies(sift_scores, labels)

        warped_patch1_list, warped_patch2_list = list(zip(*ut.ichunks(data, 2)))
        samp_args = (warped_patch1_list, warped_patch2_list, labels)
        _sample = functools.partial(draw_results.get_patch_sample_img, *samp_args)

        cnn_fp_img = _sample({'fs': cnn_scores}, cnn_indicies.fp)[0]
        cnn_fn_img = _sample({'fs': cnn_scores}, cnn_indicies.fn)[0]
        cnn_tp_img = _sample({'fs': cnn_scores}, cnn_indicies.tp)[0]
        cnn_tn_img = _sample({'fs': cnn_scores}, cnn_indicies.tn)[0]

        if SIFT:
            sift_fp_img = _sample({'fs': sift_scores}, sift_indicies.fp)[0]
            sift_fn_img = _sample({'fs': sift_scores}, sift_indicies.fn)[0]
            sift_tp_img = _sample({'fs': sift_scores}, sift_indicies.tp)[0]
            sift_tn_img = _sample({'fs': sift_scores}, sift_indicies.tn)[0]

        #if ut.show_was_requested():
        #def rectify(arr):
        #    return np.flipud(arr)
        SINGLE_FIG = False
        if SINGLE_FIG:
            def dump_img(img_, lbl, fnum):
                fig, ax = pt.imshow(img_, figtitle=dataname + ' ' + lbl, fnum=fnum)
                pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
            dump_img(cnn_fp_img, 'cnn_fp_img', fnum_gen())
            dump_img(cnn_fn_img, 'cnn_fn_img', fnum_gen())
            dump_img(cnn_tp_img, 'cnn_tp_img', fnum_gen())
            dump_img(cnn_tn_img, 'cnn_tn_img', fnum_gen())

            dump_img(sift_fp_img, 'sift_fp_img', fnum_gen())
            dump_img(sift_fn_img, 'sift_fn_img', fnum_gen())
            dump_img(sift_tp_img, 'sift_tp_img', fnum_gen())
            dump_img(sift_tn_img, 'sift_tn_img', fnum_gen())
            #vt.imwrite(dataname + '_' + 'cnn_fp_img.png', (cnn_fp_img))
            #vt.imwrite(dataname + '_' + 'cnn_fn_img.png', (cnn_fn_img))
            #vt.imwrite(dataname + '_' + 'sift_fp_img.png', (sift_fp_img))
            #vt.imwrite(dataname + '_' + 'sift_fn_img.png', (sift_fn_img))
        else:
            print('Drawing TP FP TN FN')
            fnum = fnum_gen()
            pnum_gen = pt.make_pnum_nextgen(4, 2)
            fig = pt.figure(fnum)
            pt.imshow(cnn_fp_img,  title='CNN FP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fp_img, title='SIFT FP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_fn_img,  title='CNN FN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fn_img, title='SIFT FN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tp_img,  title='CNN TP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tp_img, title='SIFT TP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tn_img,  title='CNN TN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tn_img, title='SIFT TN', fnum=fnum, pnum=pnum_gen())
            pt.set_figtitle(dataname + ' confusions')
            pt.adjust_subplots(left=0, right=1.0, bottom=0., wspace=.01, hspace=.05)
            pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))

    with_patch_desc = FULL
    if with_patch_desc:
        ut.colorprint('[siam_perf] Visualize Patch Descriptors', 'white')
        fnum = fnum_gen()
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1))
        num_rows = 7
        pnum_gen = pt.make_pnum_nextgen(num_rows, 3)
        # Compare actual output descriptors
        for index in ut.random_indexes(len(sift_list), num_rows):
            vec_sift = sift_list[index]
            vec_cnn = network_output[index]
            patch = data[index]
            pt.imshow(patch, fnum=fnum, pnum=pnum_gen())
            pt.plot_descriptor_signature(vec_cnn, 'cnn vec',  fnum=fnum, pnum=pnum_gen())
            pt.plot_sift_signature(vec_sift, 'sift vec',  fnum=fnum, pnum=pnum_gen())
        pt.set_figtitle('Patch Descriptors')
        pt.adjust_subplots(left=0, right=0.95, bottom=0., wspace=.1, hspace=.15)
        pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))
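The branch near the top of this function that handles `data_per_label_output == 2` treats consecutive output rows as descriptor pairs; a small numpy stand-in for `vt.L2` makes the conversion concrete:

# For data_per_label_output == 2 the network emits descriptors in
# interleaved pairs; rows 0::2 and 1::2 belong together, and the score is
# the L2 distance between each pair (plain numpy stand-in for vt.L2).
import numpy as np

network_output = np.array([[0., 0.], [3., 4.],    # pair 0 -> dist 5
                           [1., 1.], [1., 1.]])   # pair 1 -> dist 0
vecs1 = network_output[0::2]
vecs2 = network_output[1::2]
cnn_scores = np.sqrt(((vecs1 - vecs2) ** 2).sum(axis=1))
print(cnn_scores)  # [5. 0.]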
Example no. 44
0
def process_batch(X_train, y_train, batch_size, theano_fn, **kwargs):
    """
    compute the loss over all training batches

    Jon, if you get to this before I do, please fix. -J

    CommandLine:
        python -m ibeis_cnn.batch_processing --test-process_batch

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.batch_processing import *  # NOQA
        >>> from ibeis_cnn import models
        >>> model = models.DummyModel(autoinit=True)
        >>> X_train, y_train = model.make_random_testdata()
        >>> theano_fn = create_unbuffered_iter_funcs_train2(model)
        >>> kwargs = {'X_is_cv2_native': False}
        >>> batch_size = model.batch_size
        >>> (loss, accu, prob_list, albl_list, pred_list, conf_list) = process_batch(X_train, y_train, batch_size, theano_fn)
        >>> result = str((loss, accu, prob_list, albl_list, pred_list, conf_list))
        >>> print(result)

    Ignore:
        Xb, yb = batch_iter.next()
        assert Xb.shape == (8, 1, 4, 4)
        yb.shape == (8,)
    """
    batch_output_list = []  # NOQA
    output_names = [op.variable.name for op in theano_fn.outputs]  # NOQA
    albl_list = []  # [a]ugmented [l]a[b]e[l] list
    show = False
    batch_iter = batch_iterator(X_train, y_train, batch_size, **kwargs)
    for Xb, yb in batch_iter:
        # Runs a batch through the network and updates the weights. Just returns what it did
        batch_output = theano_fn(Xb, yb)
        albl_list.append(yb)
        batch_output_list.append(batch_output)

        if show:
            # Print the network output for the first batch
            print('--------------')
            print(ut.list_str(zip(output_names, batch_output)))
            print('Correct: ', yb)
            print('--------------')
            show = False
    # Convert to numpy array

    # get outputs of each type

    def concatenate_hack(sequence, axis=0):
        # Hack to fix a numpy quirk: concatenate rejects 0-dim arrays, so
        # fall back to hstack when the batch outputs are scalars.
        if len(sequence) > 0 and len(sequence[0].shape) == 0:
            res = np.hstack(sequence)
        else:
            res = np.concatenate(sequence, axis=axis)
        return res

    unstacked_output_gen = ([bop[count] for bop in batch_output_list]
                            for count, name in enumerate(output_names))
    stacked_output_list = [concatenate_hack(_output_unstacked, axis=-1)
                           for _output_unstacked in unstacked_output_gen]

    albl_list = np.hstack(albl_list)

    # Calculate performance
    # Default every returned value so the return statement cannot raise a
    # NameError when a given output name is absent from this theano function.
    loss = accu = None
    prob_list = pred_list = conf_list = None
    loss_index = ut.listfind(output_names, 'loss_train')
    if loss_index is not None:
        loss_list = stacked_output_list[loss_index]
        loss = np.mean(loss_list)

    pred_index = ut.listfind(output_names, 'prediction')
    if pred_index is not None:
        pred_list = stacked_output_list[pred_index]
        accu = np.mean(np.equal(albl_list, pred_list))

    # Return
    return loss, accu, prob_list, albl_list, pred_list, conf_list
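Why `concatenate_hack` is needed at all: `np.concatenate` refuses 0-dimensional arrays (such as scalar per-batch losses), while `np.hstack` promotes them to 1-d and succeeds:

# Demonstration of the failure mode and the hstack workaround.
import numpy as np

scalars = [np.array(0.5), np.array(0.25)]
try:
    np.concatenate(scalars)
except ValueError as ex:
    print('concatenate failed: %s' % (ex,))
print(np.hstack(scalars))  # [0.5  0.25]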
Example no. 45
0
def process_batch(model, X, y, theano_fn, fix_output=False, buffered=False,
                  show=False, spatial=False, showprog=True, **kwargs):
    """
    Compute the loss over all training batches.
    Passes data to a function that splits it into batches and appropriately
    preprocesses it, sends each batch through theano, and packages the
    results up nicely before returning.

    CommandLine:
        python -m ibeis_cnn --tf process_batch --verbose
        python -m ibeis_cnn --tf process_batch:0 --verbose
        python -m ibeis_cnn --tf process_batch:1 --verbose

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.batch_processing import *  # NOQA
        >>> from ibeis_cnn import models
        >>> model = models.DummyModel(batch_size=128)
        >>> X, y = model.make_random_testdata(num=2000, seed=None)
        >>> model.init_arch()
        >>> theano_fn = model.build_predict_func()
        >>> kwargs = {'X_is_cv2_native': False, 'showprog': True,
        ...           'randomize_batch_order': True}
        >>> outputs_ = process_batch(model, X, y, theano_fn, **kwargs)
        >>> result = ut.dict_str(outputs_)
        >>> print(result)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.batch_processing import *  # NOQA
        >>> from ibeis_cnn import models
        >>> model = models.SiameseL2(batch_size=128, data_shape=(32, 32, 1),
        ...                          strict_batch_size=True)
        >>> X, y = model.make_random_testdata(num=2000, seed=None)
        >>> model.init_arch()
        >>> theano_fn = model.build_predict_func()
        >>> kwargs = {'X_is_cv2_native': False, 'showprog': True,
        ...           'randomize_batch_order': True}
        >>> outputs_ = process_batch(model, X, y, theano_fn, **kwargs)
        >>> result = ut.dict_str(outputs_)
        >>> print(result)

    Ignore:
        Xb, yb = batch_iter.next()
        assert Xb.shape == (8, 1, 4, 4)
        yb.shape == (8,)

    Ignore:
        X, y = model.make_random_testdata(num=2000, seed=None)
        kwargs = {'X_is_cv2_native': False, 'showprog': True,
                  'randomize_batch_order': True, 'time_thresh': .5,
                  }

        print('Testing Unbuffered')
        batch_iter = batch_iterator(model, X, y, lbl=theano_fn.name, **kwargs)
        for Xb, yb in ut.ProgressIter(batch_iter, lbl=':EXEC FG'):
            [ut.is_prime(346373) for _ in range(2)]

        # Notice how the progress iters are not interlaced like
        # they are in the unbuffered version
        import sys
        sys.stdout.flush()
        print('Testing Buffered')
        sys.stdout.flush()
        batch_iter2 = batch_iterator(model, X, y, lbl=theano_fn.name, **kwargs)
        batch_iter2 = ut.buffered_generator(batch_iter2, buffer_size=4)
        print('Iterating')
        for Xb, yb in ut.ProgressIter(batch_iter2, lbl=':EXEC FG'):
            [ut.is_prime(346373) for _ in range(2)]
    """
    import vtool as vt
    batch_output_list = []
    output_names = [
        str(outexpr.variable)
        if outexpr.variable.name is None else
        outexpr.variable.name
        for outexpr in theano_fn.outputs
    ]
    # augmented label list
    batch_target_list = []
    show = VERBOSE_BATCH or show

    # Break data into generated batches
    # generated data with explicit iteration
    batch_iter = batch_iterator(model, X, y, **kwargs)
    if buffered:
        batch_iter = ut.buffered_generator(batch_iter)

    if showprog:
        bs = VERBOSE_BATCH < 1
        num_batches = (X.shape[0] + model.batch_size - 1) // model.batch_size
        # progress iterator should be outside of this function
        batch_iter = ut.ProgressIter(batch_iter, nTotal=num_batches, lbl=theano_fn.name,
                                     freq=10, bs=bs, adjust=True)
    if y is None:
        # Labels are not known, only one argument
        for Xb, yb in batch_iter:
            batch_output = theano_fn(Xb)
            batch_output_list.append(batch_output)
    else:
        # TODO: sliced batches
        for Xb, yb in batch_iter:
            # Runs a batch through the network and updates the weights. Just
            # returns what it did
            batch_output = theano_fn(Xb, yb)
            batch_output_list.append(batch_output)
            batch_target_list.append(yb)

            if show:
                # Print the network output for the first batch
                print('--------------')
                print(ut.list_str(zip(output_names, batch_output)))
                print('Correct: ', yb)
                print('--------------')
                show = False

    # get outputs of each type
    unstacked_output_gen = ([bop[count] for bop in batch_output_list]
                            for count, name in enumerate(output_names))

    if spatial:
        unstacked_output_gen = list(unstacked_output_gen)
        stacked_output_list = [[] for _ in range(len(unstacked_output_gen))]
        for index, output in enumerate(unstacked_output_gen):
            output = np.vstack(output)
            stacked_output_list[index] = output
    else:
        stacked_output_list  = [
            vt.safe_cat(_output_unstacked, axis=0)
            # concatenate_hack(_output_unstacked, axis=0)
            for _output_unstacked in unstacked_output_gen
        ]

    outputs_ = dict(zip(output_names, stacked_output_list))

    if y is not None:
        auglbl_list = np.hstack(batch_target_list)
        outputs_['auglbl_list'] = auglbl_list

    if fix_output:
        # batch iteration may wrap-around returned data. slice off the padding
        num_inputs = X.shape[0] // model.data_per_label_input
        num_outputs = num_inputs * model.data_per_label_output
        for key in six.iterkeys(outputs_):
            outputs_[key] = outputs_[key][0:num_outputs]

    encoder = getattr(model, 'encoder', None)
    if encoder is not None and 'predictions' in outputs_:
        pred = outputs_['predictions']
        outputs_['labeled_predictions'] = encoder.inverse_transform(pred)
    return outputs_
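The `fix_output` branch exists because strict batch sizes can wrap the last batch around; a toy numpy illustration of the truncation it performs (all numbers illustrative):

# Toy fix_output: with strict batch sizes the iterator may pad the last
# batch by wrapping around, so outputs are truncated back to the true count.
import numpy as np

X_rows = 6                      # true number of inputs
padded = np.arange(8)           # two batches of 4 -> 2 padded entries
num_outputs = X_rows            # data_per_label == 1 in this toy case
print(padded[0:num_outputs])    # [0 1 2 3 4 5]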
Esempio n. 46
0
def build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list):
    """ helper
    these list comprehensions replace the prevous for loop
    they still need to be optimized a little bit (and made clearer)
    can probably unnest the list comprehensions as well
    """
    """
    IGNORE
    Legacy::
        def old_build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list):
            fm_nestlist_ = []
            fs_nestlist_ = []
            daid_nestlist_ = []
            for scores, qfxs, dfxs, daids in zip(sparse_list, qfxs_list, dfxs_list, daids_list):
                for rx, cx, score in zip(scores.row, scores.col, scores.data):
                    _fm = tuple(product(qfxs[rx], dfxs[cx]))
                    _fs = [score / len(_fm)] * len(_fm)
                    _daid = [daids[cx]] * len(_fm)
                    fm_nestlist_.append(_fm)
                    fs_nestlist_.append(_fs)
                    daid_nestlist_.append(_daid)
            return fm_nestlist_, fs_nestlist_, daid_nestlist_

        oldtup_ = old_build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)
        fm_nestlist_, fs_nestlist_, daid_nestlist_ = oldtup_
        newtup_ = build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)
        fm_nestlist, fs_nestlist, daid_nestlist = newtup_

        assert fm_nestlist == fm_nestlist_
        assert fs_nestlist == fs_nestlist_
        assert daid_nestlist == daid_nestlist_

        47ms
        %timeit build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)

        59ms
        %timeit old_build_correspondences(sparse_list, qfxs_list, dfxs_list, daids_list)
    IGNORE
    """
    # FIXME: rewrite double comprehension as a flat comprehension

    # Build nested feature matches (a single match might have many members)
    fm_nestlist = [
        tuple(product(qfxs[rx], dfxs[cx]))
        for scores, qfxs, dfxs in zip(sparse_list, qfxs_list, dfxs_list)
        for rx, cx in zip(scores.row, scores.col)
    ]
    nFm_list = [len(fm) for fm in fm_nestlist]
    #fs_unsplit = (score
    #              for scores in sparse_list
    #              for score in scores.data)
    #daid_unsplit = (daids[cx]
    #                for scores, daids in zip(sparse_list, daids_list)
    #                for cx in scores.col)
    # Build flat feature scores (one per nested match)
    fs_unsplit = utool.iflatten((scores.data for scores in sparse_list))
    # Build flat daids (one per nested match)
    daid_unsplit = utool.iflatten(
        (daids.take(scores.col)
         for scores, daids in zip(sparse_list, daids_list)))
    # Expand feature scores and daids splitting scores amongst match members
    fs_nestlist = [[score / nFm] * nFm
                   for score, nFm in zip(fs_unsplit, nFm_list)]
    daid_nestlist = [[daid] * nFm for daid, nFm in zip(daid_unsplit, nFm_list)]

    if DEBUG_SMK:
        assert len(fm_nestlist) == len(fs_nestlist), 'inconsistent len'
        assert len(fm_nestlist) == len(nFm_list), 'inconsistent len'
        assert len(daid_nestlist) == len(fs_nestlist), 'inconsistent len'
        min_ = min(2, len(nFm_list))
        max_ = min(15, len(nFm_list))
        print('nFm_list[min_:max_]      = ' +
              utool.list_str(nFm_list[min_:max_]))
        print('fm_nestlist[min_:max_]   = ' +
              utool.list_str(fm_nestlist[min_:max_]))
        print('fs_nestlist[min_:max_]   = ' +
              utool.list_str(fs_nestlist[min_:max_]))
        print('daid_nestlist[min_:max_] = ' +
              utool.list_str(daid_nestlist[min_:max_]))
        for fm_, fs_, daid_ in zip(fm_nestlist, fs_nestlist, daid_nestlist):
            assert len(fm_) == len(fs_), 'inconsistent len'
            assert len(fm_) == len(daid_), 'inconsistent len'
        print('[smk_core] checked build_chipmatch correspondence ...ok')
    return fm_nestlist, fs_nestlist, daid_nestlist
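
The FIXME above asks for the double comprehension to be flattened; the Legacy block shows the loop it replaced. A toy sketch of the same rewrite on a small scipy COO matrix (the qfxs/dfxs groups here are hypothetical):

import numpy as np
import scipy.sparse as sp
from itertools import product

scores = sp.coo_matrix(np.array([[0., 2.], [3., 0.]]))
qfxs = [[0, 1], [2]]   # query feature groups per row (hypothetical)
dfxs = [[5], [6, 7]]   # database feature groups per col (hypothetical)

# Nested-loop version (legacy style)
fm_loop = []
for rx, cx in zip(scores.row, scores.col):
    fm_loop.append(tuple(product(qfxs[rx], dfxs[cx])))

# Flat comprehension version (what build_correspondences uses)
fm_comp = [tuple(product(qfxs[rx], dfxs[cx]))
           for rx, cx in zip(scores.row, scores.col)]
assert fm_loop == fm_comp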
Esempio n. 47
File: dev.py Progetto: whaozl/ibeis
def run_devcmds(ibs, qaid_list, daid_list, acfg=None):
    """
    This function runs tests passed in with the -t flag
    """
    print('\n')
    #print('[dev] run_devcmds')
    print('==========================')
    print('[DEV] RUN EXPERIMENTS %s' % ibs.get_dbname())
    print('==========================')
    input_test_list = params.args.tests[:]
    print('input_test_list = %s' % (ut.list_str(input_test_list), ))
    # fnum = 1

    valid_test_list = []  # build list for printing in case of failure
    valid_test_helpstr_list = []  # for printing

    def mark_test_handled(testname):
        input_test_list.remove(testname)

    def intest(*args, **kwargs):
        helpstr = kwargs.get('help', '')
        valid_test_helpstr_list.append('   -t ' + ', '.join(args) + helpstr)
        for testname in args:
            valid_test_list.append(testname)
            ret = testname in input_test_list
            ret2 = testname in params.unknown  # Let unparsed args count towards tests
            if ret or ret2:
                if ret:
                    mark_test_handled(testname)
                else:
                    ret = ret2
                print('\n+===================')
                print(' [dev] running testname = %s' % (testname,))
                print('+-------------------\n')
                return ret
        return False

    valid_test_helpstr_list.append('    # --- Simple Tests ---')

    # Explicit (simple) test functions
    if intest('export'):
        export(ibs)
    if intest('dbinfo'):
        dbinfo.get_dbinfo(ibs)
    if intest('headers', 'schema'):
        ibs.db.print_schema()
    if intest('info'):
        print(ibs.get_infostr())
    if intest('printcfg'):
        printcfg(ibs)
    if intest('tables'):
        ibs.print_tables()
    if intest('imgtbl'):
        ibs.print_image_table()

    valid_test_helpstr_list.append('    # --- Decor Tests ---')

    locals_ = locals()

    # Implicit (decorated) test functions
    for (func_aliases, func) in DEVCMD_FUNCTIONS:
        if intest(*func_aliases):
            funcname = get_funcname(func)
            #with utool.Indenter('[dev.' + funcname + ']'):
            with utool.Timer(funcname):
                #print('[dev] qid_list=%r' % (qaid_list,))
                # FIXME: , daid_list
                if len(ut.get_func_argspec(func).args) == 0:
                    ret = func()
                else:
                    ret = func(ibs, qaid_list, daid_list)
                # Add variables returned by the function to the
                # "local scope" (the exec scope)
                if hasattr(ret, 'items'):
                    for key, val in ret.items():
                        if utool.is_valid_varname(key):
                            locals_[key] = val

    valid_test_helpstr_list.append('    # --- Config Tests ---')

    # ------
    # RUNS EXPERIMENT HARNESS OVER VALID TESTNAMES SPECIFIED WITH -t
    # ------

    # Config driven test functions
    # Allow any testcfg to be in tests like: vsone_1 or vsmany_3
    test_cfg_name_list = []
    for test_cfg_name in experiment_configs.TEST_NAMES:
        if intest(test_cfg_name):
            test_cfg_name_list.append(test_cfg_name)
    # Hack to allow for very customized harness tests
    for testname in input_test_list[:]:
        if testname.startswith('custom:'):
            test_cfg_name_list.append(testname)
            mark_test_handled(testname)
    if len(test_cfg_name_list):
        fnum = pt.next_fnum()
        # Run Experiments
        # backwards compatibility
        acfgstr_name_list = {'OVERRIDE_HACK': (qaid_list, daid_list)}
        assert False, 'This way of running tests no longer works. It may be fixed in the future'
        #acfg
        harness.test_configurations(ibs, acfgstr_name_list, test_cfg_name_list)

    valid_test_helpstr_list.append('    # --- Help ---')

    if intest('help'):
        print('valid tests are:')
        print('\n'.join(valid_test_helpstr_list))
        return locals_

    if len(input_test_list) > 0:
        print('valid tests are:\n')
        print('\n'.join(valid_test_list))
        raise Exception('Unknown tests: %r' % (input_test_list,))
    return locals_
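
The intest closure above implements a consume-as-you-match dispatch: every matched testname is removed from input_test_list, so whatever remains at the end must be unknown. A standalone sketch of that pattern (the test names here are hypothetical):

input_tests = ['dbinfo', 'bogus']

def intest(*aliases):
    for name in aliases:
        if name in input_tests:
            input_tests.remove(name)   # consume, so leftovers mean "unknown"
            return True
    return False

if intest('dbinfo'):
    print('would run dbinfo')
if input_tests:
    raise Exception('Unknown tests: %r' % (input_tests,))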
Esempio n. 49
def parse_latex_comments_for_commmands():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> parse_latex_comments_for_commmands()
    """
    fname = ut.get_argval('--fname', type_=str, default='figdefexpt.tex')
    text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/' + fname))
    #text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/figdefindiv.tex'))
    lines = text.split('\n')
    cmd_list = ['']
    in_comment = True
    for line in lines:
        if line.startswith('% ---'):
            # Keep separators
            toadd = line.replace('%', '#')
            if not (len(cmd_list) > 1 and cmd_list[-1].startswith('# ---')):
                cmd_list[-1] += (toadd)
            else:
                cmd_list.append(toadd)
            cmd_list.append('')

        if line.strip().startswith(r'\begin{comment}'):
            in_comment = True
            continue
        if in_comment:
            line = line.strip()
            if line == '' or line.startswith('#') or line.startswith('%'):
                in_comment = False
            else:
                cmd_list[-1] = cmd_list[-1] + line
                if not line.strip().endswith('\\'):
                    cmd_list[-1] = cmd_list[-1] + ' $@'
                    #cmd_list.append('')
                    #cmd_list.append('#--')
                    cmd_list.append('')
                    in_comment = False
                else:
                    cmd_list[-1] = cmd_list[-1] + '\n'

    cmd_list = [
        cmd.replace('--render', '').replace('--diskshow', '')
        for cmd in cmd_list
    ]

    # formatting
    cmd_list2 = []
    for cmd in cmd_list:
        #cmd = cmd.replace(' -t ', ' \\\n    -t ')
        #cmd = cmd.replace('--db', '\\\n    --db')
        #cmd = cmd.replace('python -m ibeis.dev', './dev.py')
        cmd = cmd.replace('python -m ibeis.dev -e', 'ibeis -e')
        cmd_list2.append(cmd)
    cmd_list = cmd_list2

    print('cmd_list = %s' % (ut.list_str(cmd_list), ))
    from os.path import splitext
    script_fname = 'regen_' + splitext(fname)[0] + '.sh'
    fname, script, line_list = write_script_lines(cmd_list, script_fname)
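
The loop above treats a trailing backslash as a shell-style line continuation and appends ' $@' once a command is complete, so the regenerated script forwards extra arguments. A sketch of that continuation rule on an inline string (the input lines are hypothetical):

comment_lines = [
    'python -m ibeis.dev -e rank_cmc \\',
    '    --db PZ_MTEST',
    'python -m ibeis.dev -e scores',
]
cmds, cur = [], ''
for line in comment_lines:
    line = line.strip()
    if line.endswith('\\'):
        cur += line + '\n'                 # command continues on next line
    else:
        cmds.append(cur + line + ' $@')    # complete; forward script args
        cur = ''
print('\n'.join(cmds))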
Esempio n. 50
def parse_shark_tags(orig_fname_list):
    import re

    invalid_tag_patterns = [
        re.escape('-'),
        re.escape('(') + '?\\d*' + re.escape(')') + '?',
        '\\d+-\\d+-\\d+', '\\d+,',
        '\\d+', 'vi*', 'i*v', 'i+',
        '\\d+th', '\\d+nd', '\\d+rd',
        'remant', 'timnfe', 't', 'e', 'sjl', 'disc', 'dec', 'road', 'easter',
        'western', 'west', 'tn',
        '\\d*ap',
        'whaleshark\\d*', 'shark\\d*', 'whale\\d*',
        'whalesharking', 'sharking', 'whalesharks', 'whales',
        'picture',
        'australien',
        'australia',
        'nick', 'tim\\d*',
        'imageset',
        'holiday', 'visit', 'tour', 'trip', 'pec', 'sv',
        'a', 'b',
        'gender', 'sex',
        'img', 'image', 'pic', 'pics', 'leith', 'trips', 'kings', 'photo', 'video', 'media',
        'fix', 'feeding',
        'nrd', 'nd', 'gen', 'wa', 'nmp', 'bo', 'kd', 'ow', 'ne', 'dsc', 'nwd',
        'mg', 'w', 'mai', 'blue', 'stumpy',
        'oea', 'cbe', 'edc', 'knrt',
        'tiws2',
        'ando', 'adv', 'str', 'adventure',
        'camera', 'tag', 'id',
        'of', 'and',
        'tagged', 'from',
        'day', '\\d*april', '\\d*may', '\\d*july', '\\d*june',
        'ningaloo', 'ningblue\\d*', 'kooling',
    ]

    valid_tag_level_set = [
        ['view-left', 'left', 'lhs', 'l', 'leftside'],
        ['view-right', 'right', 'rhs', 'r', 'rightside'],
        ['view-back', 'back'],
        ['view-top', 'top'],
        ['sex-male', 'male', 'm', 'sexm'],
        ['sex-female', 'female', 'f'],
        ['sex-unknown', 'unknown', 'u'],
        ['part-tail', 'tail'],
        ['part-flank', 'side', 'flank'],
        ['part-head', 'head'],
        ['part-pectoral', 'pectoral', 'pec'],
        ['part-dorsal', 'dorsal', 'dorsals'],
        ['part-claspers', 'claspers', 'clasper'],
        ['part-fin', 'fin'],
        ['cropped', 'crop'],
        ['scar', 'scar2'],
        ['notch'],
        ['small'],
        ['bite'],
        ['cam-slr2', 'slr2'],
        #['cam-5m', '5m']
        ['5m'],
        ['7m'],
        ['4m'],
        ['copy'],
        ['qual-resize'],
        ['qual-stretched'],
    ]

    def apply_enum_regex(pat_list):
        enum_endings = [
            '[a-g]',
            '\\d*',
            'i*',
        ]
        expanded_pats = ut.flatten([
            [pat + end for end in enum_endings]
            for pat in pat_list
        ])
        return expanded_pats

    def apply_regex_endings(pat_list):
        return [p + '$' for p in pat_list]

    tag_alias_map = {}
    for level_set in valid_tag_level_set:
        main_key = level_set[0]
        for key in level_set:
            tag_alias_map[key] = main_key

    inverse_alias_map = {}
    for level_set in valid_tag_level_set:
        inverse_alias_map[level_set[0]] = level_set

    regex_alias_map = {
        'view-left': apply_regex_endings(apply_enum_regex(inverse_alias_map['view-left'])),
        'view-right': apply_regex_endings(apply_enum_regex(inverse_alias_map['view-right'])),
    }

    valid_tags = list(inverse_alias_map.keys())

    invalid_tag_patterns = apply_regex_endings(invalid_tag_patterns)

    def parse_all_fname_tags(fname):
        _tags = [splitext(fname)[0]]
        _tags = ut.flatten([t.split('_') for t in _tags])
        _tags = ut.flatten([t.split('.') for t in _tags])
        _tags = [t.lower() for t in _tags]
        _tags = [tag_alias_map.get(t, t) for t in _tags]
        for key, vals in regex_alias_map.items():
            pat = ut.regex_or(vals)
            _tags = [key if re.match(pat, t) else t for t in _tags]
        pat = ut.regex_or(invalid_tag_patterns)
        _tags = [t for t in _tags if not re.match(pat, t)]
        _tags = ut.unique_ordered(_tags)
        return _tags

    all_img_tag_list = list(map(parse_all_fname_tags, orig_fname_list))

    known_img_tag_list = [list(set(tags).intersection(set(valid_tags)))
                          for tags in all_img_tag_list]

    if False:
        # Help figure out which tags are important
        _parsed_tags = ut.flatten(all_img_tag_list)

        taghist = ut.dict_hist(_parsed_tags)
        taghist = {key: val for key, val in taghist.items() if val > 1}

        unknown_taghist = sorted([
            (val, key) for key, val in taghist.items()
            if key not in valid_tags
        ])[::-1]
        known_taghist = sorted([
            (val, key) for key, val in taghist.items()
            if key in valid_tags
        ])[::-1]

        print('Known')
        print(ut.list_str(known_taghist[0:100]))

        print('Unknown')
        print(ut.list_str(unknown_taghist[0:100]))

        print(ut.dict_str(
            ut.dict_hist(ut.flatten(known_img_tag_list)),
            key_order_metric='val'
        ))

    return known_img_tag_list
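
The nested parse_all_fname_tags runs a filename through split, lowercase, alias, and invalid-pattern stages. A worked sketch of that pipeline on a single hypothetical filename with a tiny alias map:

import re
from os.path import splitext

tag_alias_map = {'l': 'view-left', 'm': 'sex-male'}      # hypothetical subset
invalid_pat = '|'.join(p + '$' for p in ['\\d+', 'img'])

fname = 'IMG_1234_l_m.jpg'
tags = splitext(fname)[0].split('_')              # ['IMG', '1234', 'l', 'm']
tags = [t.lower() for t in tags]
tags = [tag_alias_map.get(t, t) for t in tags]    # map aliases to canonical
tags = [t for t in tags if not re.match(invalid_pat, t)]
print(tags)  # ['view-left', 'sex-male']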
Esempio n. 51
    def fix_duplicates(drive):
        r"""
        For every duplicate file passing a filter (e.g. avi), remove the copy
        that lives in the smallest directory; on a tie, use the smallest
        dpath. This funnels all duplicates of a file into a single folder.

        But we also need to look at the non-duplicates in that folder and
        decide whether they should move as well, so this should only trigger
        on folders that are at least 50% duplicates. Curated folders may be
        best left untouched.

        Example:
            cd ~/local/scripts
            >>> from register_files import *  # NOQA
            >>> dpaths = ut.get_argval('--drives', type_=list, default=['E:/'])#'D:/', 'E:/', 'F:/'])
            >>> drives = [Drive(root_dpath) for root_dpath in dpaths]
            >>> E = drive = drives[0]
            >>> #D, E, F = drives
        """
        print('Fixing Duplicates in %r' % (drive,))
        list_ = drive.fpath_hashX_list
        multiindex_dict_ = build_multindex(list_)
        duplicate_hashes = [
            key for key, val in six.iteritems(multiindex_dict_)
            if len(val) > 1
        ]
        duplicate_idxs = ut.dict_take(multiindex_dict_, duplicate_hashes)
        unflat_fpaths = ut.list_unflat_take(drive.fpath_list, duplicate_idxs)
        # Check if any dups have been removed
        still_exists = ut.unflat_map(exists, unflat_fpaths)
        unflat_idxs2 = ut.zipcompress(duplicate_idxs, still_exists)
        duplicate_idxs = [idxs for idxs in unflat_idxs2 if len(idxs) > 1]
        # Look at duplicate files
        unflat_fpaths = ut.list_unflat_take(drive.fpath_list, duplicate_idxs)
        unflat_sizes = ut.list_unflat_take(drive.fpath_bytes_list, duplicate_idxs)
        # Find highly coupled directories
        if True:
            coupled_dirs = []
            for fpaths in unflat_fpaths:
                #basedir = ut.longest_existing_path(commonprefix(fpaths))
                dirs = sorted(list(map(dirname, fpaths)))
                _list = list(range(len(dirs)))
                idxs = ut.upper_diag_self_prodx(_list)
                coupled_dirs.extend(list(map(tuple, ut.list_unflat_take(dirs, idxs))))
            hist_ = ut.dict_hist(coupled_dirs)
            coupled_idxs = ut.list_argsort(hist_.values())[::-1]
            most_coupled = ut.take(list(hist_.keys()), coupled_idxs[0:100])
            print('Coupled fpaths: ' + ut.list_str(most_coupled, nl=True))
        print('%d unique files are duplicated' % (len(unflat_sizes),))
        #print('Duplicate sizes: ' + ut.list_str(unflat_sizes[0:10], nl=True))
        #print('Duplicate fpaths: ' + ut.list_str(unflat_fpaths[0:10], nl=True))
        #print('Duplicate fpaths: ' + ut.list_str(unflat_fpaths[0::5], nl=True))
        print('Duplicate fpaths: ' + ut.list_str(unflat_fpaths, nl=True))
        # Find duplicate directories
        dpath_list = list(drive.dpath_to_fidx.keys())
        fidxs_list = ut.dict_take(drive.dpath_to_fidx, drive.dpath_list)
        #exists_list = list(map(exists, drive.fpath_list))
        #unflat_exists = ut.list_unflat_take(exists_list, fidxs_list)
        fname_registry = [basename(fpath) for fpath in drive.fpath_list]
        unflat_fnames = ut.list_unflat_take(fname_registry, fidxs_list)
        def unsorted_list_hash(list_):
            return ut.hashstr27(str(sorted(list_)))
        unflat_fname_sets = list(map(unsorted_list_hash, ut.ProgIter(unflat_fnames, freq=10000)))
        multiindex_dict2_ = build_multindex(unflat_fname_sets)
        fname_based_duplicate_hashes = [key for key, val in multiindex_dict2_.items() if len(val) > 1]
        print('#fname_based_duplicate_dpaths = %r' % (len(fname_based_duplicate_hashes),))
        fname_based_duplicate_didxs = ut.dict_take(multiindex_dict2_, fname_based_duplicate_hashes)
        fname_based_duplicate_dpaths = ut.list_unflat_take(dpath_list, fname_based_duplicate_didxs)
        print(ut.repr3(fname_based_duplicate_dpaths[0:10]))
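
build_multindex is called above but not shown in this excerpt. Judging from how it is used (mapping each value to every index holding it), a minimal version might look like this; an assumption, not the original implementation:

def build_multindex(list_):
    # Assumed behavior: value -> list of all indices holding that value
    multiindex_dict_ = {}
    for idx, item in enumerate(list_):
        multiindex_dict_.setdefault(item, []).append(idx)
    return multiindex_dict_

# Duplicates are then just keys with more than one index:
hashes = ['a', 'b', 'a', 'c', 'a']
dups = {k: v for k, v in build_multindex(hashes).items() if len(v) > 1}
print(dups)  # {'a': [0, 2, 4]}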
Esempio n. 52
def make_individual_latex_figures(ibs, fpaths_list, flat_case_labels,
                                  cfgx2_shortlbl, case_figdir,
                                  analysis_fpath_list):
    # HACK: MAKE LATEX CONVENIENCE STUFF
    #print('LATEX HACK')
    if len(fpaths_list) == 0:
        print('nothing to render')
        return
    RENDER = ut.get_argflag('--render')
    DUMP_FIGDEF = ut.get_argflag(('--figdump', '--dump-figdef', '--figdef'))

    if not (DUMP_FIGDEF or RENDER):  # HACK
        return

    latex_code_blocks = []
    latex_block_keys = []

    caption_prefix = ut.get_argval('--cappref', type_=str, default='')
    caption_suffix = ut.get_argval('--capsuf', type_=str, default='')
    cmdaug = ut.get_argval('--cmdaug', type_=str, default='custom')

    selected = None

    for case_idx, (fpaths,
                   labels) in enumerate(zip(fpaths_list, flat_case_labels)):
        if labels is None:
            labels = [cmdaug]
        if len(fpaths) < 4:
            nCols = len(fpaths)
        else:
            nCols = 2

        _cmdname = ibs.get_dbname() + ' Case ' + ' '.join(labels) + '_' + str(
            case_idx)
        #print('_cmdname = %r' % (_cmdname,))
        cmdname = ut.latex_sanitize_command_name(_cmdname)
        label_str = cmdname
        if len(caption_prefix) == 0:
            caption_str = ut.escape_latex(
                'Casetags: ' + ut.list_str(labels, nl=False, strvals=True) +
                ', db=' + ibs.get_dbname() + '. ')
        else:
            caption_str = ''

        use_sublbls = len(cfgx2_shortlbl) > 1
        if use_sublbls:
            caption_str += ut.escape_latex(
                'Each figure shows a different configuration: ')
            sublbls = [
                '(' + chr(97 + count) + ') '
                for count in range(len(cfgx2_shortlbl))
            ]
        else:
            #caption_str += ut.escape_latex('This figure depicts correct and
            #incorrect matches from configuration: ')
            sublbls = [''] * len(cfgx2_shortlbl)

        def wrap_tt(text):
            return r'{\tt ' + text + '}'

        _shortlbls = cfgx2_shortlbl
        _shortlbls = list(map(ut.escape_latex, _shortlbls))
        # Adjust spacing for breaks
        #tex_small_space = r''
        tex_small_space = r'\hspace{0pt}'
        # Remove query specific config flags in individual results
        _shortlbls = [
            re.sub('\\bq[^,]*,?', '', shortlbl) for shortlbl in _shortlbls
        ]
        # Let config strings be broken over newlines
        _shortlbls = [
            re.sub('\\+', tex_small_space + '+' + tex_small_space, shortlbl)
            for shortlbl in _shortlbls
        ]
        _shortlbls = [
            re.sub(', *', ',' + tex_small_space, shortlbl)
            for shortlbl in _shortlbls
        ]
        _shortlbls = list(map(wrap_tt, _shortlbls))
        cfgx2_texshortlbl = [
            '\n    ' + lbl + shortlbl
            for lbl, shortlbl in zip(sublbls, _shortlbls)
        ]

        caption_str += ut.conj_phrase(cfgx2_texshortlbl, 'and') + '.\n    '
        caption_str = '\n    ' + caption_prefix + caption_str + caption_suffix
        caption_str = caption_str.rstrip()
        figure_str = ut.get_latex_figure_str(fpaths,
                                             nCols=nCols,
                                             label_str=label_str,
                                             caption_str=caption_str,
                                             use_sublbls=None,
                                             use_frame=True)
        latex_block = ut.latex_newcommand(cmdname, figure_str)
        latex_block = '\n%----------\n' + latex_block
        latex_code_blocks.append(latex_block)
        latex_block_keys.append(cmdname)

    # HACK
    remove_fpath = ut.truepath('~/latex/crall-candidacy-2015') + '/'

    latex_fpath = join(case_figdir, 'latex_cases.tex')

    if selected is not None:
        selected_keys = selected
    else:
        selected_keys = latex_block_keys

    selected_blocks = ut.dict_take(
        dict(zip(latex_block_keys, latex_code_blocks)), selected_keys)

    figdef_block = '\n'.join(selected_blocks)
    figcmd_block = '\n'.join(['\\' + key for key in latex_block_keys])

    selected_block = figdef_block + '\n\n' + figcmd_block

    # HACK: need full paths to render
    selected_block_renderable = selected_block
    selected_block = selected_block.replace(remove_fpath, '')
    if RENDER:
        ut.render_latex_text(selected_block_renderable)

    if DUMP_FIGDEF:
        ut.writeto(latex_fpath, selected_block)

    #if NOT DUMP AND NOT RENDER:
    #    print('STANDARD LATEX RESULTS')
    #    cmdname = ibs.get_dbname() + 'Results'
    #    latex_block  = ut.get_latex_figure_str2(analysis_fpath_list, cmdname, nCols=1)
    #    ut.print_code(latex_block, 'latex')
    if DUMP_FIGDEF or RENDER:
        ut.print_code(selected_block, 'latex')
Esempio n. 53
    def fix_empty_dirs(drive):
        """
        # --- FIND EMPTY DIRECTORIES ---
        """
        print('Fixing Empty Dirs in %r' % (drive,))
        fidxs_list = ut.dict_take(drive.dpath_to_fidx, drive.dpath_list)
        isempty_flags = [len(fidxs) == 0 for fidxs in fidxs_list]
        empty_dpaths = ut.compress(drive.dpath_list, isempty_flags)

        def is_cplat_link(path_):
            try:
                if islink(path_):
                    return True
                os.listdir(path_)
                return False
            except OSError:
                # broken links (e.g. Windows junctions) raise on listdir
                return True
        valid_flags = [not is_cplat_link(d) for d in empty_dpaths]
        if not all(valid_flags):
            print('Filtered windows links %r / %r' % (
                len(empty_dpaths) - sum(valid_flags), len(empty_dpaths)))
            #print(ut.list_str(empty_dpaths[0:10]))
            empty_dpaths = ut.compress(empty_dpaths, valid_flags)

        print('Found %r / %r empty_dpaths' % (len(empty_dpaths), len(drive.dpath_list)))
        print(ut.list_str(empty_dpaths[0:10]))

        # Ensure actually still empty
        current_contents = [ut.glob(d, with_dirs=False)
                            for d in ut.ProgIter(empty_dpaths, 'checking empty status')]
        current_lens = list(map(len, current_contents))
        assert not any(current_lens), 'some dirs are not empty'

        # n ** 2 check to get only the base directories
        isbase_dir = [
            not any([d.startswith(dpath_) and d != dpath_
                        for dpath_ in empty_dpaths])
            for d in ut.ProgIter(empty_dpaths, 'finding base dirs')
        ]
        base_empty_dirs = ut.compress(empty_dpaths, isbase_dir)
        def list_only_files(dpath):
            # glob is too slow; this yields bare filenames, which is enough
            # for the emptiness check below
            for root, dirs, fpaths in os.walk(dpath):
                for fpath in fpaths:
                    yield fpath
        base_current_contents = [
            list(list_only_files(d))
            for d in ut.ProgIter(base_empty_dirs, 'checking emptiness', freq=10)]
        is_actually_empty = [len(fs) == 0 for fs in base_current_contents]
        not_really_empty = ut.compress(base_empty_dirs, ut.not_list(is_actually_empty))
        print('%d dirs are not actually empty' % (len(not_really_empty),))
        print('not_really_empty = %s' % (ut.list_str(not_really_empty[0:10]),))
        truly_empty_dirs = ut.compress(base_empty_dirs, is_actually_empty)

        def list_all(dpath):
            # glob is too slow
            for root, dirs, fpaths in os.walk(dpath):
                for dir_ in dirs:
                    yield dir_
                for fpath in fpaths:
                    yield fpath

        exclude_base_dirs = [join(drive.root_dpath, 'AppData')]
        exclude_end_dirs = ['__pycache__']
        truly_empty_dirs1 = [
            d for d in truly_empty_dirs
            if (not any(d.startswith(ed) for ed in exclude_base_dirs) and
                not any(d.endswith(ed) for ed in exclude_end_dirs))
        ]
        # Show a sample of the dirs that remain candidates for removal
        print('truly_empty_dirs1[::5] = %s' % (
            ut.list_str(truly_empty_dirs1[0::5], strvals=True),))
        #print('truly_empty_dirs1 = %s' % (ut.list_str(truly_empty_dirs1, strvals=True),))

        if not dryrun:
            # FIX PART
            #from os.path import normpath
            #for d in ut.ProgIter(truly_empty_dirs):
            #    break
            #    if ut.WIN32:
            #        # http://www.sevenforums.com/system-security/53095-file-folder-read-only-attribute-wont-disable.html
            #        ut.cmd('attrib', '-r', '-s', normpath(d), verbose=False)
            #x = ut.remove_fpaths(truly_empty_dirs, strict=False)

            print('Deleting %d truly_empty_dirs1' % (len(truly_empty_dirs1),))

            for d in ut.ProgIter(truly_empty_dirs1, 'DELETE empty dirs', freq=1000):  # NOQA
                ut.delete(d, quiet=True)

            if ut.WIN32 and False:
                # remove file that failed removing
                flags = list(map(exists, truly_empty_dirs1))
                truly_empty_dirs1 = ut.compress(truly_empty_dirs1, flags)
                for d in ut.ProgIter(truly_empty_dirs1, 'rming', freq=1000):
                    ut.cmd('rmdir', d)
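
The base-directory filter above is an explicit n ** 2 check. If that ever becomes a bottleneck, a sort-based sketch of the same idea runs in roughly O(n log n): after sorting, every non-base directory directly follows one of its ancestors (assuming path names do not contain characters that sort before the separator):

import os

def base_dirs(dpaths):
    # Keep only dirs that are not inside another dir in the list.
    # Separator-aware, so '/a/bc' is not treated as a child of '/a/b'.
    bases = []
    for d in sorted(dpaths):
        if not bases or not d.startswith(bases[-1] + os.sep):
            bases.append(d)
    return bases

print(base_dirs(['/a/b', '/a/b/c', '/a/bc', '/x']))
# ['/a/b', '/a/bc', '/x']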