Example #1
def get_allkeys(dict_):
    import utool as ut
    if not isinstance(dict_, dict):
        return []
    subkeys = [[key] + get_allkeys(val)
               for key, val in dict_.items()]
    return ut.unique_ordered(ut.flatten(subkeys))
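Every example on this page hinges on ut.unique_ordered, which drops repeated items while keeping each item's first-occurrence position. Below is a minimal pure-Python sketch of that behavior (an illustrative stand-in, not the utool implementation), plus what get_allkeys would return for a small hypothetical nested dict:

def unique_ordered_sketch(items):
    # Keep only the first occurrence of each item, preserving input order.
    seen = set()
    result = []
    for item in items:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result

# Hypothetical nested dict: the key 'b' appears at two different levels.
nested = {'a': {'b': 1}, 'b': {'c': 2}}
# get_allkeys(nested) flattens [['a', 'b'], ['b', 'c']] and deduplicates it,
# yielding ['a', 'b', 'c'].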
Example #2
def add_to_win32_PATH(script_fpath, *add_path_list):
    r"""
    Writes a registry script to update the PATH variable into the sync registry

    CommandLine:
        python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"

    Example:
        >>> # SCRIPT
        >>> from utool.util_win32 import *  # NOQA
        >>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
        >>> new_path = ut.get_argval('--newpath', str, default=None)
        >>> result = add_to_win32_PATH(script_fpath, new_path)
        >>> print(result)
    """
    import utool as ut
    write_dir = dirname(script_fpath)
    key = r'[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
    rtype = 'REG_EXPAND_SZ'
    # Read current PATH values
    win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
    new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
    #new_path_list = unique_ordered(win_pathlist, rob_pathlist)
    print('\n'.join(new_path_list))
    pathtxt = os.path.pathsep.join(new_path_list)
    varval_list = [('Path', pathtxt)]
    regfile_str = make_regfile_str(key, varval_list, rtype)
    ut.view_directory(write_dir)
    print(regfile_str)
    ut.writeto(script_fpath, regfile_str, mode='wb')
    print('Please have an admin run the script. You may need to restart')
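In this PATH update, ut.unique_ordered is what keeps the script idempotent: re-running it with a directory that is already on PATH does not append a second copy. A small illustration with invented paths:

# Hypothetical existing PATH entries and requested additions.
win_pathlist = [r'C:\Windows', r'C:\Windows\System32', r'C:\Tools']
add_path_list = (r'C:\Tools', r'C:\Program Files (x86)\Graphviz2.38\bin')
# ut.unique_ordered(win_pathlist + list(add_path_list)) keeps the existing
# 'C:\Tools' entry in place and appends only the new Graphviz directory.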
Example #3
 def get_column_keys(metadata):
     unflat_colname_list = [[
         cols.keys() for cols in qaid2_cols.values()
     ] for qaid2_cols in six.itervalues(metadata.dictstore)]
     colname_list = ut.unique_ordered(
         ut.flatten(ut.flatten(unflat_colname_list)))
     return colname_list
Example #4
def write_path(r):
    """
    Writes a script to update the PATH variable into the sync registry
    The PATH update mirrors the current RobSettings

    SeeAlso:
        utool.util_win32.add_to_win32_PATH
    """
    import utool
    write_dir = join(r.d.HOME, 'Sync/win7/registry')
    path_fpath = normpath(join(write_dir, 'UPDATE_PATH.reg'))
    key = r'[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
    rtype = 'REG_EXPAND_SZ'
    pathsep = os.path.pathsep
    # Read current PATH values
    win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
    rob_pathlist = list(map(normpath, r.path_vars_list))
    new_path_list = utool.unique_ordered(win_pathlist + rob_pathlist)
    #new_path_list = unique_ordered(win_pathlist, rob_pathlist)
    print('\n'.join(new_path_list))
    pathtxt = pathsep.join(new_path_list)
    varval_list = [('Path', pathtxt)]
    write_regfile(path_fpath, key, varval_list, rtype)

    rob_helpers.view_directory(write_dir)
Example #5
 def get_column_keys(metadata):
     unflat_colname_list = [
         [cols.keys() for cols in qaid2_cols.values()]
         for qaid2_cols in six.itervalues(metadata.dictstore)
     ]
     colname_list = ut.unique_ordered(ut.flatten(ut.flatten(unflat_colname_list)))
     return colname_list
Example #6
 def get_other_nids(self):
     ibs = self.ibs
     all_nid_list = ibs.get_annot_name_rowids(self.all_aid_list)
     unique_nid_list = ut.unique_ordered(all_nid_list)
     is_unknown = ibs.is_nid_unknown(unique_nid_list)
     is_name1 = [nid == self.nid1 for nid in unique_nid_list]
     is_name2 = [nid == self.nid2 for nid in unique_nid_list]
     is_other = ut.and_lists(*tuple(map(ut.not_list, (is_name1, is_name2, is_unknown))))
     other_nid_list = ut.compress(unique_nid_list, is_other)
     return other_nid_list
Example #7
 def get_other_nids(self):
     ibs = self.ibs
     all_nid_list = ibs.get_annot_name_rowids(self.all_aid_list)
     unique_nid_list = ut.unique_ordered(all_nid_list)
     is_unknown = ibs.is_nid_unknown(unique_nid_list)
     is_name1 = [nid == self.nid1 for nid in unique_nid_list]
     is_name2 = [nid == self.nid2 for nid in unique_nid_list]
     is_other = ut.and_lists(
         *tuple(map(ut.not_list, (is_name1, is_name2, is_unknown))))
     other_nid_list = ut.compress(unique_nid_list, is_other)
     return other_nid_list
Example #8
 def parse_all_fname_tags(fname):
     _tags = [splitext(fname)[0]]
     _tags = ut.flatten([t.split('_') for t in _tags])
     _tags = ut.flatten([t.split('.') for t in _tags])
     _tags = [t.lower() for t in _tags]
     _tags = [tag_alias_map.get(t, t) for t in _tags]
     for key, vals in regex_alias_map.items():
         pat = ut.regex_or(vals)
         _tags = [key if re.match(pat, t) else t for t in _tags]
     pat = ut.regex_or(invalid_tag_patterns)
     _tags = [t for t in _tags if not re.match(pat, t)]
     _tags = ut.unique_ordered(_tags)
     return _tags
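The parser above splits a filename into candidate tags, normalizes them through alias tables, filters invalid patterns, and finally calls ut.unique_ordered to collapse repeats. A self-contained walk-through with assumed (hypothetical) alias tables; ut.regex_or is replaced by a plain alternation join:

import re
from os.path import splitext

tag_alias_map = {}               # assumed empty for this sketch
invalid_tag_patterns = [r'\d+']  # assumed: purely numeric tags are invalid

fname = 'left.flank_LEFT_2015.jpg'
_tags = [splitext(fname)[0]]
_tags = [t for part in _tags for t in part.split('_')]
_tags = [t for part in _tags for t in part.split('.')]
_tags = [t.lower() for t in _tags]                        # ['left', 'flank', 'left', '2015']
_tags = [tag_alias_map.get(t, t) for t in _tags]
pat = '|'.join('(%s)' % p for p in invalid_tag_patterns)  # stand-in for ut.regex_or
_tags = [t for t in _tags if not re.match(pat, t)]        # drops '2015'
# ut.unique_ordered(_tags) -> ['left', 'flank']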
Example #9
def get_system_python_library():
    """
    FIXME; hacky way of finding python library. Not cross platform yet.
    """
    import os
    import utool as ut
    from os.path import basename, realpath
    pyname = basename(realpath(sys.executable))
    ld_library_path = os.environ['LD_LIBRARY_PATH']
    libdirs = [x for x in ld_library_path.split(os.pathsep) if x] + ['/usr/lib']
    libfiles = ut.flatten([ut.glob(d, '*' + ut.get_lib_ext(), recursive=True) for d in libdirs])
    python_libs = [realpath(f) for f in libfiles if 'lib' + pyname in basename(f)]
    python_libs = ut.unique_ordered(python_libs)
    assert len(python_libs) == 1, str(python_libs)
    return python_libs[0]
Example #10
 def get_square_data(metadata, cfgstr=None):
     # can only support one config at a time right now
     if cfgstr is None:
         cfgstr = metadata.get_cfgstr_list()[0]
     qaid2_cols = metadata.dictstore[cfgstr]
     qaids = list(qaid2_cols.keys())
     col_name_list = ut.unique_ordered(ut.flatten([cols.keys() for cols in qaid2_cols.values()]))
     #col_name_list = ['qx2_scoreexpdiff', 'qx2_gt_aid']
     #colname2_colvals = [None for colname in col_name_list]
     column_list = [
         [colvals.get(colname, None) for qaid, colvals in six.iteritems(qaid2_cols)]
         for colname in col_name_list]
     col_name_list = ['qaids'] + col_name_list
     column_list = [qaids] + column_list
     print('depth_profile(column_list) = %r' % (ut.depth_profile(column_list),))
     return col_name_list, column_list
Example #11
    def find_module_callers():
        """
        TODO:
        attempt to build a call graph between module functions to make it easy to see
        what can be removed and what cannot.
        """
        import utool as ut
        from os.path import normpath
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
        module = ut.import_module_from_fpath(mod_fpath)
        user_profile = ut.ensure_user_profile()
        doctestables = list(
            ut.iter_module_doctestable(module, include_builtin=False))
        grepkw = {}
        grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
        grepkw['dpath_list'] = user_profile.project_dpaths
        grepkw['verbose'] = True

        usage_map = {}
        for funcname, func in doctestables:
            print('Searching for funcname = %r' % (funcname, ))
            found_fpath_list, found_lines_list, found_lxs_list = ut.grep(
                [funcname], **grepkw)
            used_in = (found_fpath_list, found_lines_list, found_lxs_list)
            usage_map[funcname] = used_in

        external_usage_map = {}
        for funcname, used_in in usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in
            isexternal_flag = [
                normpath(fpath) != normpath(mod_fpath)
                for fpath in found_fpath_list
            ]
            ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                           ut.compress(found_lines_list, isexternal_flag),
                           ut.compress(found_lxs_list, isexternal_flag))
            external_usage_map[funcname] = ext_used_in

        for funcname, used_in in external_usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in

        print('Calling modules: \n' + ut.repr2(ut.unique_ordered(
            ut.flatten([used_in[0]
                        for used_in in external_usage_map.values()])),
                                               nl=True))
Example #12
    def infer_data(self):
        """ Initialize data related to the input aids
        """
        ibs = self.ibs
        # The two matching aids
        self.aid_pair = (self.aid1, self.aid2)
        (aid1, aid2) = self.aid_pair
        self.match_text = ibs.get_match_text(self.aid1, self.aid2)
        # The names of the matching annotations
        self.nid1, self.nid2 = ibs.get_annot_name_rowids((aid1, aid2))
        self.name1, self.name2 = ibs.get_annot_names((aid1, aid2))
        self.other_valid_nids = []
        # The other annotations that belong to these two names
        self.gts_list = ibs.get_annot_groundtruth((aid1, aid2))
        self.gt1, self.gt2 = self.gts_list
        # A flat list of all the aids we are looking at
        self.is_split_case = self.nid1 == self.nid2
        self.all_aid_list = ut.unique_ordered([aid1, aid2] + self.gt1 +
                                              self.gt2)
        self.all_nid_list_orig = ibs.get_annot_name_rowids(self.all_aid_list)
        self.other_aids = list(
            set(self.all_aid_list) - set([self.aid1, self.aid2]))

        if self.is_split_case:
            # Split case
            self.nCols = max(2, len(self.other_aids))
            self.nRows = 2 if len(self.other_aids) > 0 else 1
        else:
            # Merge/New Match case
            self.nCols = max(len(self.gt1) + 1, len(self.gt2) + 1)
            self.nRows = 2
        self.nCols = min(self.max_cols, self.nCols)

        # Grab not just the exemplars

        if ut.VERBOSE or ut.is_developer():
            logger.info('[matchver] __init__ nid1=%r, nid2=%r ' %
                        (self.nid1, self.nid2))
            logger.info('[matchver] __init__ self.gts_list=%r ' %
                        (self.gts_list))

        if ut.VERBOSE or ut.is_developer():
            logger.info('[matchver] __init__ nid1=%r, nid2=%r ' %
                        (self.nid1, self.nid2))
            logger.info('[matchver] __init__ self.gts_list=%r ' %
                        (self.gts_list))
Example #13
def get_system_python_library():
    """
    FIXME; hacky way of finding python library. Not cross platform yet.
    """
    import os
    import utool as ut
    from os.path import basename, realpath
    pyname = basename(realpath(sys.executable))
    ld_library_path = os.environ['LD_LIBRARY_PATH']
    libdirs = [x
               for x in ld_library_path.split(os.pathsep) if x] + ['/usr/lib']
    libfiles = ut.flatten(
        [ut.glob(d, '*' + ut.get_lib_ext(), recursive=True) for d in libdirs])
    python_libs = [
        realpath(f) for f in libfiles if 'lib' + pyname in basename(f)
    ]
    python_libs = ut.unique_ordered(python_libs)
    assert len(python_libs) == 1, str(python_libs)
    return python_libs[0]
Example #14
    def find_module_callers():
        """
        TODO:
        attempt to build a call graph between module functions to make it easy to see
        what can be removed and what cannot.
        """
        import utool as ut
        from os.path import normpath
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
        module = ut.import_module_from_fpath(mod_fpath)
        user_profile = ut.ensure_user_profile()
        doctestables = list(ut.iter_module_doctestable(module, include_builtin=False))
        grepkw = {}
        grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
        grepkw['dpath_list'] = user_profile.project_dpaths
        grepkw['verbose'] = True

        usage_map = {}
        for funcname, func in doctestables:
            print('Searching for funcname = %r' % (funcname,))
            found_fpath_list, found_lines_list, found_lxs_list = ut.grep([funcname], **grepkw)
            used_in = (found_fpath_list, found_lines_list, found_lxs_list)
            usage_map[funcname] = used_in

        external_usage_map = {}
        for funcname, used_in in usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in
            isexternal_flag = [normpath(fpath) != normpath(mod_fpath) for fpath in found_fpath_list]
            ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                           ut.compress(found_lines_list, isexternal_flag),
                           ut.compress(found_lxs_list, isexternal_flag))
            external_usage_map[funcname] = ext_used_in

        for funcname, used_in in external_usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in

        print('Calling modules: \n' +
              ut.repr2(ut.unique_ordered(ut.flatten([used_in[0] for used_in in  external_usage_map.values()])), nl=True))
Example #15
    def infer_data(self):
        """ Initialize data related to the input aids
        """
        ibs = self.ibs
        # The two matching aids
        self.aid_pair = (self.aid1, self.aid2)
        (aid1, aid2) = self.aid_pair
        self.match_text = ibs.get_match_text(self.aid1, self.aid2)
        # The names of the matching annotations
        self.nid1, self.nid2 = ibs.get_annot_name_rowids((aid1, aid2))
        self.name1, self.name2 = ibs.get_annot_names((aid1, aid2))
        self.other_valid_nids = []
        # The other annotations that belong to these two names
        self.gts_list  = ibs.get_annot_groundtruth((aid1, aid2))
        self.gt1, self.gt2 = self.gts_list
        # A flat list of all the aids we are looking at
        self.is_split_case = self.nid1 == self.nid2
        self.all_aid_list = ut.unique_ordered([aid1, aid2] + self.gt1 + self.gt2)
        self.all_nid_list_orig = ibs.get_annot_name_rowids(self.all_aid_list)
        self.other_aids = list(set(self.all_aid_list) - set([self.aid1, self.aid2]))

        if self.is_split_case:
            # Split case
            self.nCols = max(2, len(self.other_aids))
            self.nRows = 2 if len(self.other_aids) > 0 else 1
        else:
            # Merge/New Match case
            self.nCols = max(len(self.gt1) + 1, len(self.gt2) + 1)
            self.nRows = 2
        self.nCols = min(self.max_cols, self.nCols)

        # Grab not just the exemplars

        if ut.VERBOSE or ut.is_developer():
            print('[matchver] __init__ nid1=%r, nid2=%r ' % (self.nid1, self.nid2))
            print('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))

        if ut.VERBOSE or ut.is_developer():
            print('[matchver] __init__ nid1=%r, nid2=%r ' % (self.nid1, self.nid2))
            print('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))
Example #16
def download_sharks(XMLdata, number):
    """
    cd ~/work/WS_ALL
    python -m ibeis.scripts.getshark

    >>> from ibeis.scripts.getshark import *  # NOQA
    >>> url = 'www.whaleshark.org/listImages.jsp'
    >>> XMLdata = ut.url_read(url)
    >>> number = None
    """
    # Prepare the output directory for writing, if it doesn't exist
    output_dir = 'sharkimages'
    ut.ensuredir(output_dir)

    dom = parseString(XMLdata)

    # Download files
    if number:
        maxCount = min(number, len(dom.getElementsByTagName('img')))
    else:
        maxCount = len(dom.getElementsByTagName('img'))

    parsed_info = dict(
        img_url_list=[],
        localid_list=[],
        nameid_list=[],
        orig_fname_list=[],
        new_fname_list=[],
    )

    print('Preparing to fetch %i files...' % maxCount)

    for shark in dom.getElementsByTagName('shark'):
        localCount = 0
        for imageset in shark.getElementsByTagName('imageset'):
            for img in imageset.getElementsByTagName('img'):
                localCount += 1

                img_url = img.getAttribute('href')
                orig_fname = split(img_url)[1]
                ext = splitext(orig_fname)[1].lower()
                nameid = shark.getAttribute('number')

                new_fname = '%s-%i%s' % (
                    nameid, localCount, ext)

                parsed_info['img_url_list'].append(img_url)
                parsed_info['nameid_list'].append(nameid)
                parsed_info['localid_list'].append(localCount)
                parsed_info['orig_fname_list'].append(orig_fname)
                parsed_info['new_fname_list'].append(new_fname)

                print('Parsed %i / %i files.' % (len(parsed_info['orig_fname_list']), maxCount))

                if number is not None and len(parsed_info['orig_fname_list']) == number:
                    break
    parsed_info['new_fpath_list'] = [join(output_dir, _fname)
                                     for _fname in parsed_info['new_fname_list']]

    print('Filtering parsed images')

    # Filter based on image type (keep only jpgs)
    ext_flags = [_fname.endswith('.jpg') or _fname.endswith('.jpeg')
                  for _fname in parsed_info['new_fname_list']]
    parsed_info = {key: ut.compress(list_, ext_flags) for key, list_ in parsed_info.items()}

    # Filter to only images matching the appropriate tags
    from ibeis import tag_funcs
    parsed_info['tags_list'] = parse_shark_tags(parsed_info['orig_fname_list'])
    tag_flags = tag_funcs.filterflags_general_tags(
        parsed_info['tags_list'],
        has_any=['view-left'],
        none_match=['qual.*', 'view-top', 'part-.*', 'cropped'],
    )
    parsed_info = {key: ut.compress(list_, tag_flags) for key, list_ in parsed_info.items()}
    print('Tags in chosen images:')
    print(ut.dict_hist(ut.flatten(parsed_info['tags_list'] )))

    # Download selected subset
    print('Downloading selected subset')
    _iter = list(zip(parsed_info['img_url_list'],
                     parsed_info['new_fpath_list']))
    _iter = ut.ProgressIter(_iter, lbl='downloading sharks')
    for img_url, new_fpath in _iter:
        if not exists(new_fpath):
            ut.download_url(img_url, new_fpath)

    # Remove corrupted or ill-formatted images
    print('Checking for corrupted images')
    import vtool as vt
    noncorrupt_flags = vt.filterflags_valid_images(parsed_info['new_fpath_list'])
    parsed_info = {
        key: ut.compress(list_, noncorrupt_flags)
        for key, list_ in parsed_info.items()
    }

    print('Removing small images')
    import numpy as np
    imgsize_list = np.array([vt.open_image_size(gpath) for gpath in parsed_info['new_fpath_list']])
    sqrt_area_list = np.sqrt(np.prod(imgsize_list, axis=1))
    areq_flags_list = sqrt_area_list >= 750
    parsed_info = {key: ut.compress(list_, areq_flags_list)
                   for key, list_ in parsed_info.items()}

    grouped_idxs = ut.group_items(list(range(len(parsed_info['nameid_list']))),
                                  parsed_info['nameid_list'])
    keep_idxs = sorted(ut.flatten([idxs for key, idxs in grouped_idxs.items() if len(idxs) >= 2]))
    parsed_info = {key: ut.take(list_, keep_idxs) for key, list_ in parsed_info.items()}

    print('Moving images to secondary directory')
    named_outputdir = 'named-left-sharkimages'
    # Build names
    parsed_info['namedir_fpath_list'] = [
        join(named_outputdir, _nameid, _fname)
        for _fname, _nameid in zip(parsed_info['new_fname_list'],
                                   parsed_info['nameid_list'])]
    # Create directories
    ut.ensuredir(named_outputdir)
    named_dirs = ut.unique_ordered(list(map(dirname, parsed_info['namedir_fpath_list'])))
    for dir_ in named_dirs:
        ut.ensuredir(dir_)
    # Copy
    ut.copy_files_to(src_fpath_list=parsed_info['new_fpath_list'],
                     dst_fpath_list=parsed_info['namedir_fpath_list'])
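Near the end of the downloader, ut.unique_ordered is applied to the per-image directory paths so that each name directory is created exactly once, in encounter order. A sketch with made-up shark ids and filenames:

from os.path import dirname, join

# Hypothetical (nameid, filename) pairs; 'A-001' has two images.
pairs = [('A-001', 'A-001-1.jpg'), ('A-001', 'A-001-2.jpg'), ('B-042', 'B-042-1.jpg')]
namedir_fpath_list = [join('named-left-sharkimages', nameid, fname)
                      for nameid, fname in pairs]
named_dirs = list(map(dirname, namedir_fpath_list))
# named_dirs repeats the 'A-001' directory; ut.unique_ordered(named_dirs)
# keeps one entry per directory, so ensuredir runs once for 'A-001' and
# once for 'B-042'.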
Example #17
    def show_page(self, bring_to_front=False, onlyrows=None, fulldraw=True):
        """ Plots all subaxes on a page

        onlyrows is a hack to only draw a subset of the data again
        """
        if ut.VERBOSE:
            if not fulldraw:
                print('[matchver] show_page(fulldraw=%r, onlyrows=%r)' % (fulldraw, onlyrows))
            else:
                print('[matchver] show_page(fulldraw=%r)' % (fulldraw))
        self.prepare_page(fulldraw=fulldraw)
        # Variables we will work with to paint a pretty picture
        ibs = self.ibs
        nRows = self.nRows
        colpad = 1 if  self.cm is not None else 0
        nCols = self.nCols + colpad

        # Distinct color for every unique name
        unique_nids = ut.unique_ordered(ibs.get_annot_name_rowids(self.all_aid_list, distinguish_unknowns=False))
        unique_colors = pt.distinct_colors(len(unique_nids), brightness=.7, hue_range=(.05, .95))
        self.nid2_color = dict(zip(unique_nids, unique_colors))

        row_aids_list = self.get_row_aids_list()

        if self.cm is not None:
            print("DRAWING QRES")
            pnum = (1, nCols, 1)
            if not fulldraw:
                # not doing full draw so we have to clear any axes
                # that are here already manually
                ax = self.fig.add_subplot(*pnum)
                self.clear_parent_axes(ax)
            self.cm.show_single_annotmatch(self.qreq_, self.aid2, fnum=self.fnum, pnum=pnum, draw_fmatch=True, colorbar_=False)

        # For each row
        for rowx, aid_list in enumerate(row_aids_list):
            offset = rowx * nCols + 1
            if onlyrows is not None and rowx not in onlyrows:
                continue
            #ibsfuncs.assert_valid_aids(ibs, groundtruth)
            # For each column
            for colx, aid in enumerate(aid_list, start=colpad):
                if colx >= nCols:
                    break
                try:
                    nid = ibs.get_annot_name_rowids(aid)
                    if ibsfuncs.is_nid_unknown(ibs, [nid])[0]:
                        color = const.UNKNOWN_PURPLE_RGBA01
                    else:
                        color = self.nid2_color[nid]
                except Exception as ex:
                    ut.printex(ex)
                    print('nid = %r' % (nid,))
                    print('self.nid2_color = %s' % (ut.dict_str(self.nid2_color),))
                    raise
                px = colx + offset
                ax = self.plot_chip(int(aid), nRows, nCols, px, color=color, fulldraw=fulldraw)
                # If there are still more in this row to display
                if colx + 1 < len(aid_list) and colx + 1 >= nCols:
                    total_indices = len(aid_list)
                    current_index = self.col_offset_list[rowx] + 1
                    next_text = 'next\n%d/%d' % (current_index, total_indices)
                    next_func = functools.partial(self.rotate_row, rowx=rowx)
                    self.append_button(next_text, callback=next_func,
                                       location='right', size='33%', ax=ax)

        if fulldraw:
            self.show_hud()
            #pt.adjust_subplots_safe(top=0.85, hspace=0.03)
            hspace = .05 if (self.nCols) > 1 else .1
            pt.adjust_subplots_safe(top=0.85, hspace=hspace)
        self.draw()
        self.show()
        if bring_to_front:
            self.bring_to_front()
Example #18
def get_test_qaids(ibs, default_qaids=None, return_annot_info=False, aidcfg=None):
    """
    Gets test annot_rowids based on command line arguments

    DEPRICATE

    Args:
        ibs (IBEISController):  ibeis controller object
        default_qaids (None): if list then used only if no other aids are available (default = [1])
           as a string it mimics the command line

    Returns:
        list: available_qaids

    CommandLine:
        python -m ibeis.init.main_helpers --test-get_test_qaids
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qaid 1
        python -m ibeis.init.main_helpers --test-get_test_qaids --allgt --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --qaid 4 5 8  --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2 --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qindex 0:10 --verbmhelp
        python -m ibeis.init.main_helpers --exec-get_test_qaids --controlled --db PZ_Master0 --exec-mode
        python -m ibeis.init.main_helpers --exec-get_test_qaids --db testdb1 --allgt --qindex 0:256

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> default_qaids = None
        >>> available_qaids = get_test_qaids(ibs, default_qaids)
        >>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_qaids, with_contrib=False, short=True)
        >>> result = 'available_qaids = ' + ut.obj_str(available_qaids, truncate=True, nl=False)
        >>> print('len(available_qaids) = %d' % len(available_qaids))
        >>> print(result)
        available_qaids = [1]
    """
    qaid_request_info = {}
    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] + --- GET_TEST_QAIDS ---")

    # Old version of this function
    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] + --- GET_TEST_QAIDS ---")
        print("[get_test_qaids] * default_qaids = %s" % (ut.obj_str(default_qaids, truncate=True, nl=False)))

    valid_aids = ibs.get_valid_aids()

    if len(valid_aids) == 0:
        print("[get_test_qaids] WARNING no annotations available")

    # ---- INCLUDING STEP
    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] * include step")

    available_qaids = []

    # ut.get_argflag(('--all-cases', '--all'))
    # ut.get_argflag(('--all-gt-cases', '--allgt'))
    # ut.get_argflag(('--all-hard-cases', '--allhard'))
    # ut.get_argflag(('--qaid', '--qaids'))
    # ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    # not ut.get_argflag('--junk')

    ALL_CASES = params.args.all_cases or default_qaids == "all"
    GT_CASES = params.args.all_gt_cases or default_qaids == "gt"
    HARD_CASES = params.args.all_hard_cases or ut.get_argflag(("--all-hard-cases", "--allhard", "--hard"))
    NO_JUNK = not ut.get_argflag("--junk")
    CONTROLLED_CASES = ut.get_argflag("--controlled") or ut.get_argflag("--controlled_qaids")
    NO_REVIEWED = ut.get_argflag("--unreviewed")
    species = ut.get_argval("--species")
    # QAID = params.args.qaid
    QAID = ut.get_argval("--qaid", type_="fuzzy_subset", default=None)
    QINDEX = params.args.qindex
    QSHUFFLE = ut.get_argval("--qshuffle")

    if QAID is not None:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Including cmdline specified qaids")
        try:
            args_qaid = ensure_flatlistlike(QAID)
        except Exception:
            args_qaid = QAID
        available_qaids.extend(args_qaid)
        qaid_request_info["custom_commandline"] = args_qaid

    if ALL_CASES:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Including all qaids")
        available_qaids.extend(valid_aids)
        qaid_request_info["all_cases"] = True

    if HARD_CASES:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Including hard qaids")
        is_hard_list = ibs.get_annot_is_hard(valid_aids)
        hard_aids = ut.compress(valid_aids, is_hard_list)
        available_qaids.extend(hard_aids)
        qaid_request_info["hard_cases"] = True

    if GT_CASES:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Including groundtruth qaids")
        has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)
        hasgt_aids = ut.compress(valid_aids, has_gt_list)
        print("[get_test_qaids] Adding all %d/%d ground-truthed test cases" % (len(hasgt_aids), len(valid_aids)))
        available_qaids.extend(hasgt_aids)
        qaid_request_info["gt_cases"] = True

    if CONTROLLED_CASES:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Including controlled qaids")
        from ibeis.other import ibsfuncs

        # Override all other gts with controlled
        controlled_qaids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=True)
        available_qaids.extend(controlled_qaids)
        qaid_request_info["controlled"] = True
    else:
        qaid_request_info["controlled"] = False

    # ---- CHECK_DEFAULTS QUERY
    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] * len(available_qaids) = %r" % (len(available_qaids)))

    if len(available_qaids) == 0:
        print("[get_test_qaids] * ... defaulting, no available qaids on command line.")
        if default_qaids is None:
            default_qaids = valid_aids[0:1]
            qaid_request_info["default_one"] = True
        elif isinstance(default_qaids, six.string_types):
            if default_qaids == "gt" or default_qaids == "allgt":
                default_qaids = ibs.get_valid_aids(hasgt=True)
                qaid_request_info["default_gt"] = True
        available_qaids = default_qaids
    else:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * ... not defaulting")

    available_qaids = ut.unique_ordered(available_qaids)

    # ---- EXCLUSION STEP
    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] * len(available_qaids) = %r" % (len(available_qaids)))
        print("[get_test_qaids] * exclude step")

    if NO_JUNK:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Filtering junk")
        available_qaids = ibs.filter_junk_annotations(available_qaids)
        qaid_request_info["has_junk"] = False

    if NO_REVIEWED:
        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Filtering unreviewed")
        isreviewed_list = ibs.get_annot_has_reviewed_matching_aids(available_qaids)
        available_qaids = ut.filterfalse_items(available_qaids, isreviewed_list)
        qaid_request_info["has_unreviewed"] = False

    if species is not None:
        if species == "primary":
            if VERB_MAIN_HELPERS:
                print("[get_test_qaids] * Finiding primary species")
            # species = ibs.get_primary_database_species(available_qaids)
            species = ibs.get_primary_database_species()
            qaid_request_info["primary_species"] = True

        if VERB_MAIN_HELPERS:
            print("[get_test_qaids] * Filtering to species=%r" % (species,))
        isvalid_list = np.array(ibs.get_annot_species(available_qaids)) == species
        available_qaids = ut.compress(available_qaids, isvalid_list)
        qaid_request_info["species_filter"] = species

    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] * len(available_qaids) = %r" % (len(available_qaids)))
        print("[get_test_qaids] * subindex step")

    # ---- INDEX SUBSET

    # ut.get_argval('--qshuffle')
    if QSHUFFLE:
        # Deterministic shuffling
        available_qaids = ut.take(available_qaids, ut.random_indexes(len(available_qaids), seed=42))
        qaid_request_info["shuffled"] = True

    # Sample a large pool of chosen query qindexes
    if QINDEX is not None:
        # FIXME: should use a slice of the list or a sublist
        qindexes = ensure_flatlistlike(QINDEX)
        _test_qaids = [available_qaids[qx] for qx in qindexes if qx < len(available_qaids)]
        print("[get_test_qaids] Chose subset of size %d/%d" % (len(_test_qaids), len(available_qaids)))
        available_qaids = _test_qaids
        qaid_request_info["subset"] = qindexes

    if VERB_MAIN_HELPERS:
        print("[get_test_qaids] * len(available_qaids) = %r" % (len(available_qaids)))
        print("[get_test_qaids] L ___ GET_TEST_QAIDS ___")
    if return_annot_info:
        return available_qaids, qaid_request_info
    else:
        return available_qaids
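Several of the inclusion steps above can contribute overlapping annotations (a hard case may also have ground truth, for example), so the single ut.unique_ordered call after the defaults check removes those repeats while preserving the order in which the steps ran. Sketch with invented aids:

hard_aids = [3, 5, 8]          # hypothetical --allhard selection
hasgt_aids = [2, 3, 5, 9]      # hypothetical --allgt selection
available_qaids = hard_aids + hasgt_aids       # [3, 5, 8, 2, 3, 5, 9]
# ut.unique_ordered(available_qaids) -> [3, 5, 8, 2, 9]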
Example #19
def get_test_daids(ibs, default_daids='all', qaid_list=None, return_annot_info=False, aidcfg=None):
    """ Gets database annot_rowids based on command line arguments

    DEPRICATE

    CommandLine:
        python dev.py --db PZ_MTEST -t best --exclude-query --qaid 72 -r 0 -c 0 --show --va --vf --dump-extra

    Args:
        ibs (IBEISController):  ibeis controller object
        default_daids (str): (default = 'all')
        qaid_list (list): list of chosen qaids that may affect daids (default = None)

    Returns:
        list: available_daids

    CommandLine:
        python -m ibeis.init.main_helpers --test-get_test_daids
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST  --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --exclude-query
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --daid-exclude 2 3 4
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --species=zebra_grevys
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_Master0 --species=zebra_grevys
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_Master0 --controlled --verbmhelp
        python -m ibeis.init.main_helpers --exec-get_test_daids --controlled --db PZ_Master0 --exec-mode

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> default_daids = 'all'
        >>> qaid_list = [1]
        >>> available_daids = get_test_daids(ibs, default_daids, qaid_list)
        >>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_daids, with_contrib=False, short=True)
        >>> result = 'available_daids = ' + ut.obj_str(available_daids, truncate=True, nl=False)
        >>> print('len(available_daids) %d' % len(available_daids))
        >>> print(result)
        available_daids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    """
    daid_request_info = {}

    if VERB_MAIN_HELPERS:
        print('[get_test_daids] + --- GET_TEST_DAIDS ---')
        print('[get_test_daids] * default_daids = %s' % (ut.obj_str(default_daids, truncate=True, nl=False)))
        print('[get_test_daids] * qaid_list = %s' % (ut.obj_str(qaid_list, truncate=True, nl=False)))

    # ---- INCLUDING STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_daids] * include step')

    available_daids = []

    CONTROLLED_CASES = ut.get_argflag('--controlled') or ut.get_argflag('--controlled_daids')
    DSHUFFLE = ut.get_argval('--dshuffle')
    DINDEX = params.args.dindex
    NO_JUNK = not ut.get_argflag('--junk')
    EXCLUDE_QUERY = ut.get_argflag('--exclude-query')
    #daids_exclude = params.args.daid_exclude
    daids_exclude = None

    if CONTROLLED_CASES:
        print('[get_test_daids] * Including controlled daids')
        from ibeis.other import ibsfuncs
        controlled_daids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=False)
        available_daids.extend(controlled_daids)
        daid_request_info['controlled'] = True
    else:
        daid_request_info['controlled'] = False

    # ---- CHECK_DEFAULTS DATA
    if VERB_MAIN_HELPERS:
        print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))

    if len(available_daids) == 0:
        print('[get_test_daids] * ... defaulting, no available daids on command line.')
        if isinstance(default_daids, six.string_types):
            if default_daids == 'all':
                default_daids = ibs.get_valid_aids()
                daid_request_info['default_daids'] = 'all'
            elif default_daids == 'gt':
                default_daids = ut.flatten(ibs.get_annot_groundtruth(qaid_list))
                daid_request_info['default_daids'] = 'gt'
        #available_qaids = valid_aids[0:1]
        assert not isinstance(available_daids, six.string_types)
        available_daids = default_daids
    else:
        if VERB_MAIN_HELPERS:
            print('[get_test_daids] * ... not defaulting')

    available_daids = ut.unique_ordered(available_daids)

    # ---- EXCLUSION STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
        print('[get_test_daids] * exclude step')

    species = ut.get_argval('--species', type_=str, default=None)

    if NO_JUNK:
        if VERB_MAIN_HELPERS:
            print('[get_test_daids] * Filtering junk')
        available_daids = ibs.filter_junk_annotations(available_daids)

    if EXCLUDE_QUERY:
        if VERB_MAIN_HELPERS:
            print('[get_test_daids] * Excluding query qaids')
        assert qaid_list is not None, 'must specify qaids to exclude'
        available_daids = ut.setdiff_ordered(available_daids, qaid_list)

    if daids_exclude is not None:
        if VERB_MAIN_HELPERS:
            print('[get_test_daids] * Excluding specified daids')
        available_daids = ut.setdiff_ordered(available_daids, daids_exclude)

    if species is not None:
        if species == 'primary':
            if VERB_MAIN_HELPERS:
                print('[get_test_qaids] * Finding primary species')
            #species = ibs.get_primary_database_species(available_daids)
            species = ibs.get_primary_database_species()
        if VERB_MAIN_HELPERS:
            print('[get_test_daids] * Filtering to species=%r' % (species,))
        import numpy as np
        isvalid_list = np.array(ibs.get_annot_species(available_daids)) == species
        available_daids = ut.compress(available_daids, isvalid_list)

    # ---- SUBINDEXING STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
        print('[get_test_daids] * subindex step')

    #ut.get_argval('--qshuffle')
    if DSHUFFLE:
        # Deterministic shuffling
        available_daids = ut.take(available_daids, ut.random_indexes(len(available_daids), seed=43))
        daid_request_info['shuffled'] = True

    if DINDEX is not None:
        dindexes = ensure_flatlistlike(DINDEX)
        _test_daids = [available_daids[dx] for dx in dindexes if dx < len(available_daids)]
        print('[get_test_daids] Chose subset of size %d/%d' % (len(_test_daids), len(available_daids)))
        available_daids = _test_daids

    if VERB_MAIN_HELPERS:
        print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
        print('[get_test_daids] L ___ GET_TEST_DAIDS ___')

    if return_annot_info:
        return available_daids, daid_request_info
    else:
        return available_daids
Example #20
def query_vsone_pairs(ibs, vsone_query_pairs, use_cache=False, save_qcache=False):
    """
    does vsone queries to rerank the top few vsmany queries

    Returns:
        tuple: qaid2_qres_vsone, qreq_vsone_

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-query_vsone_pairs

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid = qaids[0]
        >>> filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        >>> use_cache = False
        >>> save_qcache = False
        >>> # execute function
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids)
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid2_qres_vsone, qreq_vsone_ = query_vsone_pairs(ibs, vsone_query_pairs)
        >>> qres_vsone = qaid2_qres_vsone[qaid]
        >>> top_namescore_aids = qres_vsone.get_top_aids().tolist()
        >>> result = str(top_namescore_aids)
        >>> top_namescore_names = ibs.get_annot_names(top_namescore_aids)
        >>> assert top_namescore_names[0] == 'easy', 'top_namescore_names[0]=%r' % (top_namescore_names[0],)
    """
    #vsone_cfgdict = dict(codename='vsone_unnorm')
    #codename = 'vsone_unnorm_dist_ratio_extern_distinctiveness',
    codename = 'vsone_unnorm_dist_ratio'
    vsone_cfgdict = dict(
        index_method='single',
        codename=codename,
    )
    #------------------------
    qaid2_qres_vsone = {}
    for qaid, top_aids in vsone_query_pairs:
        # Perform a query request for each
        cm_list_vsone_, __qreq_vsone_ = ibs.query_chips(
            [qaid], top_aids, cfgdict=vsone_cfgdict, return_request=True,
            use_cache=use_cache, save_qcache=save_qcache)
        qaid2_qres_vsone_ = {cm.qaid: cm for cm in cm_list_vsone_}
        qaid2_qres_vsone.update(qaid2_qres_vsone_)
    #------------------------
    # Create pseudo query request because there is no good way to
    # represent the vsone reranking as a single query request and
    # we need one for the score normalizer
    #pseudo_codename_ = codename.replace('unnorm', 'norm') + '_extern_distinctiveness'
    pseudo_codename_ = codename.replace('unnorm', 'norm')  # + '_extern_distinctiveness'
    pseudo_vsone_cfgdict = dict(codename=pseudo_codename_)
    pseudo_qaids = ut.get_list_column(vsone_query_pairs, 0)
    pseudo_daids = ut.unique_ordered(ut.flatten(ut.get_list_column(vsone_query_pairs, 1)))
    # FIXME: making the pseudo qreq_ takes a nontrivial amount of time for what
    # should be a trivial task.
    pseudo_qreq_vsone_ = ibs.new_query_request(pseudo_qaids, pseudo_daids,
                                               cfgdict=pseudo_vsone_cfgdict,
                                               verbose=ut.VERBOSE)
    #pseudo_qreq_vsone_.load_distinctiveness_normalizer()
    qreq_vsone_ = pseudo_qreq_vsone_
    # Hack in a special config name
    qreq_vsone_.qparams.query_cfgstr = '_special' + qreq_vsone_.qparams.query_cfgstr
    return qaid2_qres_vsone, qreq_vsone_
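The pseudo request above gathers its database aids by taking the shortlist column of vsone_query_pairs, flattening it, and deduplicating; in plain Python, with made-up aids, that step looks roughly like this:

vsone_query_pairs = [(1, [4, 7, 9]), (2, [7, 3])]   # hypothetical (qaid, top_aids) shortlist
pseudo_qaids = [qaid for qaid, _ in vsone_query_pairs]        # [1, 2]
flat_daids = [aid for _, top_aids in vsone_query_pairs
              for aid in top_aids]                            # [4, 7, 9, 7, 3]
# ut.unique_ordered(flat_daids) -> [4, 7, 9, 3]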
Example #21
def get_test_qaids(ibs, default_qaids=None, return_annot_info=False, aidcfg=None):
    """
    Gets test annot_rowids based on command line arguments

    DEPRICATE

    Args:
        ibs (IBEISController):  ibeis controller object
        default_qaids (None): if list then used only if no other aids are available (default = [1])
           as a string it mimics the command line

    Returns:
        list: available_qaids

    CommandLine:
        python -m ibeis.init.main_helpers --test-get_test_qaids
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qaid 1
        python -m ibeis.init.main_helpers --test-get_test_qaids --allgt --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --qaid 4 5 8  --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2 --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qindex 0:10 --verbmhelp
        python -m ibeis.init.main_helpers --exec-get_test_qaids --controlled --db PZ_Master0 --exec-mode
        python -m ibeis.init.main_helpers --exec-get_test_qaids --db testdb1 --allgt --qindex 0:256

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> default_qaids = None
        >>> available_qaids = get_test_qaids(ibs, default_qaids)
        >>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_qaids, with_contrib=False, short=True)
        >>> result = 'available_qaids = ' + ut.obj_str(available_qaids, truncate=True, nl=False)
        >>> print('len(available_qaids) = %d' % len(available_qaids))
        >>> print(result)
        available_qaids = [1]
    """
    qaid_request_info = {}
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] + --- GET_TEST_QAIDS ---')

    # Old version of this function
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] + --- GET_TEST_QAIDS ---')
        print('[get_test_qaids] * default_qaids = %s' % (ut.obj_str(default_qaids, truncate=True, nl=False)))

    valid_aids = ibs.get_valid_aids()

    if len(valid_aids) == 0:
        print('[get_test_qaids] WARNING no annotations available')

    # ---- INCLUDING STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * include step')

    available_qaids = []

    #ut.get_argflag(('--all-cases', '--all'))
    #ut.get_argflag(('--all-gt-cases', '--allgt'))
    #ut.get_argflag(('--all-hard-cases', '--allhard'))
    #ut.get_argflag(('--qaid', '--qaids'))
    #ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    #not ut.get_argflag('--junk')

    ALL_CASES = params.args.all_cases or default_qaids == 'all'
    GT_CASES = params.args.all_gt_cases or default_qaids == 'gt'
    HARD_CASES = params.args.all_hard_cases or ut.get_argflag(('--all-hard-cases', '--allhard', '--hard'))
    NO_JUNK = not ut.get_argflag('--junk')
    CONTROLLED_CASES = ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    NO_REVIEWED = ut.get_argflag('--unreviewed')
    species = ut.get_argval('--species')
    #QAID = params.args.qaid
    QAID = ut.get_argval('--qaid', type_='fuzzy_subset', default=None)
    QINDEX = params.args.qindex
    QSHUFFLE = ut.get_argval('--qshuffle')

    if QAID is not None:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including cmdline specified qaids')
        try:
            args_qaid = ensure_flatlistlike(QAID)
        except Exception:
            args_qaid = QAID
        available_qaids.extend(args_qaid)
        qaid_request_info['custom_commandline'] = args_qaid

    if ALL_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including all qaids')
        available_qaids.extend(valid_aids)
        qaid_request_info['all_cases'] = True

    if HARD_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including hard qaids')
        is_hard_list = ibs.get_annot_is_hard(valid_aids)
        hard_aids = ut.compress(valid_aids, is_hard_list)
        available_qaids.extend(hard_aids)
        qaid_request_info['hard_cases'] = True

    if GT_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including groundtruth qaids')
        has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)
        hasgt_aids = ut.compress(valid_aids, has_gt_list)
        print('[get_test_qaids] Adding all %d/%d ground-truthed test cases' % (len(hasgt_aids), len(valid_aids)))
        available_qaids.extend(hasgt_aids)
        qaid_request_info['gt_cases'] = True

    if CONTROLLED_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including controlled qaids')
        from ibeis.other import ibsfuncs
        # Override all other gts with controlled
        controlled_qaids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=True)
        available_qaids.extend(controlled_qaids)
        qaid_request_info['controlled'] = True
    else:
        qaid_request_info['controlled'] = False

    # ---- CHECK_DEFAULTS QUERY
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))

    if len(available_qaids) == 0:
        print('[get_test_qaids] * ... defaulting, no available qaids on command line.')
        if default_qaids is None:
            default_qaids = valid_aids[0:1]
            qaid_request_info['default_one'] = True
        elif isinstance(default_qaids, six.string_types):
            if default_qaids == 'gt' or default_qaids == 'allgt':
                default_qaids = ibs.get_valid_aids(hasgt=True)
                qaid_request_info['default_gt'] = True
        available_qaids = default_qaids
    else:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * ... not defaulting')

    available_qaids = ut.unique_ordered(available_qaids)

    # ---- EXCLUSION STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] * exclude step')

    if NO_JUNK:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering junk')
        available_qaids = ibs.filter_junk_annotations(available_qaids)
        qaid_request_info['has_junk'] = False

    if NO_REVIEWED:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering unreviewed')
        isreviewed_list = ibs.get_annot_has_reviewed_matching_aids(available_qaids)
        available_qaids = ut.filterfalse_items(available_qaids, isreviewed_list)
        qaid_request_info['has_unreviewed'] = False

    if species is not None:
        if species == 'primary':
            if VERB_MAIN_HELPERS:
                print('[get_test_qaids] * Finding primary species')
            #species = ibs.get_primary_database_species(available_qaids)
            species = ibs.get_primary_database_species()
            qaid_request_info['primary_species'] = True

        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering to species=%r' % (species,))
        isvalid_list = np.array(ibs.get_annot_species(available_qaids)) == species
        available_qaids = ut.compress(available_qaids, isvalid_list)
        qaid_request_info['species_filter'] = species

    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] * subindex step')

    # ---- INDEX SUBSET

    #ut.get_argval('--qshuffle')
    if QSHUFFLE:
        # Deterministic shuffling
        available_qaids = ut.take(available_qaids, ut.random_indexes(len(available_qaids), seed=42))
        qaid_request_info['shuffled'] = True

    # Sample a large pool of chosen query qindexes
    if QINDEX is not None:
        # FIXME: should use a slice of the list or a sublist
        qindexes = ensure_flatlistlike(QINDEX)
        _test_qaids = [available_qaids[qx] for qx in qindexes if qx < len(available_qaids)]
        print('[get_test_qaids] Chose subset of size %d/%d' % (len(_test_qaids), len(available_qaids)))
        available_qaids = _test_qaids
        qaid_request_info['subset'] = qindexes

    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] L ___ GET_TEST_QAIDS ___')
    if return_annot_info:
        return available_qaids, qaid_request_info
    else:
        return available_qaids
Example #22
    def mana_potential2(card, deck=None, recurse=True):
        r"""Returns a list of mana sets or mana producers

        CommandLine:
            python -m mtgmonte.mtgobjs --exec-mana_potential2:1

        Example:
            >>> # ENABLE_DOCTEST
            >>> from mtgmonte.mtgobjs import *  # NOQA
            >>> from mtgmonte import mtgobjs
            >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
            >>> cards = mtgobjs.load_cards(['Tundra', 'Ancient Tomb', 'Black Lotus'])
            >>> card = cards[-1]
            >>> result = ut.repr2([card.mana_potential2(deck) for card in cards])
            >>> print(str(result))

        Example:
            >>> # ENABLE_DOCTEST
            >>> from mtgmonte.mtgobjs import *  # NOQA
            >>> from mtgmonte import mtgobjs
            >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
            >>> cards = mtgobjs.load_cards(['Flooded Strand', 'Tundra', 'Island', 'Shivan Reef', 'Ancient Tomb'])
            >>> card = cards[-1]
            >>> result = ut.repr2([card.mana_potential2(deck, recurse=True)
            >>>                    for card in cards], nl=1, strvals=1, nobr=1)
            >>> print(result)
            [{G}, {U}, {B}],
            [{W}, {U}],
            [{U}],
            [{C}, {U}, {R}],
            [{CC}],

        Example:
            >>> # ENABLE_DOCTEST
            >>> from mtgmonte.mtgobjs import *  # NOQA
            >>> from mtgmonte import mtgobjs
            >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
            >>> cards = mtgobjs.load_cards(['Flooded Strand', 'Tundra', 'Island', 'Shivan Reef', 'Ancient Tomb'])
            >>> card = cards[-1]
            >>> result = ut.repr2([card.mana_potential2(deck, recurse=False)
            >>>                    for card in cards], nl=1, strvals=True, nobr=1)
            >>> print(result)
            [Tropical Island, Sunken Hollow, Island],
            [{W}, {U}],
            [{U}],
            [{C}, {U}, {R}],
            [{CC}],
        """
        from mtgmonte import mtgrules
        potential = ManaOption()
        #ManaOption()
        for block in card.ability_blocks:
            mana_generated = mtgrules.mana_generated(block, card)
            if mana_generated is not None:
                potential.extend(mana_generated)
            else:
                if mtgrules.is_fetchland(block, card):
                    fetch_targets = mtgrules.get_fetch_search_targets(block, card, deck)
                    if recurse:
                        mana_generated = [t.mana_potential2(deck) for t in fetch_targets]
                        mana_generated = ut.flatten(mana_generated)
                        mana_generated = ut.unique_ordered(mana_generated)
                        potential.extend(ManaOption(mana_generated))
                    else:
                        potential.extend(fetch_targets)
        return potential
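For a fetchland, the recursive case flattens the per-target mana lists and de-duplicates them while keeping first-seen order. A small pure-Python stand-in for the ut.flatten + ut.unique_ordered step (the mana strings are illustrative, not real card data):

def flatten(list_of_lists):
    return [item for sub in list_of_lists for item in sub]

def unique_ordered(items):
    seen = set()
    return [x for x in items if not (x in seen or seen.add(x))]

# mana produced by each land the fetchland could search up (values are illustrative)
mana_per_target = [['{G}', '{U}'], ['{U}', '{B}'], ['{U}']]
potential = unique_ordered(flatten(mana_per_target))
print(potential)   # ['{G}', '{U}', '{B}']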
Пример #26
0
def get_test_daids(ibs, default_daids="all", qaid_list=None, return_annot_info=False, aidcfg=None):
    """ Gets database annot_rowids based on command line arguments

    DEPRICATE

    CommandLine:
        python dev.py --db PZ_MTEST -t best --exclude-query --qaid 72 -r 0 -c 0 --show --va --vf --dump-extra

    Args:
        ibs (IBEISController):  ibeis controller object
        default_daids (str): (default = 'all')
        qaid_list (list): list of chosen qaids that may affect daids (default = None)

    Returns:
        list: available_daids

    CommandLine:
        python -m ibeis.init.main_helpers --test-get_test_daids
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST  --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --exclude-query
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --daid-exclude 2 3 4
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --species=zebra_grevys
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_Master0 --species=zebra_grevys
        python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_Master0 --controlled --verbmhelp
        python -m ibeis.init.main_helpers --exec-get_test_daids --controlled --db PZ_Master0 --exec-mode

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> default_daids = 'all'
        >>> qaid_list = [1]
        >>> available_daids = get_test_daids(ibs, default_daids, qaid_list)
        >>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_daids, with_contrib=False, short=True)
        >>> result = 'available_daids = ' + ut.obj_str(available_daids, truncate=True, nl=False)
        >>> print('len(available_daids) %d' % len(available_daids))
        >>> print(result)
        available_daids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    """
    daid_request_info = {}

    if VERB_MAIN_HELPERS:
        print("[get_test_daids] + --- GET_TEST_DAIDS ---")
        print("[get_test_daids] * default_daids = %s" % (ut.obj_str(default_daids, truncate=True, nl=False)))
        print("[get_test_daids] * qaid_list = %s" % (ut.obj_str(qaid_list, truncate=True, nl=False)))

    # ---- INCLUDING STEP
    if VERB_MAIN_HELPERS:
        print("[get_test_daids] * include step")

    available_daids = []

    CONTROLLED_CASES = ut.get_argflag("--controlled") or ut.get_argflag("--controlled_daids")
    DSHUFFLE = ut.get_argval("--dshuffle")
    DINDEX = params.args.dindex
    NO_JUNK = not ut.get_argflag("--junk")
    EXCLUDE_QUERY = ut.get_argflag("--exclude-query")
    # daids_exclude = params.args.daid_exclude
    daids_exclude = None

    if CONTROLLED_CASES:
        print("[get_test_daids] * Including controlled daids")
        from ibeis.other import ibsfuncs

        controlled_daids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=False)
        available_daids.extend(controlled_daids)
        daid_request_info["controlled"] = True
    else:
        daid_request_info["controlled"] = False

    # ---- CHECK_DEFAULTS DATA
    if VERB_MAIN_HELPERS:
        print("[get_test_daids] * len(available_daids) = %r" % (len(available_daids)))

    if len(available_daids) == 0:
        print("[get_test_daids] * ... defaulting, no available daids on command line.")
        if isinstance(default_daids, six.string_types):
            if default_daids == "all":
                default_daids = ibs.get_valid_aids()
                daid_request_info["default_daids"] = "all"
            elif default_daids == "gt":
                default_daids = ut.flatten(ibs.get_annot_groundtruth(qaid_list))
                daid_request_info["default_daids"] = "gt"
        # available_qaids = valid_aids[0:1]
        assert not isinstance(available_daids, six.string_types)
        available_daids = default_daids
    else:
        if VERB_MAIN_HELPERS:
            print("[get_test_daids] * ... not defaulting")

    available_daids = ut.unique_ordered(available_daids)

    # ---- EXCLUSION STEP
    if VERB_MAIN_HELPERS:
        print("[get_test_daids] * len(available_daids) = %r" % (len(available_daids)))
        print("[get_test_daids] * exclude step")

    species = ut.get_argval("--species", type_=str, default=None)

    if NO_JUNK:
        if VERB_MAIN_HELPERS:
            print("[get_test_daids] * Filtering junk")
        available_daids = ibs.filter_junk_annotations(available_daids)

    if EXCLUDE_QUERY:
        if VERB_MAIN_HELPERS:
            print("[get_test_daids] * Excluding query qaids")
        assert qaid_list is not None, "must specify qaids to exclude"
        available_daids = ut.setdiff_ordered(available_daids, qaid_list)

    if daids_exclude is not None:
        if VERB_MAIN_HELPERS:
            print("[get_test_daids] * Excluding specified daids")
        available_daids = ut.setdiff_ordered(available_daids, daids_exclude)

    if species is not None:
        if species == "primary":
            if VERB_MAIN_HELPERS:
                print("[get_test_qaids] * Finiding primary species")
            # species = ibs.get_primary_database_species(available_daids)
            species = ibs.get_primary_database_species()
        if VERB_MAIN_HELPERS:
            print("[get_test_daids] * Filtering to species=%r" % (species,))
        import numpy as np

        isvalid_list = np.array(ibs.get_annot_species(available_daids)) == species
        available_daids = ut.compress(available_daids, isvalid_list)

    # ---- SUBINDEXING STEP
    if VERB_MAIN_HELPERS:
        print("[get_test_daids] * len(available_daids) = %r" % (len(available_daids)))
        print("[get_test_daids] * subindex step")

    # ut.get_argval('--qshuffle')
    if DSHUFFLE:
        # Deterministic shuffling
        available_daids = ut.take(available_daids, ut.random_indexes(len(available_daids), seed=43))
        daid_request_info["shuffled"] = True

    if DINDEX is not None:
        dindexes = ensure_flatlistlike(DINDEX)
        _test_daids = [available_daids[dx] for dx in dindexes if dx < len(available_daids)]
        print("[get_test_daids] Chose subset of size %d/%d" % (len(_test_daids), len(available_daids)))
        available_daids = _test_daids

    if VERB_MAIN_HELPERS:
        print("[get_test_daids] * len(available_daids) = %r" % (len(available_daids)))
        print("[get_test_daids] L ___ GET_TEST_DAIDS ___")

    if return_annot_info:
        return available_daids, daid_request_info
    else:
        return available_daids
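The default/exclude steps above rely on two order-preserving helpers: de-duplication (ut.unique_ordered) and ordered set difference (ut.setdiff_ordered). A pure-Python sketch of the same filtering, using made-up aid values:

def setdiff_ordered(items, exclude):
    exclude = set(exclude)
    return [x for x in items if x not in exclude]

available_daids = [1, 2, 2, 3, 4, 5, 5]
qaid_list = [2, 4]                                        # hypothetical query aids to exclude

available_daids = list(dict.fromkeys(available_daids))    # ordered dedup -> [1, 2, 3, 4, 5]
available_daids = setdiff_ordered(available_daids, qaid_list)
print(available_daids)                                    # [1, 3, 5]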
Пример #27
0
def show_name_matches(ibs, qaid, name_daid_list, name_fm_list, name_fs_list,
                      name_H1_list, name_featflag_list, qreq_=None, **kwargs):
    """
    Called from chip_match.py

    Args:
        ibs (IBEISController):  ibeis controller object
        qaid (int):  query annotation id
        name_daid_list (list):
        name_fm_list (list):
        name_fs_list (list):
        name_H1_list (list):
        name_featflag_list (list):
        qreq_ (QueryRequest):  query request object with hyper-parameters(default = None)

    Kwargs:
        draw_fmatches, name_rank, fnum, pnum, colorbar_, nonvote_mode,
        fastmode, show_matches, fs, fm_norm, lbl1, lbl2, rect, draw_border,
        cmap, H1, H2, scale_factor1, scale_factor2, draw_pts, draw_ell,
        draw_lines, show_nMatches, all_kpts, in_image, show_query, draw_lbl,
        name_annot_scores, score, rawscore, aid2_raw_rank, show_name,
        show_nid, show_aid, show_annot_score, show_truth, name_score,
        show_name_score, show_name_rank, show_timedelta

    CommandLine:
        python -m ibeis.viz.viz_matches --exec-show_name_matches
        python -m ibeis.viz.viz_matches --test-show_name_matches --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_matches import *  # NOQA
        >>> from ibeis.algo.hots import chip_match
        >>> from ibeis.algo.hots import name_scoring
        >>> import vtool as vt
        >>> from ibeis.algo.hots import _pipeline_helpers as plh  # NOQA
        >>> import numpy as np
        >>> func = chip_match.ChipMatch.show_single_namematch
        >>> sourcecode = ut.get_func_sourcecode(func, stripdef=True, stripret=True,
        >>>                                     strip_docstr=True)
        >>> setup = ut.regex_replace('viz_matches.show_name_matches', '#', sourcecode)
        >>> homog = False
        >>> print(ut.indent(setup, '>>> '))
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('PZ_MTEST', qaid_list=[1])
        >>> cm = cm_list[0]
        >>> cm.score_nsum(qreq_)
        >>> dnid = ibs.get_annot_nids(cm.qaid)
        >>> # +--- COPIED SECTION
        >>> locals_ = locals()
        >>> var_list = ut.exec_func_src(
        >>>     func, locals_=locals_,
        >>>     sentinal='name_annot_scores = cm.annot_score_list.take(sorted_groupxs')
        >>> exec(ut.execstr_dict(var_list))
        >>> # L___ COPIED SECTION
        >>> kwargs = {}
        >>> show_name_matches(ibs, qaid, name_daid_list, name_fm_list,
        >>>                   name_fs_list, name_h1_list, name_featflag_list,
        >>>                   qreq_=qreq_, **kwargs)
        >>> ut.quit_if_noshow()
        >>> ut.show_if_requested()
    """
    #print("SHOW NAME MATCHES")
    #print(ut.repr2(kwargs, nl=True))
    #from ibeis import constants as const
    from ibeis import tag_funcs
    draw_fmatches = kwargs.pop('draw_fmatches', True)
    rchip1, kpts1 = get_query_annot_pair_info(ibs, qaid, qreq_, draw_fmatches)
    rchip2_list, kpts2_list = get_data_annot_pair_info(ibs, name_daid_list,
                                                       qreq_, draw_fmatches)
    fm_list = name_fm_list
    fs_list = name_fs_list
    featflag_list = name_featflag_list
    offset_list, sf_list, bbox_list = show_multichip_match(rchip1, rchip2_list,
                                                           kpts1, kpts2_list,
                                                           fm_list, fs_list,
                                                           featflag_list,
                                                           **kwargs)
    aid_list = [qaid] + name_daid_list
    annotate_matches3(ibs, aid_list, bbox_list, offset_list, name_fm_list,
                      name_fs_list, qreq_=None, **kwargs)
    ax = pt.gca()
    title = vh.get_query_text(ibs, None, name_daid_list, False, qaid=qaid,
                              **kwargs)

    pt.set_title(title, ax)

    # Case tags
    annotmatch_rowid_list = ibs.get_annotmatch_rowid_from_superkey(
        [qaid] * len(name_daid_list), name_daid_list)
    annotmatch_rowid_list = ut.filter_Nones(annotmatch_rowid_list)
    tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid_list)
    if not ut.get_argflag('--show'):  # False:
        tags_list = tag_funcs.consolodate_annotmatch_tags(tags_list)
    tag_list = ut.unique_ordered(ut.flatten(tags_list))

    name_rank = kwargs.get('name_rank', None)
    truth = get_multitruth(ibs, aid_list)

    xlabel = {1: 'Correct ID', 0: 'Incorrect ID', 2: 'Unknown ID'}[truth]

    if False:
        if name_rank is None:
            xlabel = {1: 'Genuine', 0: 'Imposter', 2: 'Unknown'}[truth]
            #xlabel = {1: 'True', 0: 'False', 2: 'Unknown'}[truth]
        else:
            if name_rank == 0:
                xlabel = {
                    1: 'True Positive', 0: 'False Positive', 2: 'Unknown'}[truth]
            else:
                xlabel = {
                    1: 'False Negative', 0: 'True Negative', 2: 'Unknown'}[truth]

    if len(tag_list) > 0:
        xlabel += '\n' + ', '.join(tag_list)

    pt.set_xlabel(xlabel)
    return ax
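The case-tag handling gathers tags from all annot matches, flattens them, and keeps one copy of each in first-seen order before appending them to the xlabel. A stand-alone sketch of that aggregation (tag names are illustrative):

tags_list = [['photobomb', 'occlusion'], ['occlusion'], [], ['viewpoint']]
tag_list = list(dict.fromkeys(tag for tags in tags_list for tag in tags))

xlabel = 'Correct ID'
if tag_list:
    xlabel += '\n' + ', '.join(tag_list)
print(xlabel)   # Correct ID \n photobomb, occlusion, viewpoint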
Пример #28
0
    def show_page(self, bring_to_front=False, onlyrows=None, fulldraw=True):
        """ Plots all subaxes on a page

        onlyrows is a hack to only draw a subset of the data again
        """
        if ut.VERBOSE:
            if not fulldraw:
                print('[matchver] show_page(fulldraw=%r, onlyrows=%r)' %
                      (fulldraw, onlyrows))
            else:
                print('[matchver] show_page(fulldraw=%r)' % (fulldraw))
        self.prepare_page(fulldraw=fulldraw)
        # Variables we will work with to paint a pretty picture
        ibs = self.ibs
        nRows = self.nRows
        colpad = 1 if self.cm is not None else 0
        nCols = self.nCols + colpad

        # Distinct color for every unique name
        unique_nids = ut.unique_ordered(
            ibs.get_annot_name_rowids(self.all_aid_list,
                                      distinguish_unknowns=False))
        unique_colors = pt.distinct_colors(len(unique_nids),
                                           brightness=.7,
                                           hue_range=(.05, .95))
        self.nid2_color = dict(zip(unique_nids, unique_colors))

        row_aids_list = self.get_row_aids_list()

        if self.cm is not None:
            print("DRAWING QRES")
            pnum = (1, nCols, 1)
            if not fulldraw:
                # not doing full draw so we have to clear any axes
                # that are here already manually
                ax = self.fig.add_subplot(*pnum)
                self.clear_parent_axes(ax)
            self.cm.show_single_annotmatch(self.qreq_,
                                           self.aid2,
                                           fnum=self.fnum,
                                           pnum=pnum,
                                           draw_fmatch=True,
                                           colorbar_=False)

        # For each row
        for rowx, aid_list in enumerate(row_aids_list):
            offset = rowx * nCols + 1
            if onlyrows is not None and rowx not in onlyrows:
                continue
            #ibsfuncs.assert_valid_aids(ibs, groundtruth)
            # For each column
            for colx, aid in enumerate(aid_list, start=colpad):
                if colx >= nCols:
                    break
                try:
                    nid = ibs.get_annot_name_rowids(aid)
                    if ibsfuncs.is_nid_unknown(ibs, [nid])[0]:
                        color = const.UNKNOWN_PURPLE_RGBA01
                    else:
                        color = self.nid2_color[nid]
                except Exception as ex:
                    ut.printex(ex)
                    print('nid = %r' % (nid, ))
                    print('self.nid2_color = %s' %
                          (ut.repr2(self.nid2_color), ))
                    raise
                px = colx + offset
                ax = self.plot_chip(int(aid),
                                    nRows,
                                    nCols,
                                    px,
                                    color=color,
                                    fulldraw=fulldraw)
                # If there are still more in this row to display
                if colx + 1 < len(aid_list) and colx + 1 >= nCols:
                    total_indices = len(aid_list)
                    current_index = self.col_offset_list[rowx] + 1
                    next_text = 'next\n%d/%d' % (current_index, total_indices)
                    next_func = functools.partial(self.rotate_row, rowx=rowx)
                    self.append_button(next_text,
                                       callback=next_func,
                                       location='right',
                                       size='33%',
                                       ax=ax)

        if fulldraw:
            self.show_hud()
            hspace = .05 if (self.nCols) > 1 else .1
            subplotspar = {
                'left': .1,
                'right': .9,
                'top': .85,
                'bottom': .1,
                'wspace': .3,
                'hspace': hspace,
            }
            pt.adjust_subplots(**subplotspar)
        self.draw()
        self.show()
        if bring_to_front:
            self.bring_to_front()
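Assigning one distinct color per name reduces the per-annotation name ids to their ordered unique values and zips them against a color list. A minimal sketch of the nid-to-color mapping, with placeholder RGB tuples standing in for pt.distinct_colors:

all_nids = [7, 7, 3, 9, 3, 7]                    # name id per annotation (illustrative)
unique_nids = list(dict.fromkeys(all_nids))      # [7, 3, 9], first-seen order

# placeholder colors standing in for pt.distinct_colors(len(unique_nids), ...)
palette = [(0.9, 0.1, 0.1), (0.1, 0.8, 0.2), (0.2, 0.3, 0.9)]
nid2_color = dict(zip(unique_nids, palette))

colors_per_annot = [nid2_color[nid] for nid in all_nids]
print(nid2_color)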
Пример #29
0
def latex_dbstats(ibs_list, **kwargs):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object

    CommandLine:
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1 --show
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 testdb1 --show
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 PZ_MTEST GZ_ALL --show
        python -m ibeis.other.dbinfo --test-latex_dbstats --dblist GZ_ALL NNP_MasterGIRM_core --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
        >>> ibs_list = [ibeis.opendb(db=db) for db in db_list]
        >>> tabular_str = latex_dbstats(ibs_list)
        >>> tabular_cmd = ut.latex_newcommand(ut.latex_sanitize_command_name('DatabaseInfo'), tabular_str)
        >>> ut.copy_text_to_clipboard(tabular_cmd)
        >>> write_fpath = ut.get_argval('--write', type_=str, default=None)
        >>> if write_fpath is not None:
        >>>     fpath = ut.truepath(write_fpath)
        >>>     text = ut.readfrom(fpath)
        >>>     new_text = ut.replace_between_tags(text, tabular_cmd, '% <DBINFO>', '% </DBINFO>')
        >>>     ut.writeto(fpath, new_text)
        >>> ut.print_code(tabular_cmd, 'latex')
        >>> ut.quit_if_noshow()
        >>> ut.render_latex_text('\\noindent \n' + tabular_str)
    """

    import ibeis
    # Parse for aids test data
    aids_list = [ibeis.testdata_aids(ibs=ibs) for ibs in ibs_list]

    #dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False) for ibs in ibs_list]
    dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False, aid_list=aids)
                   for ibs, aids in zip(ibs_list, aids_list)]

    #title = db_name + ' database statistics'
    title = 'Database statistics'
    stat_title = '# Annotations per name (multiton)'

    #col_lbls = [
    #    'multiton',
    #    #'singleton',
    #    'total',
    #    'multiton',
    #    'singleton',
    #    'total',
    #]
    key_to_col_lbls = {
        'num_names_multiton':   'multiton',
        'num_names_singleton':  'singleton',
        'num_names':            'total',

        'num_multiton_annots':  'multiton',
        'num_singleton_annots': 'singleton',
        'num_unknown_annots':   'unknown',
        'num_annots':           'total',
    }
    # Structure of columns / multicolumns
    multi_col_keys = [
        ('# Names', (
            'num_names_multiton',
            #'num_names_singleton',
            'num_names',
        )),

        ('# Annots', (
            'num_multiton_annots',
            'num_singleton_annots',
            #'num_unknown_annots',
            'num_annots')),
    ]
    #multicol_lbls = [('# Names', 3), ('# Annots', 3)]
    multicol_lbls = [(mcolname, len(mcols)) for mcolname, mcols in multi_col_keys]

    # Flatten column labels
    col_keys = ut.flatten(ut.get_list_column(multi_col_keys, 1))
    col_lbls = ut.dict_take(key_to_col_lbls, col_keys)

    row_lbls   = []
    row_values = []

    #stat_col_lbls = ['max', 'min', 'mean', 'std', 'nMin', 'nMax']
    stat_col_lbls = ['max', 'min', 'mean', 'std', 'med']
    #stat_row_lbls = ['# Annot per Name (multiton)']
    stat_row_lbls = []
    stat_row_values = []

    SINGLE_TABLE = False
    EXTRA = True

    for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
        row_ = ut.dict_take(dbinfo_locals, col_keys)
        dbname = ibs.get_dbname_alias()
        row_lbls.append(dbname)
        multiton_annot_stats = ut.get_stats(dbinfo_locals['multiton_nid2_nannots'], use_median=True)
        stat_rows = ut.dict_take(multiton_annot_stats, stat_col_lbls)
        if SINGLE_TABLE:
            row_.extend(stat_rows)
        else:
            stat_row_lbls.append(dbname)
            stat_row_values.append(stat_rows)

        row_values.append(row_)

    CENTERLINE = False
    AS_TABLE = True
    tablekw = dict(
        astable=AS_TABLE, centerline=CENTERLINE, FORCE_INT=False, precision=2,
        col_sep='', multicol_sep='|',
        **kwargs)

    if EXTRA:
        extra_keys = [
            #'species2_nAids',
            'qualtext2_nAnnots',
            'yawtext2_nAnnots',
        ]
        extra_titles = {
            'species2_nAids': 'Annotations per species.',
            'qualtext2_nAnnots': 'Annotations per quality.',
            'yawtext2_nAnnots': 'Annotations per viewpoint.',
        }
        extra_collbls = ut.ddict(list)
        extra_rowvalues = ut.ddict(list)
        extra_tables = ut.ddict(list)

        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_collbls[key] = ut.unique_ordered(extra_collbls[key] + list(dbinfo_locals[key].keys()))

        extra_collbls['qualtext2_nAnnots'] = ['excellent', 'good', 'ok', 'poor', 'junk', 'UNKNOWN']
        #extra_collbls['yawtext2_nAnnots'] = ['backleft', 'left', 'frontleft', 'front', 'frontright', 'right', 'backright', 'back', None]
        extra_collbls['yawtext2_nAnnots'] = ['BL', 'L', 'FL', 'F', 'FR', 'R', 'BR', 'B', None]

        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_rowvalues[key].append(ut.dict_take(dbinfo_locals[key], extra_collbls[key], 0))

        qualalias = {'UNKNOWN': None}

        extra_collbls['yawtext2_nAnnots'] = [ibs.const.YAWALIAS.get(val, val) for val in extra_collbls['yawtext2_nAnnots']]
        extra_collbls['qualtext2_nAnnots'] = [qualalias.get(val, val) for val in extra_collbls['qualtext2_nAnnots']]

        for key in extra_keys:
            extra_tables[key] = ut.util_latex.make_score_tabular(
                row_lbls, extra_collbls[key], extra_rowvalues[key],
                title=extra_titles[key], col_align='r', table_position='[h!]', **tablekw)

    #tabular_str = util_latex.tabular_join(tabular_body_list)
    if SINGLE_TABLE:
        col_lbls += stat_col_lbls
        multicol_lbls += [(stat_title, len(stat_col_lbls))]

    count_tabular_str = ut.util_latex.make_score_tabular(
        row_lbls, col_lbls, row_values, title=title, multicol_lbls=multicol_lbls, table_position='[ht!]', **tablekw)

    #print(row_lbls)

    if SINGLE_TABLE:
        tabular_str = count_tabular_str
    else:
        stat_tabular_str = ut.util_latex.make_score_tabular(
            stat_row_lbls, stat_col_lbls, stat_row_values, title=stat_title,
            col_align='r', table_position='[h!]', **tablekw)

        # Make a table of statistics
        if tablekw['astable']:
            tablesep = '\n%--\n'
        else:
            tablesep = '\\\\\n%--\n'
        if EXTRA:
            tabular_str = tablesep.join([count_tabular_str, stat_tabular_str] + ut.dict_take(extra_tables, extra_keys))
        else:
            tabular_str = tablesep.join([count_tabular_str, stat_tabular_str])

    return tabular_str
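The EXTRA branch grows one shared set of column labels across all databases by repeatedly unioning each database's keys while preserving first-appearance order, then fills each row with 0 for missing labels. A pure-Python sketch with invented quality histograms:

def unique_ordered(items):
    seen = set()
    return [x for x in items if not (x in seen or seen.add(x))]

dbinfo_list = [
    {'qualtext2_nAnnots': {'good': 10, 'poor': 2}},
    {'qualtext2_nAnnots': {'excellent': 4, 'good': 7, 'junk': 1}},
]

collbls = []
for dbinfo_locals in dbinfo_list:
    collbls = unique_ordered(collbls + list(dbinfo_locals['qualtext2_nAnnots'].keys()))

# one row per database; missing labels default to 0, like ut.dict_take(..., 0)
rowvalues = [[dbinfo_locals['qualtext2_nAnnots'].get(lbl, 0) for lbl in collbls]
             for dbinfo_locals in dbinfo_list]
print(collbls)     # ['good', 'poor', 'excellent', 'junk']
print(rowvalues)   # [[10, 2, 0, 0], [7, 0, 4, 1]]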
Пример #30
0
def latex_dbstats(ibs_list, **kwargs):
    r"""
    Args:
        ibs (IBEISController):  wbia controller object

    CommandLine:
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist testdb1
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist testdb1 --show
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 testdb1 --show
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 PZ_MTEST GZ_ALL --show
        python -m wbia.other.dbinfo --test-latex_dbstats --dblist GZ_ALL NNP_MasterGIRM_core --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.other.dbinfo import *  # NOQA
        >>> import wbia
        >>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
        >>> ibs_list = [wbia.opendb(db=db) for db in db_list]
        >>> tabular_str = latex_dbstats(ibs_list)
        >>> tabular_cmd = ut.latex_newcommand(ut.latex_sanitize_command_name('DatabaseInfo'), tabular_str)
        >>> ut.copy_text_to_clipboard(tabular_cmd)
        >>> write_fpath = ut.get_argval('--write', type_=str, default=None)
        >>> if write_fpath is not None:
        >>>     fpath = ut.truepath(write_fpath)
        >>>     text = ut.readfrom(fpath)
        >>>     new_text = ut.replace_between_tags(text, tabular_cmd, '% <DBINFO>', '% </DBINFO>')
        >>>     ut.writeto(fpath, new_text)
        >>> ut.print_code(tabular_cmd, 'latex')
        >>> ut.quit_if_noshow()
        >>> ut.render_latex_text('\\noindent \n' + tabular_str)
    """
    import wbia

    # Parse for aids test data
    aids_list = [wbia.testdata_aids(ibs=ibs) for ibs in ibs_list]

    # dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False) for ibs in ibs_list]
    dbinfo_list = [
        get_dbinfo(ibs, with_contrib=False, verbose=False, aid_list=aids)
        for ibs, aids in zip(ibs_list, aids_list)
    ]

    # title = db_name + ' database statistics'
    title = 'Database statistics'
    stat_title = '# Annotations per name (multiton)'

    # col_lbls = [
    #    'multiton',
    #    #'singleton',
    #    'total',
    #    'multiton',
    #    'singleton',
    #    'total',
    # ]
    key_to_col_lbls = {
        'num_names_multiton': 'multiton',
        'num_names_singleton': 'singleton',
        'num_names': 'total',
        'num_multiton_annots': 'multiton',
        'num_singleton_annots': 'singleton',
        'num_unknown_annots': 'unknown',
        'num_annots': 'total',
    }
    # Structure of columns / multicolumns
    multi_col_keys = [
        (
            '# Names',
            (
                'num_names_multiton',
                # 'num_names_singleton',
                'num_names',
            ),
        ),
        (
            '# Annots',
            (
                'num_multiton_annots',
                'num_singleton_annots',
                # 'num_unknown_annots',
                'num_annots',
            ),
        ),
    ]
    # multicol_lbls = [('# Names', 3), ('# Annots', 3)]
    multicol_lbls = [(mcolname, len(mcols)) for mcolname, mcols in multi_col_keys]

    # Flatten column labels
    col_keys = ut.flatten(ut.get_list_column(multi_col_keys, 1))
    col_lbls = ut.dict_take(key_to_col_lbls, col_keys)

    row_lbls = []
    row_values = []

    # stat_col_lbls = ['max', 'min', 'mean', 'std', 'nMin', 'nMax']
    stat_col_lbls = ['max', 'min', 'mean', 'std', 'med']
    # stat_row_lbls = ['# Annot per Name (multiton)']
    stat_row_lbls = []
    stat_row_values = []

    SINGLE_TABLE = False
    EXTRA = True

    for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
        row_ = ut.dict_take(dbinfo_locals, col_keys)
        dbname = ibs.get_dbname_alias()
        row_lbls.append(dbname)
        multiton_annot_stats = ut.get_stats(
            dbinfo_locals['multiton_nid2_nannots'], use_median=True, nl=1
        )
        stat_rows = ut.dict_take(multiton_annot_stats, stat_col_lbls)
        if SINGLE_TABLE:
            row_.extend(stat_rows)
        else:
            stat_row_lbls.append(dbname)
            stat_row_values.append(stat_rows)

        row_values.append(row_)

    CENTERLINE = False
    AS_TABLE = True
    tablekw = dict(
        astable=AS_TABLE,
        centerline=CENTERLINE,
        FORCE_INT=False,
        precision=2,
        col_sep='',
        multicol_sep='|',
        **kwargs
    )

    if EXTRA:
        extra_keys = [
            # 'species2_nAids',
            'qualtext2_nAnnots',
            'viewcode2_nAnnots',
        ]
        extra_titles = {
            'species2_nAids': 'Annotations per species.',
            'qualtext2_nAnnots': 'Annotations per quality.',
            'viewcode2_nAnnots': 'Annotations per viewpoint.',
        }
        extra_collbls = ut.ddict(list)
        extra_rowvalues = ut.ddict(list)
        extra_tables = ut.ddict(list)

        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_collbls[key] = ut.unique_ordered(
                    extra_collbls[key] + list(dbinfo_locals[key].keys())
                )

        extra_collbls['qualtext2_nAnnots'] = [
            'excellent',
            'good',
            'ok',
            'poor',
            'junk',
            'UNKNOWN',
        ]
        # extra_collbls['viewcode2_nAnnots'] = ['backleft', 'left', 'frontleft', 'front', 'frontright', 'right', 'backright', 'back', None]
        extra_collbls['viewcode2_nAnnots'] = [
            'BL',
            'L',
            'FL',
            'F',
            'FR',
            'R',
            'BR',
            'B',
            None,
        ]

        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_rowvalues[key].append(
                    ut.dict_take(dbinfo_locals[key], extra_collbls[key], 0)
                )

        qualalias = {'UNKNOWN': None}

        extra_collbls['viewcode2_nAnnots'] = [
            ibs.const.YAWALIAS.get(val, val) for val in extra_collbls['viewcode2_nAnnots']
        ]
        extra_collbls['qualtext2_nAnnots'] = [
            qualalias.get(val, val) for val in extra_collbls['qualtext2_nAnnots']
        ]

        for key in extra_keys:
            extra_tables[key] = ut.util_latex.make_score_tabular(
                row_lbls,
                extra_collbls[key],
                extra_rowvalues[key],
                title=extra_titles[key],
                col_align='r',
                table_position='[h!]',
                **tablekw
            )

    # tabular_str = util_latex.tabular_join(tabular_body_list)
    if SINGLE_TABLE:
        col_lbls += stat_col_lbls
        multicol_lbls += [(stat_title, len(stat_col_lbls))]

    count_tabular_str = ut.util_latex.make_score_tabular(
        row_lbls,
        col_lbls,
        row_values,
        title=title,
        multicol_lbls=multicol_lbls,
        table_position='[ht!]',
        **tablekw
    )

    # logger.info(row_lbls)

    if SINGLE_TABLE:
        tabular_str = count_tabular_str
    else:
        stat_tabular_str = ut.util_latex.make_score_tabular(
            stat_row_lbls,
            stat_col_lbls,
            stat_row_values,
            title=stat_title,
            col_align='r',
            table_position='[h!]',
            **tablekw
        )

        # Make a table of statistics
        if tablekw['astable']:
            tablesep = '\n%--\n'
        else:
            tablesep = '\\\\\n%--\n'
        if EXTRA:
            tabular_str = tablesep.join(
                [count_tabular_str, stat_tabular_str]
                + ut.dict_take(extra_tables, extra_keys)
            )
        else:
            tabular_str = tablesep.join([count_tabular_str, stat_tabular_str])

    return tabular_str
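Both versions finish the extra column labels by remapping raw codes through an alias dict, falling back to the original value when no alias exists (the dict.get(val, val) idiom used for YAWALIAS and qualalias above). A tiny sketch with a hypothetical alias table:

viewalias = {'BL': 'back-left', 'FR': 'front-right'}   # hypothetical alias table
qualalias = {'UNKNOWN': None}

viewcodes = ['BL', 'L', 'FR', None]
qualtexts = ['good', 'UNKNOWN']

print([viewalias.get(code, code) for code in viewcodes])   # ['back-left', 'L', 'front-right', None]
print([qualalias.get(q, q) for q in qualtexts])            # ['good', None]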
Пример #31
0
def make_example_docstr(funcname=None, modname=None, argname_list=None,
                        defaults=None, return_type=None, return_name=None,
                        ismethod=False):
    """
    Creates skeleton code to build an example doctest

    Args:
        funcname (str):  function name
        modname (str):  module name
        argname_list (str):  list of argument names
        defaults (None):
        return_type (None):
        return_name (str):  return variable name
        ismethod (bool):

    Returns:
        str: examplecode

    CommandLine:
        python -m utool.util_autogen --test-make_example_docstr

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_autogen import *  # NOQA
        >>> # build test data
        >>> funcname = 'make_example_docstr'
        >>> modname = 'utool.util_autogen'
        >>> argname_list = ['qaids', 'qreq_']
        >>> defaults = None
        >>> return_type = tuple
        >>> return_name = 'foo'
        >>> ismethod = False
        >>> # execute function
        >>> examplecode = make_example_docstr(funcname, modname, argname_list, defaults, return_type, return_name, ismethod)
        >>> # verify results
        >>> result = str(examplecode)
        >>> print(result)
        # DISABLE_DOCTEST
        from utool.util_autogen import *  # NOQA
        import utool as ut
        import ibeis
        species = ibeis.const.TEST_SPECIES.ZEB_PLAIN
        qaids = ibs.get_valid_aids(species=species)
        qreq_ = ibeis.testdata_qreq_()
        foo = make_example_docstr(qaids, qreq_)
        result = ('foo = %s' % (ut.repr2(foo),))
        print(result)
    """
    import utool as ut

    examplecode_lines = []
    top_import_fmstr = 'from {modname} import *  # NOQA'
    top_import = top_import_fmstr.format(modname=modname)
    import_lines = [top_import]
    if modname.startswith('utool'):
        import_lines += ['import utool as ut']
    is_show_func = not modname.startswith('utool') and not modname.startswith('mtgmonte')

    # TODO: Externally register these
    default_argval_map = {
        'ibs'       : 'ibeis.opendb(defaultdb=\'testdb1\')',
        'testres'   : 'ibeis.testdata_expts(\'PZ_MTEST\')',
        'qreq_'     : 'ibeis.testdata_qreq_()',
        'cm_list'   : 'qreq_.execute()',
        'cm'        : 'qreq_.execute()[0]',
        'aid_list'  : 'ibs.get_valid_aids()',
        'nid_list'  : 'ibs._get_all_known_nids()',
        'qaids'     : 'ibs.get_valid_aids(species=species)',
        'daids'     : 'ibs.get_valid_aids(species=species)',
        'species'   : 'ibeis.const.TEST_SPECIES.ZEB_PLAIN',
        'kpts'      : 'vt.dummy.get_dummy_kpts()',
        'dodraw'    : 'ut.show_was_requested()',
        'img_fpath' : 'ut.grab_test_imgpath(\'carl.jpg\')',
        'gfpath'    : 'ut.grab_test_imgpath(\'carl.jpg\')',
        'img'       : 'vt.imread(img_fpath)',
        'img_in'    : 'vt.imread(img_fpath)',
        'bbox'      : '(10, 10, 50, 50)',
        'theta'     : '0.0',
        'rng'       : 'np.random.RandomState(0)',
    }
    import_depends_map = {
        'ibeis':    'import ibeis',
        'vt':       'import vtool as vt',
        #'img':      'import vtool as vt',  # TODO: remove. fix dependency
        #'species':  'import ibeis',
    }
    var_depends_map = {
        'species':   ['ibeis'],
        'ibs':       ['ibeis'],
        'testres': ['ibeis'],
        'kpts':      ['vt'],
        #'qreq_':     ['ibs', 'species', 'daids', 'qaids'],
        'qreq_':     ['ibeis'],
        'qaids':     ['ibs'],
        'daids':     ['ibs'],
        'qaids':     ['species'],
        'daids':     ['species'],
        'img':       ['img_fpath', 'vt'],
    }

    def find_arg_defaultrepr(argname, val):
        import types
        if val == '?':
            if argname in default_argval_map:
                val = ut.PythonStatement(default_argval_map[argname])
                if argname in import_depends_map:
                    import_lines.append(import_depends_map[argname])
        elif isinstance(val, types.ModuleType):
            return val.__name__
        return repr(val)

    # augment argname list with dependencies
    dependant_argnames = []  # deque()
    def append_dependant_argnames(argnames, dependant_argnames):
        """ use hints to add known dependencies for certain argument inputs """
        for argname in argnames:
            # Check if argname just implies an import
            if argname in import_depends_map:
                import_lines.append(import_depends_map[argname])
            # Check if argname was already added as dependency
            if (argname not in dependant_argnames and argname not in
                 argname_list and argname not in import_depends_map):
                dependant_argnames.append(argname)
            # Check if argname has dependants
            if argname in var_depends_map:
                argdeps = var_depends_map[argname]
                # RECURSIVE CALL
                append_dependant_argnames(argdeps, dependant_argnames)
    append_dependant_argnames(argname_list, dependant_argnames)

    # Define argnames and dependencies in example code
    # argnames prefixed with dependencies
    argname_list_ = list(dependant_argnames) + argname_list

    # Default example values
    defaults_ = [] if defaults is None else defaults
    num_unknown = (len(argname_list_) - len(defaults_))
    default_vals = ['?'] * num_unknown + list(defaults_)
    arg_val_iter = zip(argname_list_, default_vals)
    infered_defaults = [find_arg_defaultrepr(argname, val)
                        for argname, val in arg_val_iter]
    argdef_lines = ['%s = %s' % (argname, inferrepr)
                    for argname, inferrepr in
                    zip(argname_list_, infered_defaults)]
    import_lines = ut.unique_ordered(import_lines)

    if any([inferrepr == repr('?') for inferrepr in infered_defaults]):
        examplecode_lines.append('# DISABLE_DOCTEST')
    else:
        # Even when the example could run immediately, keep it disabled by default
        examplecode_lines.append('# DISABLE_DOCTEST')

    examplecode_lines.extend(import_lines)
    #examplecode_lines.append('# build test data')
    examplecode_lines.extend(argdef_lines)
    # Default example result assignment
    result_assign = ''
    result_print = None
    if 'return_name' in vars():
        if return_type is not None:
            if return_name is None:
                return_name = 'result'
            result_assign = return_name + ' = '
            result_print = 'print(result)'  # + return_name + ')'
    # Default example call
    if ismethod:
        selfname = argname_list[0]
        methodargs = ', '.join(argname_list[1:])
        tup = (selfname, '.', funcname, '(', methodargs, ')')
        example_call = ''.join(tup)
    else:
        funcargs = ', '.join(argname_list)
        tup = (funcname, '(', funcargs, ')')
        example_call = ''.join(tup)
    # Append call line
    #examplecode_lines.append('# execute function')
    examplecode_lines.append(result_assign + example_call)
    #examplecode_lines.append('# verify results')
    if result_print is not None:
        if return_name != 'result':
            #examplecode_lines.append('result = str(' + return_name + ')')
            result_line_fmt = 'result = (\'{return_name} = %s\' % (ut.repr2({return_name}),))'
            result_line = result_line_fmt.format(return_name=return_name)
            examplecode_lines.append(result_line)
        examplecode_lines.append(result_print)

    # TODO: infer this
    if is_show_func:
        examplecode_lines += [
            'ut.quit_if_noshow()',
            'import plottool as pt',
            'ut.show_if_requested()',
        ]

    examplecode = '\n'.join(examplecode_lines)
    return examplecode
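Resolving argument dependencies can append the same import line more than once (e.g. 'import ibeis' for several ibeis-dependent arguments), so the imports are de-duplicated in order before the example is assembled. A short sketch of that step:

import_lines = [
    'from utool.util_autogen import *  # NOQA',
    'import utool as ut',
    'import ibeis',
    'import ibeis',   # appended again by a second ibeis-dependent argument
]
import_lines = list(dict.fromkeys(import_lines))
print('\n'.join(import_lines))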
Пример #32
0
def download_sharks(XMLdata, number):
    """
    cd ~/work/WS_ALL
    python -m ibeis.scripts.getshark

    >>> from ibeis.scripts.getshark import *  # NOQA
    >>> url = 'www.whaleshark.org/listImages.jsp'
    >>> XMLdata = ut.url_read(url)
    >>> number = None
    """
    # Prepare the output directory for writing, if it doesn't exist
    output_dir = 'sharkimages'
    ut.ensuredir(output_dir)

    dom = parseString(XMLdata)

    # Download files
    if number:
        maxCount = min(number, len(dom.getElementsByTagName('img')))
    else:
        maxCount = len(dom.getElementsByTagName('img'))

    parsed_info = dict(
        img_url_list=[],
        localid_list=[],
        nameid_list=[],
        orig_fname_list=[],
        new_fname_list=[],
    )

    print('Preparing to fetch %i files...' % maxCount)

    for shark in dom.getElementsByTagName('shark'):
        localCount = 0
        for imageset in shark.getElementsByTagName('imageset'):
            for img in imageset.getElementsByTagName('img'):
                localCount += 1

                img_url = img.getAttribute('href')
                orig_fname = split(img_url)[1]
                ext = splitext(orig_fname)[1].lower()
                nameid = shark.getAttribute('number')

                new_fname = '%s-%i%s' % (nameid, localCount, ext)

                parsed_info['img_url_list'].append(img_url)
                parsed_info['nameid_list'].append(nameid)
                parsed_info['localid_list'].append(localCount)
                parsed_info['orig_fname_list'].append(orig_fname)
                parsed_info['new_fname_list'].append(new_fname)

                print('Parsed %i / %i files.' %
                      (len(parsed_info['orig_fname_list']), maxCount))

                if number is not None and len(
                        parsed_info['orig_fname_list']) == number:
                    break
    parsed_info['new_fpath_list'] = [
        join(output_dir, _fname) for _fname in parsed_info['new_fname_list']
    ]

    print('Filtering parsed images')

    # Filter based on image type (keep only jpg/jpeg)
    ext_flags = [
        _fname.endswith('.jpg') or _fname.endswith('.jpeg')
        for _fname in parsed_info['new_fname_list']
    ]
    parsed_info = {
        key: ut.compress(list_, ext_flags)
        for key, list_ in parsed_info.items()
    }

    # Filter to only images matching the appropriate tags
    from ibeis import tag_funcs
    parsed_info['tags_list'] = parse_shark_tags(parsed_info['orig_fname_list'])
    tag_flags = tag_funcs.filterflags_general_tags(
        parsed_info['tags_list'],
        has_any=['view-left'],
        none_match=['qual.*', 'view-top', 'part-.*', 'cropped'],
    )
    parsed_info = {
        key: ut.compress(list_, tag_flags)
        for key, list_ in parsed_info.items()
    }
    print('Tags in chosen images:')
    print(ut.dict_hist(ut.flatten(parsed_info['tags_list'])))

    # Download selected subset
    print('Downloading selected subset')
    _iter = list(
        zip(parsed_info['img_url_list'], parsed_info['new_fpath_list']))
    _iter = ut.ProgressIter(_iter, lbl='downloading sharks')
    for img_url, new_fpath in _iter:
        if not exists(new_fpath):
            ut.download_url(img_url, new_fpath)

    # Remove corrupted or ill-formatted images
    print('Checking for corrupted images')
    import vtool as vt
    noncorrupt_flags = vt.filterflags_valid_images(
        parsed_info['new_fpath_list'])
    parsed_info = {
        key: ut.compress(list_, noncorrupt_flags)
        for key, list_ in parsed_info.items()
    }

    print('Removing small images')
    import numpy as np
    imgsize_list = np.array(
        [vt.open_image_size(gpath) for gpath in parsed_info['new_fpath_list']])
    sqrt_area_list = np.sqrt(np.prod(imgsize_list, axis=1))
    areq_flags_list = sqrt_area_list >= 750
    parsed_info = {
        key: ut.compress(list_, areq_flags_list)
        for key, list_ in parsed_info.items()
    }

    grouped_idxs = ut.group_items(list(range(len(parsed_info['nameid_list']))),
                                  parsed_info['nameid_list'])
    keep_idxs = sorted(
        ut.flatten(
            [idxs for key, idxs in grouped_idxs.items() if len(idxs) >= 2]))
    parsed_info = {
        key: ut.take(list_, keep_idxs)
        for key, list_ in parsed_info.items()
    }

    print('Moving images to secondary directory')
    named_outputdir = 'named-left-sharkimages'
    # Build names
    parsed_info['namedir_fpath_list'] = [
        join(named_outputdir, _nameid, _fname) for _fname, _nameid in zip(
            parsed_info['new_fname_list'], parsed_info['nameid_list'])
    ]
    # Create directories
    ut.ensuredir(named_outputdir)
    named_dirs = ut.unique_ordered(
        list(map(dirname, parsed_info['namedir_fpath_list'])))
    for dir_ in named_dirs:
        ut.ensuredir(dir_)
    # Copy
    ut.copy_files_to(src_fpath_list=parsed_info['new_fpath_list'],
                     dst_fpath_list=parsed_info['namedir_fpath_list'])
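Building the per-name output layout only needs each destination directory created once, so the directory names are reduced to ordered unique values before creation. A pure-Python sketch with made-up paths, using os.makedirs(..., exist_ok=True) as a stand-in for ut.ensuredir:

import os
from os.path import dirname, join

namedir_fpath_list = [
    join('named-left-sharkimages', 'A-001', 'A-001-1.jpg'),
    join('named-left-sharkimages', 'A-001', 'A-001-2.jpg'),
    join('named-left-sharkimages', 'B-002', 'B-002-1.jpg'),
]
named_dirs = list(dict.fromkeys(dirname(fpath) for fpath in namedir_fpath_list))
for dir_ in named_dirs:
    os.makedirs(dir_, exist_ok=True)   # stand-in for ut.ensuredir
print(named_dirs)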
Пример #33
0
def query_vsone_pairs(ibs,
                      vsone_query_pairs,
                      use_cache=False,
                      save_qcache=False):
    """
    does vsone queries to rerank the top few vsmany queries

    Returns:
        tuple: qaid2_qres_vsone, qreq_vsone_

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-query_vsone_pairs

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid = qaids[0]
        >>> filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        >>> use_cache = False
        >>> save_qcache = False
        >>> # execute function
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids)
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid2_qres_vsone, qreq_vsone_ = query_vsone_pairs(ibs, vsone_query_pairs)
        >>> qres_vsone = qaid2_qres_vsone[qaid]
        >>> top_namescore_aids = qres_vsone.get_top_aids().tolist()
        >>> result = str(top_namescore_aids)
        >>> top_namescore_names = ibs.get_annot_names(top_namescore_aids)
        >>> assert top_namescore_names[0] == 'easy', 'top_namescore_names[0]=%r' % (top_namescore_names[0],)
    """
    #vsone_cfgdict = dict(codename='vsone_unnorm')
    #codename = 'vsone_unnorm_dist_ratio_extern_distinctiveness',
    codename = 'vsone_unnorm_dist_ratio'
    vsone_cfgdict = dict(
        index_method='single',
        codename=codename,
    )
    #------------------------
    qaid2_qres_vsone = {}
    for qaid, top_aids in vsone_query_pairs:
        # Perform a query request for each
        cm_list_vsone_, __qreq_vsone_ = ibs.query_chips(
            [qaid],
            top_aids,
            cfgdict=vsone_cfgdict,
            return_request=True,
            use_cache=use_cache,
            save_qcache=save_qcache)
        qaid2_qres_vsone_ = {cm.qaid: cm for cm in cm_list_vsone_}
        qaid2_qres_vsone.update(qaid2_qres_vsone_)
    #------------------------
    # Create pseudo query request because there is no good way to
    # represent the vsone reranking as a single query request and
    # we need one for the score normalizer
    #pseudo_codename_ = codename.replace('unnorm', 'norm') + '_extern_distinctiveness'
    pseudo_codename_ = codename.replace('unnorm',
                                        'norm')  # + '_extern_distinctiveness'
    pseudo_vsone_cfgdict = dict(codename=pseudo_codename_)
    pseudo_qaids = ut.get_list_column(vsone_query_pairs, 0)
    pseudo_daids = ut.unique_ordered(
        ut.flatten(ut.get_list_column(vsone_query_pairs, 1)))
    # FIXME: making the pseudo qreq_ takes a nontrivial amount of time for what
    # should be a trivial task.
    pseudo_qreq_vsone_ = ibs.new_query_request(pseudo_qaids,
                                               pseudo_daids,
                                               cfgdict=pseudo_vsone_cfgdict,
                                               verbose=ut.VERBOSE)
    #pseudo_qreq_vsone_.load_distinctiveness_normalizer()
    qreq_vsone_ = pseudo_qreq_vsone_
    # Hack in a special config name
    qreq_vsone_.qparams.query_cfgstr = '_special' + qreq_vsone_.qparams.query_cfgstr
    return qaid2_qres_vsone, qreq_vsone_
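The pseudo query request spans all (qaid, top_aids) pairs at once: the qaids are column 0 of the shortlist and the daids are the flattened, order-preserving union of the top-aid lists. A minimal sketch of that extraction with invented shortlist values:

vsone_query_pairs = [
    (1, [8, 5, 9]),   # (qaid, reranked top daids) -- values are invented
    (2, [5, 7]),
]
pseudo_qaids = [qaid for qaid, _ in vsone_query_pairs]
pseudo_daids = list(dict.fromkeys(
    aid for _, top_aids in vsone_query_pairs for aid in top_aids))
print(pseudo_qaids)   # [1, 2]
print(pseudo_daids)   # [8, 5, 9, 7]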
Пример #34
0
def show_name_matches(ibs,
                      qaid,
                      name_daid_list,
                      name_fm_list,
                      name_fs_list,
                      name_H1_list,
                      name_featflag_list,
                      qreq_=None,
                      **kwargs):
    """
    Called from chip_match.py

    Args:
        ibs (IBEISController):  ibeis controller object
        qaid (int):  query annotation id
        name_daid_list (list):
        name_fm_list (list):
        name_fs_list (list):
        name_H1_list (list):
        name_featflag_list (list):
        qreq_ (QueryRequest):  query request object with hyper-parameters(default = None)

    Kwargs:
        draw_fmatches, name_rank, fnum, pnum, colorbar_, nonvote_mode,
        fastmode, show_matches, fs, fm_norm, lbl1, lbl2, rect, draw_border,
        cmap, H1, H2, scale_factor1, scale_factor2, draw_pts, draw_ell,
        draw_lines, show_nMatches, all_kpts, in_image, show_query, draw_lbl,
        name_annot_scores, score, rawscore, aid2_raw_rank, show_name,
        show_nid, show_aid, show_annot_score, show_truth, name_score,
        show_name_score, show_name_rank, show_timedelta

    CommandLine:
        python -m ibeis.viz.viz_matches --exec-show_name_matches
        python -m ibeis.viz.viz_matches --test-show_name_matches --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_matches import *  # NOQA
        >>> from ibeis.algo.hots import chip_match
        >>> from ibeis.algo.hots import name_scoring
        >>> import vtool as vt
        >>> from ibeis.algo.hots import _pipeline_helpers as plh  # NOQA
        >>> import numpy as np
        >>> func = chip_match.ChipMatch.show_single_namematch
        >>> sourcecode = ut.get_func_sourcecode(func, stripdef=True, stripret=True,
        >>>                                     strip_docstr=True)
        >>> setup = ut.regex_replace('viz_matches.show_name_matches', '#', sourcecode)
        >>> homog = False
        >>> print(ut.indent(setup, '>>> '))
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('PZ_MTEST', qaid_list=[1])
        >>> cm = cm_list[0]
        >>> cm.score_nsum(qreq_)
        >>> dnid = ibs.get_annot_nids(cm.qaid)
        >>> # +--- COPIED SECTION
        >>> locals_ = locals()
        >>> var_list = ut.exec_func_src(
        >>>     func, locals_=locals_,
        >>>     sentinal='name_annot_scores = cm.annot_score_list.take(sorted_groupxs')
        >>> exec(ut.execstr_dict(var_list))
        >>> # L___ COPIED SECTION
        >>> kwargs = {}
        >>> show_name_matches(ibs, qaid, name_daid_list, name_fm_list,
        >>>                   name_fs_list, name_h1_list, name_featflag_list,
        >>>                   qreq_=qreq_, **kwargs)
        >>> ut.quit_if_noshow()
        >>> ut.show_if_requested()
    """
    #print("SHOW NAME MATCHES")
    #print(ut.repr2(kwargs, nl=True))
    #from ibeis import constants as const
    from ibeis import tag_funcs
    draw_fmatches = kwargs.pop('draw_fmatches', True)
    rchip1, kpts1 = get_query_annot_pair_info(ibs, qaid, qreq_, draw_fmatches)
    rchip2_list, kpts2_list = get_data_annot_pair_info(ibs, name_daid_list,
                                                       qreq_, draw_fmatches)
    fm_list = name_fm_list
    fs_list = name_fs_list
    featflag_list = name_featflag_list
    offset_list, sf_list, bbox_list = show_multichip_match(
        rchip1, rchip2_list, kpts1, kpts2_list, fm_list, fs_list,
        featflag_list, **kwargs)
    aid_list = [qaid] + name_daid_list
    annotate_matches3(ibs,
                      aid_list,
                      bbox_list,
                      offset_list,
                      name_fm_list,
                      name_fs_list,
                      qreq_=None,
                      **kwargs)
    ax = pt.gca()
    title = vh.get_query_text(ibs,
                              None,
                              name_daid_list,
                              False,
                              qaid=qaid,
                              **kwargs)

    pt.set_title(title, ax)

    # Case tags
    annotmatch_rowid_list = ibs.get_annotmatch_rowid_from_superkey(
        [qaid] * len(name_daid_list), name_daid_list)
    annotmatch_rowid_list = ut.filter_Nones(annotmatch_rowid_list)
    tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid_list)
    if not ut.get_argflag('--show'):  # False:
        tags_list = tag_funcs.consolodate_annotmatch_tags(tags_list)
    tag_list = ut.unique_ordered(ut.flatten(tags_list))

    name_rank = kwargs.get('name_rank', None)
    truth = get_multitruth(ibs, aid_list)

    xlabel = {1: 'Correct ID', 0: 'Incorrect ID', 2: 'Unknown ID'}[truth]

    if False:
        if name_rank is None:
            xlabel = {1: 'Genuine', 0: 'Imposter', 2: 'Unknown'}[truth]
            #xlabel = {1: 'True', 0: 'False', 2: 'Unknown'}[truth]
        else:
            if name_rank == 0:
                xlabel = {
                    1: 'True Positive',
                    0: 'False Positive',
                    2: 'Unknown'
                }[truth]
            else:
                xlabel = {
                    1: 'False Negative',
                    0: 'True Negative',
                    2: 'Unknown'
                }[truth]

    if len(tag_list) > 0:
        xlabel += '\n' + ', '.join(tag_list)

    pt.set_xlabel(xlabel)
    return ax
Пример #35
0
def get_all_descendant_rowids(depc, tablename, root_rowids, config=None,
                              ensure=True, eager=True, nInput=None,
                              recompute=False, recompute_all=False,
                              levels_up=None, _debug=False):
    r"""
    Connects `root_rowids` to rowids in `tablename`, and computes all
    values needed along the way. This is the main workhorse function for
    dependency computations.

    Args:
        tablename (str): table to compute dependencies to
        root_rowids (list): rowids for ``tablename``
        config (dict): config applicable for all tables (default = None)
        ensure (bool): eager evaluation if True(default = True)
        eager (bool): (default = True)
        nInput (None): (default = None)
        recompute (bool): (default = False)
        recompute_all (bool): (default = False)
        levels_up (int): only partially compute dependencies (default = 0)
        _debug (bool): (default = False)

    CommandLine:
        python -m dtool.depcache_control --exec-get_all_descendant_rowids:0
        python -m dtool.depcache_control --exec-get_all_descendant_rowids:1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> tablename = 'spam'
        >>> root_rowids = [1, 2]
        >>> config1 = {'dim_size': 500}
        >>> config2 = {'dim_size': 100}
        >>> config3 = {'dim_size': 500, 'adapt_shape': False}
        >>> ensure, eager, nInput = True, True, None
        >>> _debug = True
        >>> rowid_dict1 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config1, ensure, eager, nInput, _debug=_debug)
        >>> rowid_dict2 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config2, ensure, eager, nInput, _debug=_debug)
        >>> rowid_dict3 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config3, ensure, eager, nInput, _debug=_debug)
        >>> result1 = 'rowid_dict1 = ' + ut.repr3(rowid_dict1, nl=1)
        >>> result2 = 'rowid_dict2 = ' + ut.repr3(rowid_dict2, nl=1)
        >>> result3 = 'rowid_dict3 = ' + ut.repr3(rowid_dict3, nl=1)
        >>> result = '\n'.join([result1, result2, result3])
        >>> print(result)
        rowid_dict1 = {
            'chip': [1, 2],
            'dummy_annot': [1, 2],
            'fgweight': [1, 2],
            'keypoint': [1, 2],
            'probchip': [1, 2],
            'spam': [1, 2],
        }
        rowid_dict2 = {
            'chip': [3, 4],
            'dummy_annot': [1, 2],
            'fgweight': [3, 4],
            'keypoint': [3, 4],
            'probchip': [1, 2],
            'spam': [3, 4],
        }
        rowid_dict3 = {
            'chip': [1, 2],
            'dummy_annot': [1, 2],
            'fgweight': [5, 6],
            'keypoint': [5, 6],
            'probchip': [1, 2],
            'spam': [5, 6],
        }


    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> _debug = True
        >>> tablename = 'vsmany'
        >>> config = depc.configclass_dict['vsmany']()
        >>> root_rowids = [1, 2, 3]
        >>> ensure, eager, nInput = False, True, None
        >>> # Get rowids of algo ( should be None )
        >>> rowid_dict = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config, ensure, eager, nInput,
        >>>     _debug=_debug)
        >>> result = ut.repr3(rowid_dict, nl=1)
        >>> print(result)
        {
            'dummy_annot': [1, 2, 3],
            'vsmany': [None, None, None],
        }

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> # Make sure algo config can correctly get properties
        >>> depc = testdata_depc()
        >>> tablename = 'chip'
        >>> recompute = False
        >>> recompute_all = False
        >>> _debug = True
        >>> root_rowids = [1, 2]
        >>> configclass = depc.configclass_dict['chip']
        >>> config_ = configclass()
        >>> config1 = depc.configclass_dict['vsmany'](dim_size=500)
        >>> config2 = depc.configclass_dict['vsmany'](dim_size=100)
        >>> config = config2
        >>> prop_dicts1 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config=config1, _debug=_debug)
        >>> prop_dicts2 = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config=config2, _debug=_debug)
        >>> print(prop_dicts2)
        >>> print(prop_dicts1)
        >>> assert prop_dicts1 != prop_dicts2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.depcache_control import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> exec(ut.execstr_funckw(depc.get_all_descendant_rowids), globals())
        >>> _debug = True
        >>> qaids, daids = [1, 2, 4], [2, 3, 4]
        >>> root_rowids = list(zip(*ut.product(qaids, daids)))
        >>> request = depc.new_request('vsone', qaids, daids)
        >>> results = request.execute()
        >>> tablename = 'vsone'
        >>> rowid_dict = depc.get_all_descendant_rowids(
        >>>     tablename, root_rowids, config=None, _debug=_debug)
    """
    # TODO: Need to have a nice way of ensuring configs don't overlap
    # via namespaces.
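    # Overview: (1) resolve the dependency levels leading to `tablename`,
    # (2) seed `rowid_dict` with the root rowids, then (3) expand each
    # level in order until `tablename` itself has an entry in `rowid_dict`.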
    _debug = depc._debug if _debug is None else _debug
    indenter = ut.Indenter('[Descend-to-%s]' % (tablename,), enabled=_debug)
    if _debug:
        indenter.start()
        print(' * GET DESCENDANT ROWIDS %s ' % (tablename,))
        print(' * config = %r' % (config,))
    dependency_levels = depc.get_dependencies(tablename)
    if levels_up is not None:
        dependency_levels = dependency_levels[:-levels_up]
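    # `dependency_levels` is a list of lists of table names ordered from the
    # root outward; for the 'spam' doctest above it would look roughly like
    # [['dummy_annot'], ['chip', 'probchip'], ['keypoint'], ['fgweight'],
    #  ['spam']] (illustrative grouping). When `levels_up` is given, the tail
    # of the chain is trimmed so only part of it is computed.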

    configclass_levels = [
        [depc.configclass_dict.get(tablekey, None)
         for tablekey in keys]
        for keys in dependency_levels
    ]
    if _debug:
        print('[depc] dependency_levels = %s' %
              ut.repr3(dependency_levels, nl=1))
        print('[depc] config_levels = %s' %
              ut.repr3(configclass_levels, nl=1))
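    # Note: within this function `configclass_levels` only feeds the debug
    # printout above; per-table config resolution presumably happens inside
    # `_expand_level_rowids`.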

    # TODO: better support for multi-edges
    if (len(root_rowids) > 0 and ut.isiterable(root_rowids[0]) and
         not depc[tablename].ismulti):
        rowid_dict = {}
        for colx, col in enumerate(root_rowids):
            rowid_dict[depc.root + '%d' % (colx + 1,)] = col
        rowid_dict[depc.root] = ut.unique_ordered(ut.flatten(root_rowids))
    else:
        rowid_dict = {depc.root: root_rowids}
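    # At this point `rowid_dict` holds only root-table entries. Multi-column
    # roots (e.g. the query/database annot pairs used by 'vsone') get one
    # numbered key per column ('<root>1', '<root>2', ...) plus the flattened
    # unique union under the plain root key; plain roots are stored directly.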

    # Ensure that each level of ``tablename``'s dependencies has been computed
    for level_keys in dependency_levels[1:]:
        if _debug:
            print(' * level_keys %s ' % (level_keys,))
        # For each table in the level
        for tablekey in level_keys:
            try:
                child_rowids = depc._expand_level_rowids(
                    tablename, tablekey, rowid_dict, ensure, eager, nInput,
                    config, recompute, recompute_all, _debug)
            except Exception as ex:
                table = depc[tablekey]  # NOQA
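                # `table` is looked up only so it appears in the
                # `ut.printex` key dump below (hence the NOQA).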
                keys = ['tablename', 'tablekey', 'rowid_dict', 'config',
                        'table', 'dependency_levels']
                ut.printex(ex, 'error expanding rowids', keys=keys)
                raise
            rowid_dict[tablekey] = child_rowids
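            # Each expanded table adds its rowids to `rowid_dict`, which is
            # handed back to `_expand_level_rowids` so later levels can
            # resolve their parents' rowids by table name.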
    if _debug:
        print(' GOT DESCENDANT ROWIDS')
        indenter.stop()
    return rowid_dict
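
# Usage sketch for the `levels_up` option, which none of the doctests above
# exercises. It assumes the example dependency cache from
# `dtool.example_depcache` (as in the doctests); with levels_up=1 the final
# dependency level ('spam' itself) should be skipped so only its ancestors
# are computed.
if __name__ == '__main__':
    from dtool.example_depcache import testdata_depc
    depc = testdata_depc()
    partial = depc.get_all_descendant_rowids(
        'spam', [1, 2], config={'dim_size': 500}, levels_up=1)
    # Expect entries for ancestor tables (e.g. 'chip', 'keypoint') but no
    # 'spam' key, since the last level was trimmed.
    print(partial)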