Example #1
0
    def update_registry(drive):
        """Drop registered paths on ``drive`` that no longer exist on disk.

        Mutates ``drive.fpath_list`` / ``drive.dpath_list`` in place and
        persists the filtered lists to ``drive.cache``.
        """
        print('Updating registered files in %r' % (drive,))
        # Update existing files
        # Existence flags per registered path (ProgIter only adds progress output)
        fpath_exists_list = list(map(exists, ut.ProgIter(drive.fpath_list, 'checkexist fpath', freq=1000)))
        dpath_exists_list = list(map(exists, ut.ProgIter(drive.dpath_list, 'checkexist dpath', freq=1000)))
        if all(fpath_exists_list):
            print('No change in file structure')
        else:
            print('%d/%d files no longer exist' % (
                len(drive.fpath_list) - sum(fpath_exists_list),
                len(drive.fpath_list)))
            removed_fpaths = ut.compress(drive.fpath_list, ut.not_list(fpath_exists_list))
            print('removed_fpaths = %s' % (ut.list_str(removed_fpaths),))
        if all(dpath_exists_list):
            print('No change in dpath structure')
        else:
            print('%d/%d dirs no longer exist' % (
                len(drive.dpath_list) - sum(dpath_exists_list),
                len(drive.dpath_list)))
            removed_dpaths = ut.compress(
                drive.dpath_list,
                ut.not_list(dpath_exists_list))
            print('removed_dpaths = %s' % (ut.list_str(removed_dpaths),))

        # Keep only still-existing paths and persist the filtered registry
        drive.fpath_list = ut.compress(drive.fpath_list, fpath_exists_list)
        drive.dpath_list = ut.compress(drive.dpath_list, dpath_exists_list)
        drive.cache.save('fpath_list', drive.fpath_list)
        drive.cache.save('dpath_list', drive.dpath_list)
Example #2
0
def color_by_nids(graph, unique_nids=None, ibs=None, nid2_color_=None):
    """ Colors edges and nodes by nid """
    # TODO use ut.color_nodes
    import plottool as pt

    # Make sure every node carries an 'nid' attribute before reading them back
    ensure_graph_nid_labels(graph, unique_nids, ibs=ibs)
    node_to_nid = nx.get_node_attributes(graph, 'nid')
    unique_nids = ut.unique(node_to_nid.values())
    ncolors = len(unique_nids)
    if (ncolors) == 1:
        # Single name: use the designated "unknown" purple
        unique_colors = [pt.UNKNOWN_PURP]
    else:
        if nid2_color_ is not None:
            # Over-request colors so explicitly-assigned ones are less likely
            # to collide with the generated palette
            unique_colors = pt.distinct_colors(ncolors + len(nid2_color_) * 2)
        else:
            unique_colors = pt.distinct_colors(ncolors)
    # Find edges and aids strictly between two nids
    nid_to_color = dict(zip(unique_nids, unique_colors))
    if nid2_color_ is not None:
        # HACK NEED TO ENSURE COLORS ARE NOT REUSED
        nid_to_color.update(nid2_color_)
    edge_aids = list(graph.edges())
    edge_nids = ut.unflat_take(node_to_nid, edge_aids)
    # Only color edges whose two endpoints share the same nid
    flags = [nids[0] == nids[1] for nids in edge_nids]
    flagged_edge_aids = ut.compress(edge_aids, flags)
    flagged_edge_nids = ut.compress(edge_nids, flags)
    flagged_edge_colors = [nid_to_color[nids[0]] for nids in flagged_edge_nids]
    edge_to_color = dict(zip(flagged_edge_aids, flagged_edge_colors))
    node_to_color = ut.map_dict_vals(ut.partial(ut.take, nid_to_color),
                                     node_to_nid)
    # NOTE(review): (graph, name, values) argument order implies networkx < 2.0
    nx.set_edge_attributes(graph, 'color', edge_to_color)
    nx.set_node_attributes(graph, 'color', node_to_color)
Example #3
0
def color_by_nids(graph, unique_nids=None, ibs=None, nid2_color_=None):
    """ Colors edges and nodes by nid """
    # TODO use ut.color_nodes
    import plottool as pt

    # Guarantee 'nid' node attributes exist before collecting them
    ensure_graph_nid_labels(graph, unique_nids, ibs=ibs)
    node_to_nid = nx.get_node_attributes(graph, 'nid')
    unique_nids = ut.unique(node_to_nid.values())
    ncolors = len(unique_nids)
    if (ncolors) == 1:
        # Only one name present: fall back to the "unknown" purple
        unique_colors = [pt.UNKNOWN_PURP]
    else:
        if nid2_color_ is not None:
            # Extra palette entries reduce collisions with preassigned colors
            unique_colors = pt.distinct_colors(ncolors + len(nid2_color_) * 2)
        else:
            unique_colors = pt.distinct_colors(ncolors)
    # Find edges and aids strictly between two nids
    nid_to_color = dict(zip(unique_nids, unique_colors))
    if nid2_color_ is not None:
        # HACK NEED TO ENSURE COLORS ARE NOT REUSED
        nid_to_color.update(nid2_color_)
    edge_aids = list(graph.edges())
    edge_nids = ut.unflat_take(node_to_nid, edge_aids)
    # Restrict coloring to intra-name edges (both endpoints share one nid)
    flags = [nids[0] == nids[1] for nids in edge_nids]
    flagged_edge_aids = ut.compress(edge_aids, flags)
    flagged_edge_nids = ut.compress(edge_nids, flags)
    flagged_edge_colors = [nid_to_color[nids[0]] for nids in flagged_edge_nids]
    edge_to_color = dict(zip(flagged_edge_aids, flagged_edge_colors))
    node_to_color = ut.map_dict_vals(ut.partial(ut.take, nid_to_color), node_to_nid)
    # NOTE(review): (graph, name, values) argument order is the networkx < 2.0 API
    nx.set_edge_attributes(graph, 'color', edge_to_color)
    nx.set_node_attributes(graph, 'color', node_to_color)
Example #4
0
    def update_registry(drive):
        """Prune registered file/dir paths on ``drive`` that no longer exist.

        Updates ``drive.fpath_list`` / ``drive.dpath_list`` in place and saves
        the pruned lists back into ``drive.cache``.
        """
        print('Updating registered files in %r' % (drive, ))
        # Update existing files
        # Compute an existence flag for every registered file and directory
        fpath_exists_list = list(
            map(exists,
                ut.ProgIter(drive.fpath_list, 'checkexist fpath', freq=1000)))
        dpath_exists_list = list(
            map(exists,
                ut.ProgIter(drive.dpath_list, 'checkexist dpath', freq=1000)))
        if all(fpath_exists_list):
            print('No change in file structure')
        else:
            print('%d/%d files no longer exist' %
                  (len(drive.fpath_list) - sum(fpath_exists_list),
                   len(drive.fpath_list)))
            removed_fpaths = ut.compress(drive.fpath_list,
                                         ut.not_list(fpath_exists_list))
            print('removed_fpaths = %s' % (ut.repr2(removed_fpaths), ))
        if all(dpath_exists_list):
            print('No change in dpath structure')
        else:
            print('%d/%d dirs no longer exist' %
                  (len(drive.dpath_list) - sum(dpath_exists_list),
                   len(drive.dpath_list)))
            removed_dpaths = ut.compress(drive.dpath_list,
                                         ut.not_list(dpath_exists_list))
            print('removed_dpaths = %s' % (ut.repr2(removed_dpaths), ))

        # Keep only paths that still exist and persist the result
        drive.fpath_list = ut.compress(drive.fpath_list, fpath_exists_list)
        drive.dpath_list = ut.compress(drive.dpath_list, dpath_exists_list)
        drive.cache.save('fpath_list', drive.fpath_list)
        drive.cache.save('dpath_list', drive.dpath_list)
Example #5
0
def get_valid_imgsetids(ibs, min_num_gids=0, processed=None, shipped=None, is_occurrence=None):
    r"""
    FIX NAME imgagesetids

    Returns:
        list_ (list):  list of all imageset ids

    RESTful:
        Method: GET
        URL:    /api/imageset/
    """
    imgsetid_list = ibs._get_all_imgsetids()
    # Each enabled filter below narrows imgsetid_list in turn
    if min_num_gids > 0:
        num_gids_list = ibs.get_imageset_num_gids(imgsetid_list)
        keep_flags = [num >= min_num_gids for num in num_gids_list]
        imgsetid_list = ut.compress(imgsetid_list, keep_flags)
    if processed is not None:
        stored_flags = ibs.get_imageset_processed_flags(imgsetid_list)
        target = 1 if processed else 0
        imgsetid_list = ut.compress(
            imgsetid_list, [flag == target for flag in stored_flags])
    if shipped is not None:
        stored_flags = ibs.get_imageset_shipped_flags(imgsetid_list)
        target = 1 if shipped else 0
        imgsetid_list = ut.compress(
            imgsetid_list, [flag == target for flag in stored_flags])
    if is_occurrence is not None:
        stored_flags = ibs.get_imageset_isoccurrence(imgsetid_list)
        imgsetid_list = ut.compress(
            imgsetid_list, [flag == is_occurrence for flag in stored_flags])
    return imgsetid_list
def _load_singles(qreq_):
    """Load cached ChipMatch results for the query aids of ``qreq_``.

    Returns:
        dict: maps each qaid whose cache file exists to its loaded ChipMatch.
    """
    # Find existing cached chip matches
    # Try loading as many as possible
    fpath_list = qreq_.get_chipmatch_fpaths(qreq_.qaids)
    exists_flags = [exists(fpath) for fpath in fpath_list]
    # Restrict to (qaid, fpath) pairs that are actually on disk
    qaids_hit = ut.compress(qreq_.qaids, exists_flags)
    fpaths_hit = ut.compress(fpath_list, exists_flags)
    # First, try a fast reload assuming no errors
    fpath_iter = ut.ProgIter(
        fpaths_hit,
        length=len(fpaths_hit),
        enabled=len(fpaths_hit) > 1,
        label='loading cache hits',
        adjust=True,
        freq=1,
    )
    try:
        qaid_to_hit = {
            qaid: chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
            for qaid, fpath in zip(qaids_hit, fpath_iter)
        }
    except chip_match.NeedRecomputeError as ex:
        # Fallback to a slow reload
        # (the fast path aborts at the first stale entry; the fallback
        # handles each file individually)
        ut.printex(ex, 'Some cached results need to recompute', iswarning=True)
        qaid_to_hit = _load_singles_fallback(fpaths_hit)
    return qaid_to_hit
Example #7
0
def nx_from_matrix(weight_matrix, nodes=None, remove_self=True):
    """Build an undirected graph from the nonzero entries of a weight matrix.

    Each nonzero (i, j) cell becomes an edge between nodes[i] and nodes[j]
    with a 'weight' attribute and a 'label' attribute (weight to 2 decimals).
    Self loops are dropped when remove_self is True.
    """
    import networkx as nx
    import utool as ut
    import numpy as np
    if nodes is None:
        nodes = list(range(len(weight_matrix)))
    # Flatten the matrix and recover the (row, col) index of every cell
    flat_weights = weight_matrix.ravel()
    all_multi_idxs = np.unravel_index(
        np.arange(weight_matrix.size), weight_matrix.shape)

    # Discard entries whose weight is numerically zero
    nonzero_flags = np.logical_not(np.isclose(flat_weights, 0))
    weight_list = ut.compress(flat_weights, nonzero_flags)
    idx_pairs = ut.compress(list(zip(*all_multi_idxs)), nonzero_flags)
    edge_list = ut.lmap(tuple, ut.unflat_take(nodes, idx_pairs))

    if remove_self:
        # Keep only edges between two distinct endpoints
        not_loop = [u != v for u, v in edge_list]
        edge_list = ut.compress(edge_list, not_loop)
        weight_list = ut.compress(weight_list, not_loop)

    graph = nx.Graph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edge_list)
    label_list = ['%.2f' % w for w in weight_list]
    nx.set_edge_attributes(graph, 'weight', dict(zip(edge_list, weight_list)))
    nx.set_edge_attributes(graph, 'label', dict(zip(edge_list, label_list)))
    return graph
Example #8
0
def set_annot_lblannot_from_value(ibs, aid_list, value_list, _lbltype, ensure=True):
    """
    Associates the annot and lblannot of a specific type and value
    Adds the lblannot if it doesnt exist.
    Wrapper around convenience function for set_annot_from_lblannot_rowid
    """
    assert value_list is not None
    assert _lbltype is not None
    if ensure:
        # NOTE(review): ensure currently has no effect — confirm intent
        pass
    # a value consisting of an empty string or all spaces is set to the default
    DEFAULT_VALUE = const.KEY_DEFAULTS[_lbltype]
    EMPTY_KEY = const.EMPTY_KEY
    # setting a name to DEFAULT_VALUE or EMPTY is equivalent to unnaming it
    value_list_ = [DEFAULT_VALUE if value.strip() == EMPTY_KEY else value for value in value_list]
    notdefault_list = [value != DEFAULT_VALUE for value in value_list_]
    # Annots whose value was reset to default get their relations deleted
    aid_list_to_delete = ut.get_dirty_items(aid_list, notdefault_list)
    # Set all the valid valids
    aids_to_set   = ut.compress(aid_list, notdefault_list)
    values_to_set = ut.compress(value_list_, notdefault_list)
    ibs.delete_annot_relations_oftype(aid_list_to_delete, _lbltype)
    # remove the relationships that have now been unnamed
    # Convert names into lblannot_rowid
    # FIXME: This function should not be able to set label realationships
    # to labels that have not been added!!
    # This is an inefficient way of getting lblannot_rowids!
    lbltype_rowid_list = [ibs.lbltype_ids[_lbltype]] * len(values_to_set)
    # auto ensure
    lblannot_rowid_list = ibs.add_lblannots(lbltype_rowid_list, values_to_set)
    # Call set_annot_from_lblannot_rowid to finish the conditional adding
    ibs.set_annot_lblannot_from_rowid(aids_to_set, lblannot_rowid_list, _lbltype)
Example #9
0
def show_function_usage(fname, funcname_list, dpath_list):
    """Grep ``dpath_list`` for usages of each function name and print counts.

    Function names found exactly once overall are collected and printed at
    the end as 'flagged' (likely dead or single-use code).
    """
    # Check to see for function usage
    # Word-boundary regexes; empty names are skipped
    funcname_list = [r'\b%s\b' % (funcname.strip(),) for funcname in funcname_list if len(funcname) > 0]
    flagged_funcnames = []
    for funcname in funcname_list:
        found_filestr_list, found_lines_list, found_lxs_list = ut.grep([funcname], dpath_list=dpath_list)
        total = 0
        for lines in found_lines_list:
            total += len(lines)
        funcname_ = funcname.replace('\\b', '')
        print(funcname_ + ' ' + str(total))
        if total == 1:
            flagged_funcnames.append(funcname_)
        # See where external usage is
        # NOTE(review): despite the name, this keeps files where
        # fname == fname_ (matches *inside* fname itself); confirm whether
        # `!=` was intended for "external" usage.
        isexternal_list = [fname == fname_ for fname_ in found_filestr_list]
        external_filestr_list = ut.compress(found_filestr_list, isexternal_list)
        external_lines_list = ut.compress(found_lines_list, isexternal_list)
        #external_lxs_list = ut.compress(found_lxs_list, isexternal_list)
        if len(external_filestr_list) == 0:
            print(' no external usage')
        else:
            for filename, lines in zip(external_filestr_list, external_lines_list):
                print(' * filename=%r' % (filename,))
                print(ut.list_str(lines))
            #print(ut.list_str(list(zip(external_filestr_list, external_lines_list))))
    print('----------')
    print('flagged:')
    print('\n'.join(flagged_funcnames))
def set_annot_lblannot_from_value(ibs, aid_list, value_list, _lbltype, ensure=True):
    """
    Associates the annot and lblannot of a specific type and value
    Adds the lblannot if it doesnt exist.
    Wrapper around convenience function for set_annot_from_lblannot_rowid
    """
    assert value_list is not None
    assert _lbltype is not None
    if ensure:
        # NOTE(review): the ensure flag is currently a no-op — confirm intent
        pass
    # a value consisting of an empty string or all spaces is set to the default
    DEFAULT_VALUE = const.KEY_DEFAULTS[_lbltype]
    EMPTY_KEY = const.EMPTY_KEY
    # setting a name to DEFAULT_VALUE or EMPTY is equivalent to unnaming it
    value_list_ = [
        DEFAULT_VALUE if value.strip() == EMPTY_KEY else value for value in value_list
    ]
    notdefault_list = [value != DEFAULT_VALUE for value in value_list_]
    # Annots reset to the default value lose their existing relations
    aid_list_to_delete = ut.get_dirty_items(aid_list, notdefault_list)
    # Set all the valid valids
    aids_to_set = ut.compress(aid_list, notdefault_list)
    values_to_set = ut.compress(value_list_, notdefault_list)
    ibs.delete_annot_relations_oftype(aid_list_to_delete, _lbltype)
    # remove the relationships that have now been unnamed
    # Convert names into lblannot_rowid
    # FIXME: This function should not be able to set label realationships
    # to labels that have not been added!!
    # This is an inefficient way of getting lblannot_rowids!
    lbltype_rowid_list = [ibs.lbltype_ids[_lbltype]] * len(values_to_set)
    # auto ensure
    lblannot_rowid_list = ibs.add_lblannots(lbltype_rowid_list, values_to_set)
    # Call set_annot_from_lblannot_rowid to finish the conditional adding
    ibs.set_annot_lblannot_from_rowid(aids_to_set, lblannot_rowid_list, _lbltype)
Example #11
0
def update_imageset_info(ibs, imageset_rowid_list):
    r"""
    sets start and end time for imagesets

    FIXME: should not need to bulk update, should be handled as it goes

    RESTful:
        Method: PUT
        URL:    /api/imageset/info/

    Example:
        >>> # DOCTEST_DISABLE
        >>> imageset_rowid_list = ibs.get_valid_imgsetids()
    """
    gids_per_imageset = ibs.get_imageset_gids(imageset_rowid_list)
    # Skip imagesets with no images; min/max would fail on empty lists
    nonempty_flags = [len(gids) > 0 for gids in gids_per_imageset]
    gids_list = ut.compress(gids_per_imageset, nonempty_flags)
    imgsetid_list = ut.compress(imageset_rowid_list, nonempty_flags)
    unixtimes_list = ibs.unflat_map(ibs.get_image_unixtime, gids_list)
    # TODO: replace -1's with nans and do nanmin
    start_times = [min(unixtimes) for unixtimes in unixtimes_list]
    end_times = [max(unixtimes) for unixtimes in unixtimes_list]
    ibs.set_imageset_start_time_posix(imgsetid_list, start_times)
    ibs.set_imageset_end_time_posix(imgsetid_list, end_times)
def filter_part_set(
    ibs,
    part_rowid_list,
    include_only_aid_list=None,
    is_staged=False,
    viewpoint='no-filter',
    minqual=None,
):
    """Apply a chain of optional filters to part rowids.

    Returns the surviving rowids, sorted ascending.  ``is_staged`` uses
    identity checks so passing None disables that filter entirely.
    """
    # -- valid part_rowid filtering --

    # filter by is_staged (True keeps staged parts, False keeps unstaged)
    if is_staged is True:
        staged_flags = ibs.get_part_staged_flags(part_rowid_list)
        part_rowid_list = ut.compress(part_rowid_list, staged_flags)
    elif is_staged is False:
        staged_flags = ibs.get_part_staged_flags(part_rowid_list)
        part_rowid_list = ut.filterfalse_items(part_rowid_list, staged_flags)

    if include_only_aid_list is not None:
        gid_list = ibs.get_part_gids(part_rowid_list)
        keep_flags = [gid in include_only_aid_list for gid in gid_list]
        part_rowid_list = ut.compress(part_rowid_list, keep_flags)
    if viewpoint != 'no-filter':
        viewpoint_list = ibs.get_part_viewpoints(part_rowid_list)
        keep_flags = [vp == viewpoint for vp in viewpoint_list]
        part_rowid_list = ut.compress(part_rowid_list, keep_flags)
    if minqual is not None:
        part_rowid_list = ibs.filter_part_rowids_to_quality(
            part_rowid_list, minqual, unknown_ok=True)
    return sorted(part_rowid_list)
Example #13
0
    def hardcase_review_gen(infr):
        """
        Subiterator for hardcase review

        Re-review non-confident edges that vsone did not classify correctly
        """
        infr.print('==============================', color='white')
        infr.print('--- HARDCASE PRIORITY LOOP ---', color='white')

        verifiers = infr.learn_evaluation_verifiers()
        verif = verifiers['match_state']

        # Restrict to edges with a real decision (positive/negative/incomparable)
        edges_ = list(infr.edges())
        real_ = list(infr.edge_decision_from(edges_))
        flags_ = [r in {POSTV, NEGTV, INCMP} for r in real_]
        real = ut.compress(real_, flags_)
        edges = ut.compress(edges_, flags_)

        # Hardness is the complement of the verifier's easiness score
        hardness = 1 - verif.easiness(edges, real)

        if True:
            # Diagnostic dump of per-edge hardness for inspection
            df = pd.DataFrame({'edges': edges, 'real': real})
            df['hardness'] = hardness

            pred = verif.predict(edges)
            df['pred'] = pred.values

            # NOTE(review): sort_values result is discarded (not inplace), so
            # the printed frame is unsorted — confirm intent.
            df.sort_values('hardness', ascending=False)
            infr.print('hardness analysis')
            infr.print(str(df))

            infr.print('infr status: ' + ut.repr4(infr.status()))

        # Don't re-review anything that was confidently reviewed
        # CONFIDENCE = const.CONFIDENCE
        # CODE_TO_INT = CONFIDENCE.CODE_TO_INT.copy()
        # CODE_TO_INT[CONFIDENCE.CODE.UNKNOWN] = 0
        # conf = ut.take(CODE_TO_INT, infr.gen_edge_values(
        #     'confidence', edges, on_missing='default',
        #     default=CONFIDENCE.CODE.UNKNOWN))

        # This should only be run with certain params
        assert not infr.params['autoreview.enabled']
        assert not infr.params['redun.enabled']
        assert not infr.params['ranking.enabled']
        assert infr.params['inference.enabled']
        # const.CONFIDENCE.CODE.PRETTY_SURE
        if infr.params['queue.conf.thresh'] is None:
            # != 'pretty_sure':
            infr.print('WARNING: should queue.conf.thresh = "pretty_sure"?')

        # work around add_candidate_edges
        infr.prioritize(metric='hardness', edges=edges,
                        scores=hardness)
        infr.set_edge_attrs('hardness', ut.dzip(edges, hardness))
        # Delegate the actual review iteration to the inner priority generator
        for _ in infr._inner_priority_gen(use_refresh=False):
            yield _
Example #14
0
 def fuzzy_filter_columns(self, fuzzy_headers):
     """Keep only columns whose header tags fuzzy-match any of ``fuzzy_headers``.

     Filters header, header_tags, row_data, and short_header in place.
     """
     import utool as ut
     col_flags = ut.filterflags_general_tags(
         self.header_tags, logic='or',
         in_any=fuzzy_headers)
     self.header = ut.compress(self.header, col_flags)
     self.header_tags = ut.compress(self.header_tags, col_flags)
     # listT transposes rows<->columns so columns can be compressed as rows
     self.row_data = ut.listT(ut.compress(ut.listT(self.row_data), col_flags))
     if self.short_header is not None:
         self.short_header = ut.compress(self.short_header, col_flags)
Example #15
0
 def fuzzy_filter_columns(self, fuzzy_headers):
     """Drop columns whose header tags match none of ``fuzzy_headers``.

     Mutates header, header_tags, row_data, and short_header in place.
     """
     import utool as ut
     col_flags = ut.filterflags_general_tags(self.header_tags,
                                             logic='or',
                                             in_any=fuzzy_headers)
     self.header = ut.compress(self.header, col_flags)
     self.header_tags = ut.compress(self.header_tags, col_flags)
     # Transpose, filter columns as rows, transpose back
     self.row_data = ut.listT(
         ut.compress(ut.listT(self.row_data), col_flags))
     if self.short_header is not None:
         self.short_header = ut.compress(self.short_header, col_flags)
Example #16
0
 def compress_rows(self, flags, with_header=True, inplace=True):
     """Keep only the rows whose flag is truthy.

     When ``with_header`` the first flag must be True so the header row
     survives; otherwise flags apply to data rows only and the header is
     kept unconditionally.  Returns self (a deepcopy when not inplace).
     """
     import utool as ut
     if not inplace:
         import copy
         self = copy.deepcopy(self)
     if not with_header:
         kept_rows = ut.compress(self.row_data[1:], flags)
         self.row_data = self.row_data[0:1] + kept_rows
     else:
         assert flags[0] is True
         self.row_data = ut.compress(self.row_data, flags)
     return self
Example #17
0
 def compress_rows(self, flags, with_header=True, inplace=True):
     """Filter table rows by boolean flags.

     With ``with_header`` True the flag list covers every row (and the
     header's flag must be True); otherwise the header is always retained
     and flags cover only the data rows.  Returns the mutated (or copied)
     table.
     """
     import utool as ut
     if not inplace:
         import copy
         self = copy.deepcopy(self)
     if with_header:
         assert flags[0] is True
         self.row_data = ut.compress(self.row_data, flags)
     else:
         header_row = self.row_data[0:1]
         self.row_data = header_row + ut.compress(self.row_data[1:], flags)
     return self
Example #18
0
def possible_mana_combinations(land_list, deck=None):
    """
    Enumerate the distinct mana combinations producible by tapping land_list.

    CommandLine:
        python -m mtgmonte.mtgutils --test-possible_mana_combinations

    Example:
        >>> # ENABLE_DOCTEST
        >>> from mtgmonte.mtgutils import *  # NOQA
        >>> from mtgmonte import mtgobjs
        >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
        >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef'])
        >>> card = land_list[-1]
        >>> mana_combos = possible_mana_combinations(land_list, deck)
        >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True))
        >>> print(result)
        ({CC}, {U}, {G}, {U}, {C}),
        ({CC}, {U}, {G}, {B}, {C}),
        ({CC}, {U}, {U}, {U}, {C}),
        ({CC}, {U}, {U}, {B}, {C}),
        ({CC}, {U}, {G}, {U}, {R}),
        ({CC}, {U}, {G}, {B}, {R}),
        ({CC}, {U}, {U}, {U}, {R}),
        ({CC}, {U}, {U}, {B}, {R}),
    """
    from mtgmonte import mtgobjs

    # Per-land mana potential; lands with no potential are dropped
    avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list]
    avail_mana = filter(len, avail_mana)
    # Cartesian product: one choice per land
    mana_combos1 = list(ut.iprod(*avail_mana))
    # Encode the idea that two fetches cant fetch the same land
    non_class1 = [[c for c in co if not isinstance(c, six.string_types)] for co in mana_combos1]
    flags = [len(co) == 0 or len(set(co)) == len(co) for co in non_class1]
    mana_combos2 = ut.compress(mana_combos1, flags)
    # Expand card-like entries into their own mana potentials, then take
    # the product within each combo and flatten
    mana_combos3 = [
        [[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co in mana_combos2
    ]
    unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3]
    mana_combos4 = ut.flatten(unflat_combos3)
    # mana_combos4 = [reduce(operator.add, m) for m in mana_combos4]
    # z = reduce(operator.add, m)
    # import utool
    # utool.embed()
    # avail_mana = [land.mana_potential(deck=deck) for land in land_list]
    # avail_mana = filter(len, avail_mana)
    # mana_combos4 = list(ut.iprod(*avail_mana))
    # Deduplicate combos that are permutations of one another
    combo_ids = [tuple(sorted(x)) for x in mana_combos4]
    flags = ut.flag_unique_items(combo_ids)
    mana_combos = ut.compress(mana_combos4, flags)
    # mana_combos = list(map(tuple, [''.join(c) for c in mana_combos]))
    return mana_combos
Example #19
0
def get_name_exemplar_aids(ibs, nid_list):
    r"""
    Returns:
        list_ (list):  a list of list of cids in each name


    CommandLine:
        python -m ibeis.control.manual_name_funcs --test-get_name_exemplar_aids

    RESTful:
        Method: GET
        URL:    /api/name/exemplar_aids/

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_name_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> nid_list = ibs.get_annot_name_rowids(aid_list)
        >>> exemplar_aids_list = ibs.get_name_exemplar_aids(nid_list)
        >>> result = exemplar_aids_list
        >>> print(result)
        [[], [2, 3], [2, 3], [], [5, 6], [5, 6], [7], [8], [], [10], [], [12], [13]]
    """
    # All annotation ids belonging to each name
    name_aids = ibs.get_name_aids(nid_list, enable_unknown_fix=True)
    # Per-annotation exemplar flags, grouped the same way
    name_flags = ibsfuncs.unflat_map(ibs.get_annot_exemplar_flags, name_aids)
    # Keep only the flagged (exemplar) annotations within each name
    return [
        ut.compress(aids, flags)
        for aids, flags in zip(name_aids, name_flags)
    ]
Example #20
0
def get_valid_nids(ibs, imgsetid=None, filter_empty=False, min_pername=None):
    r"""
    Returns:
        list_ (list): all valid names with at least one animal
        (does not include unknown names)

    RESTful:
        Method: GET
        URL:    /api/name/
    """
    # Either all known names, or only those in the requested imageset
    if imgsetid is None:
        nid_list = ibs._get_all_known_name_rowids()
    else:
        nid_list = ibs.get_imageset_nids(imgsetid)
    # HACK FOR UNKNOWN. Makes things crash
    #nid_list += [0]

    # filter_empty forces at least a minimum of one annotation per name
    if filter_empty:
        min_pername = 1 if min_pername is None else max(min_pername, 1)

    if min_pername is not None:
        nAnnot_list = ibs.get_name_num_annotations(nid_list)
        keep_flags = np.array(nAnnot_list) >= min_pername
        nid_list = ut.compress(nid_list, keep_flags)
    return nid_list
Example #21
0
def get_annot_alrids_oftype(ibs, aid_list, lbltype_rowid, configid=None):
    """
    Get all the relationship ids belonging to the input annotations where the
    relationship ids are filtered to be only of a specific lbltype/category/type

    Args:
        aid_list: annotation rowids to look up relationships for
        lbltype_rowid: keep only relationships whose lblannot has this type
        configid: optional config filter forwarded to get_annot_alrids; when
            None, ambiguous multi-alrid results are resolved to a single alrid

    Returns:
        list: per-annotation lists, each containing at most one alrid
    """
    alrids_list = ibs.get_annot_alrids(aid_list, configid=configid)
    # Get lblannot_rowid of each relationship
    lblannot_rowids_list = ibsfuncs.unflat_map(ibs.get_alr_lblannot_rowids, alrids_list)
    # Get the type of each lblannot
    lbltype_rowids_list = ibsfuncs.unflat_map(ibs.get_lblannot_lbltypes_rowids, lblannot_rowids_list)
    # only want the nids of individuals, not species, for example
    valids_list = [[typeid == lbltype_rowid for typeid in rowids] for rowids in lbltype_rowids_list]
    alrids_list = [ut.compress(alrids, valids) for alrids, valids in zip(alrids_list, valids_list)]
    if configid is None:
        def resolution_func_first(alrid_list):
            # Resolve ambiguity by arbitrarily keeping the first relationship
            return [alrid_list[0]]

        def resolution_func_lowest_config(alrid_list):
            # Resolve ambiguity by keeping the alrid with the lowest config rowid.
            # NOTE: currently unused; resolution_func_first is applied below.
            config_rowid_list = ibs.get_alr_config(alrid_list)
            temp = sorted(list(zip(config_rowid_list, alrid_list)))
            # BUGFIX: temp holds (config_rowid, alrid) pairs — return the
            # alrid (index 1), not the config rowid (index 0).
            return [temp[0][1]]

        alrids_list = [
            resolution_func_first(alrid_list)
            if len(alrid_list) > 1 else
            alrid_list
            for alrid_list in alrids_list
        ]
    assert all([len(alrid_list) < 2 for alrid_list in alrids_list]),\
        ("More than one type per lbltype.  ALRIDS: " + str(alrids_list) +
         ", ROW: " + str(lbltype_rowid) + ", KEYS:" + str(ibs.lbltype_ids))
    return alrids_list
Example #22
0
def shark_misc():
    """Return the WS_ALL annotation ids that have already been adjusted."""
    import ibeis
    ibs = ibeis.opendb('WS_ALL')
    all_aids = ibs.get_valid_aids()
    adjusted_flags = ibs.get_annot_been_adjusted(all_aids)
    return ut.compress(all_aids, adjusted_flags)
Example #23
0
def get_valid_nids(ibs, eid=None, filter_empty=False, min_pername=None):
    r"""
    Returns:
        list_ (list): all valid names with at least one animal
        (does not include unknown names)

    RESTful:
        Method: GET
        URL:    /api/name/
    """
    # Either all known names, or only those within the requested encounter
    if eid is None:
        nid_list = ibs._get_all_known_name_rowids()
    else:
        nid_list = ibs.get_encounter_nids(eid)
    # HACK FOR UNKNOWN. Makes things crash
    #nid_list += [0]

    # filter_empty means "require at least one annotation"
    if filter_empty:
        min_pername = 1 if min_pername is None else max(min_pername, 1)

    if min_pername is not None:
        nAnnot_list = ibs.get_name_num_annotations(nid_list)
        keep_flags = np.array(nAnnot_list) >= min_pername
        nid_list = ut.compress(nid_list, keep_flags)
    return nid_list
Example #24
0
def shark_misc():
    """Open the WS_ALL database and collect its adjusted annotation ids."""
    import ibeis
    ibs = ibeis.opendb('WS_ALL')
    aids = ibs.get_valid_aids()
    flags = ibs.get_annot_been_adjusted(aids)
    adjusted_aids = ut.compress(aids, flags)
    return adjusted_aids
Example #25
0
def debug_depcache(ibs):
    r"""
    Print diagnostic information about the dependency cache.

    CommandLine:
        python -m ibeis_flukematch.plugin --exec-debug_depcache
        python -m ibeis_flukematch.plugin --exec-debug_depcache --show --no-cnn
        python -m ibeis_flukematch.plugin --exec-debug_depcache --clear-all-depcache --db humbpacks
        python -m ibeis_flukematch.plugin --exec-debug_depcache --show --no-cnn --db humpbacks

        python -m ibeis_flukematch.plugin --exec-preproc_notch_tips --db humpbacks --no-cnn --show

    Example:
        >>> # SCRIPT
        >>> from ibeis_flukematch.plugin import *  # NOQA
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> debug_depcache(ibs)
        >>> ut.show_if_requested()
    """
    print(ibs.depc)
    # Dump the dependency chain for each table of interest
    nas_notch_deps = ibs.depc.get_dependencies('Has_Notch')
    print('nas_notch_deps = %r' % (nas_notch_deps,))
    te_deps = ibs.depc.get_dependencies('Trailing_Edge')
    print('te_deps = %r' % (te_deps,))
    notch_tip_deps = ibs.depc.get_dependencies('Notch_Tips')
    print('notch_tip_deps = %r' % (notch_tip_deps,))
    ibs.depc.print_schemas()
    try:
        ibs.depc.show_graph()
    except Exception as ex:
        # Graph display is best-effort (may fail without a GUI backend)
        ut.printex(ex, iswarning=True)

    all_aids = ibs.get_valid_aids()
    isvalid = ibs.depc.get('Has_Notch', all_aids, 'flag')
    aid_list = ut.compress(all_aids, isvalid)
    # NOTE(review): aid_list is computed and sliced but never used below —
    # dead code, or a leftover from an interactive session?
    aid_list = aid_list[0:10]
    ibs.depc.print_config_tables()
Example #26
0
def delete_empty_eids(ibs):
    """ Removes encounters without images

    Args:
        ibs (IBEISController):  ibeis controller object

    CommandLine:
        python -m ibeis.control.manual_egrelate_funcs --test-delete_empty_eids

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_egrelate_funcs import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> # execute function
        >>> result = ibs.delete_empty_eids()
        >>> # verify results
        >>> print(result)
    """
    eid_list = ibs.get_valid_eids(min_num_gids=0)
    nGids_list = ibs.get_encounter_num_gids(eid_list)
    # An encounter with zero images is considered empty
    empty_flags = [num_gids == 0 for num_gids in nGids_list]
    empty_eids = ut.compress(eid_list, empty_flags)
    ibs.delete_encounters(empty_eids)
Example #27
0
 def compress(self, flags, inplace=False):
     """Return a new Keypoints with only the entries where ``flags`` is truthy.

     Filters the keypoint array along axis 0 and every parallel list in
     ``self.info`` consistently.  (``inplace`` is accepted for interface
     compatibility but a new object is always returned.)
     """
     subarr = self.kparr.compress(flags, axis=0)
     filtered_info = {
         key: ut.compress(vals, flags) for key, vals in self.info.items()
     }
     return Keypoints(subarr, filtered_info)
Example #28
0
def find_close_incorrect_match(ibs, qaids):
    """Query the database and report the closest groundfalse (wrong-name) matches.

    Side effects: shows an interactive top-match window and prints a mapping
    from the query annotation's visual uuid to its closest incorrect match.
    """
    cfgdict_vsmany = dict(
        index_method='single',
        pipeline_root='vsmany',
    )
    # Fresh (uncached) vsmany query of the requested annots against everything
    qres_vsmany_list, qreq_vsmany_ = ibs.query_chips(
        qaids,
        ibs.get_valid_aids(),
        cfgdict=cfgdict_vsmany,
        return_request=True,
        use_cache=False,
        save_qcache=False,
        verbose=True,
    )
    qres_vsmany = qres_vsmany_list[0]
    qres_vsmany.ishow_top(ibs)
    top_aids = qres_vsmany.get_top_aids()
    top_nids = ibs.get_annot_nids(top_aids)
    qaid = qaids[0]
    qnid = ibs.get_annot_nids(qaid)
    # Keep only top results whose name differs from the query's name
    top_gf_aids = [aid for aid, nid in zip(top_aids, top_nids) if nid != qnid]
    #top_gt_aids = ut.filterfalse_items(top_aids, is_groundfalse)
    top_gf_vuuids = ibs.get_annot_visual_uuids(top_gf_aids)
    qvuuid = ibs.get_annot_visual_uuids(qaid)
    gf_mapping = {qvuuid: top_gf_vuuids[0:1]}
    print('gf_mapping = ' + ut.dict_str(gf_mapping))
Example #29
0
 def _estimate_threshold(model, method=None, curve=None):
     """Estimate a cut threshold from the model's edge weights.

     Args:
         model: object exposing an ``edge_weights`` array attribute.
         method (str, optional): forwarded to ``estimate_threshold``.
         curve (list, optional): precomputed sorted weight curve; when None it
             is rebuilt from the non-NaN entries of ``model.edge_weights``.

     Returns:
         threshold value as computed by ``estimate_threshold``

     Debugging snippet (plots the elbow points of the curve)::

         import plottool as pt
         idx3 = vt.find_elbow_point(curve[idx1:idx2 + 1]) + idx1
         pt.plot(curve)
         pt.plot(idx1, curve[idx1], 'bo')
         pt.plot(idx2, curve[idx2], 'ro')
         pt.plot(idx3, curve[idx3], 'go')
     """
     if curve is None:
         # NaN weights mark unweighted edges; exclude them from the curve
         isvalid = ~np.isnan(model.edge_weights)
         curve = sorted(ut.compress(model.edge_weights, isvalid))
     # NOTE: the old inline mean/elbow logic was superseded by the
     # standalone estimate_threshold helper; the commented copy was removed.
     thresh = estimate_threshold(curve, method)
     return thresh
Example #30
0
    def find_maxval_spell_sequence(player):
        """Find the castable spell sequence from hand with the highest value.

        Simulates the lands in play (really the available mana) against the
        nonland cards in hand and returns the best feasible cast sequence.

        Returns:
            tuple: (sequence, value) where ``sequence`` is the chosen spell
                combination (empty list if nothing is castable) and ``value``
                is its total goldfish value.
        """
        # sim land in play
        # really need available mana
        from mtgmonte import mtgutils

        land_list = player.get_cards_in_play(["Land"])
        spell_list = player.get_cards_in_hand(["Land"], invert=True)

        # Upper bound on the mana available this turn
        max_avail_cmc = mtgutils.get_max_avail_cmc(land_list, deck=player.deck)
        cmc_feasible_sequences = mtgutils.get_cmc_feasible_sequences(spell_list, max_avail_cmc)

        sequence = []
        value = 0
        if len(cmc_feasible_sequences) > 0:
            # Check actual castability against concrete mana combinations
            mana_combos = mtgutils.possible_mana_combinations(land_list, player.deck)
            flags = [mtgutils.can_cast(spell_sequence, mana_combos)
                     for spell_sequence in cmc_feasible_sequences]
            feasible_sequences = ut.compress(cmc_feasible_sequences, flags)
            if len(feasible_sequences) > 0:
                # Find best value in feasible solutions
                value_list = [sum([card.get_goldfish_value() for card in combo])
                              for combo in feasible_sequences]
                index = ut.list_argmax(value_list)
                sequence = feasible_sequences[index]
                # BUG FIX: previously returned len(sequence); the intended
                # "value" is the sequence's total goldfish value (the very
                # quantity maximized above).
                value = value_list[index]
        return sequence, value
Example #31
0
def get_unconverted_hsdbs(workdir=None):
    r"""
    Find HotSpotter databases in the work directory that still need conversion.

    Args:
        workdir (None): (default = None)

    CommandLine:
        python -m wbia.dbio.ingest_hsdb --test-get_unconverted_hsdbs

    Example:
        >>> # SCRIPT
        >>> from wbia.dbio.ingest_hsdb import *  # NOQA
        >>> workdir = None
        >>> result = get_unconverted_hsdbs(workdir)
        >>> print(result)
    """
    import os
    import numpy as np

    if workdir is None:
        workdir = sysres.get_workdir()
    # Every directory in the workdir is a candidate database
    candidate_paths = np.array(
        [join(workdir, dbname) for dbname in os.listdir(workdir)])
    needs_convert = [check_unconverted_hsdb(dbpath) for dbpath in candidate_paths]
    needs_convert_hsdbs = ut.compress(candidate_paths, needs_convert)
    return needs_convert_hsdbs
Example #32
0
def delete_empty_imgsetids(ibs):
    """ Removes imagesets without images

    Args:
        ibs (IBEISController):  wbia controller object

    CommandLine:
        python -m wbia.control.manual_gsgrelate_funcs --test-delete_empty_imgsetids

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.control.manual_gsgrelate_funcs import *  # NOQA
        >>> import wbia
        >>> # build test data
        >>> ibs = wbia.opendb('testdb1')
        >>> # execute function
        >>> result = ibs.delete_empty_imgsetids()
        >>> # verify results
        >>> print(result)
    """
    # An imageset is "empty" when its image count is zero
    all_imgsetids = ibs.get_valid_imgsetids(min_num_gids=0)
    num_gids_per_imgset = ibs.get_imageset_num_gids(all_imgsetids)
    empty_imgsetids = [imgsetid for imgsetid, num_gids
                       in zip(all_imgsetids, num_gids_per_imgset)
                       if num_gids == 0]
    ibs.delete_imagesets(empty_imgsetids)
def get_annot_alrids_oftype(ibs, aid_list, lbltype_rowid):
    """
    Get all the relationship ids belonging to the input annotations where the
    relationship ids are filtered to be only of a specific lbltype/category/type
    """
    alrids_list = ibs.get_annot_alrids(aid_list)
    # lblannot rowid of each relationship
    lblannot_rowids_list = ibsfuncs.unflat_map(ibs.get_alr_lblannot_rowids,
                                               alrids_list)
    # lbltype of each lblannot
    lbltype_rowids_list = ibsfuncs.unflat_map(ibs.get_lblannot_lbltypes_rowids,
                                              lblannot_rowids_list)
    # only want the nids of individuals, not species, for example
    alrids_list = [
        [alrid for alrid, typeid in zip(alrids, typeids)
         if typeid == lbltype_rowid]
        for alrids, typeids in zip(alrids_list, lbltype_rowids_list)
    ]
    # If multiple relationships of the requested type exist, resolve to the
    # first one so each annot maps to at most a single relationship
    alrids_list = [alrids[0:1] if len(alrids) > 1 else alrids
                   for alrids in alrids_list]
    assert all([
        len(alrid_list) < 2 for alrid_list in alrids_list
    ]), ('More than one type per lbltype.  ALRIDS: ' + str(alrids_list) +
         ', ROW: ' + str(lbltype_rowid) + ', KEYS:' + str(ibs.lbltype_ids))
    return alrids_list
Example #34
0
def debug_depcache(ibs):
    r"""
    CommandLine:
        python -m ibeis_flukematch.plugin --exec-debug_depcache
        python -m ibeis_flukematch.plugin --exec-debug_depcache --show --no-cnn
        python -m ibeis_flukematch.plugin --exec-debug_depcache --clear-all-depcache --db humbpacks
        python -m ibeis_flukematch.plugin --exec-debug_depcache --show --no-cnn --db humpbacks

        python -m ibeis_flukematch.plugin --exec-preproc_notch_tips --db humpbacks --no-cnn --show

    Example:
        >>> # SCRIPT
        >>> from ibeis_flukematch.plugin import *  # NOQA
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> debug_depcache(ibs)
        >>> ut.show_if_requested()
    """
    print(ibs.depc)
    # Dump the dependency chain of each flukematch table
    dep_queries = [
        ('nas_notch_deps', 'Has_Notch'),
        ('te_deps', 'Trailing_Edge'),
        ('notch_tip_deps', 'Notch_Tips'),
    ]
    for varname, tablename in dep_queries:
        deps = ibs.depc.get_dependencies(tablename)
        print('%s = %r' % (varname, deps))
    ibs.depc.print_schemas()
    try:
        ibs.depc.show_graph()
    except Exception as ex:
        ut.printex(ex, iswarning=True)

    # Exercise the cache with a small sample of notched annotations
    all_aids = ibs.get_valid_aids()
    has_notch = ibs.depc.get('Has_Notch', all_aids, 'flag')
    aid_list = [aid for aid, flag in zip(all_aids, has_notch) if flag]
    aid_list = aid_list[0:10]
    ibs.depc.print_config_tables()
Example #35
0
def get_name_exemplar_aids(ibs, nid_list):
    r"""
    Returns:
        list_ (list):  a list of list of cids in each name


    CommandLine:
        python -m ibeis.control.manual_name_funcs --test-get_name_exemplar_aids

    RESTful:
        Method: GET
        URL:    /api/name/exemplar_aids/

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_name_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> nid_list = ibs.get_annot_name_rowids(aid_list)
        >>> exemplar_aids_list = ibs.get_name_exemplar_aids(nid_list)
        >>> result = exemplar_aids_list
        >>> print(result)
        [[], [2, 3], [2, 3], [], [5, 6], [5, 6], [7], [8], [], [10], [], [12], [13]]
    """
    # All annot ids grouped by name
    aids_list = ibs.get_name_aids(nid_list, enable_unknown_fix=True)
    # Keep only the annots flagged as exemplars within each name group
    flags_list = ibsfuncs.unflat_map(ibs.get_annot_exemplar_flags, aids_list)
    exemplar_aids_list = [
        [aid for aid, flag in zip(aids, flags) if flag]
        for aids, flags in zip(aids_list, flags_list)
    ]
    return exemplar_aids_list
Example #36
0
    def find_module_callers():
        """
        TODO:
        attempt to build a call graph between module functions to make it easy to see
        what can be removed and what cannot.

        Greps the configured project directories for usages of each
        doctestable function in the target module, then prints the set of
        external modules that call into it.
        """
        import utool as ut
        from os.path import normpath
        # NOTE(review): scratch code — only the last assignment takes effect;
        # edit the final line to point at the module of interest.
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
        module = ut.import_module_from_fpath(mod_fpath)
        user_profile = ut.ensure_user_profile()
        doctestables = list(
            ut.iter_module_doctestable(module, include_builtin=False))
        grepkw = {}
        grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
        grepkw['dpath_list'] = user_profile.project_dpaths
        grepkw['verbose'] = True

        # Grep the project for every doctestable function name
        usage_map = {}
        for funcname, func in doctestables:
            print('Searching for funcname = %r' % (funcname, ))
            found_fpath_list, found_lines_list, found_lxs_list = ut.grep(
                [funcname], **grepkw)
            used_in = (found_fpath_list, found_lines_list, found_lxs_list)
            usage_map[funcname] = used_in

        # Restrict each usage record to files other than the module itself
        external_usage_map = {}
        for funcname, used_in in usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in
            isexternal_flag = [
                normpath(fpath) != normpath(mod_fpath)
                for fpath in found_fpath_list
            ]
            ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                           ut.compress(found_lines_list, isexternal_flag),
                           ut.compress(found_lxs_list, isexternal_flag))
            external_usage_map[funcname] = ext_used_in

        # (a no-op loop that only unpacked external_usage_map was removed)

        print('Calling modules: \n' + ut.repr2(ut.unique_ordered(
            ut.flatten([used_in[0]
                        for used_in in external_usage_map.values()])),
                                               nl=True))
Example #37
0
    def ensure_task_probs(infr, edges):
        """
        Ensures that probabilities are assigned to the edges.
        This guarantees that infr.task_probs contains data for edges.
        (Currently only the primary task is actually ensured)

        CommandLine:
            python -m ibeis.algo.graph.mixin_matching ensure_task_probs

        Doctest:
            >>> from ibeis.algo.graph.mixin_matching import *
            >>> import ibeis
            >>> infr = ibeis.AnnotInference('PZ_MTEST', aids='all',
            >>>                             autoinit='staging')
            >>> edges = list(infr.edges())[0:3]
            >>> infr.load_published()
            >>> assert len(infr.task_probs['match_state']) == 0
            >>> infr.ensure_task_probs(edges)
            >>> assert len(infr.task_probs['match_state']) == 3
            >>> infr.ensure_task_probs(edges)
            >>> assert len(infr.task_probs['match_state']) == 3

        Doctest:
            >>> from ibeis.algo.graph.mixin_matching import *
            >>> from ibeis.algo.graph import demo
            >>> infr = demo.demodata_infr(num_pccs=6, p_incon=.5, size_std=2)
            >>> edges = list(infr.edges())
            >>> infr.ensure_task_probs(edges)
            >>> assert all([np.isclose(sum(p.values()), 1)
            >>>             for p in infr.task_probs['match_state'].values()])
        """
        # Probabilities come from learned verifiers; without them we cannot
        # proceed, so fail loudly rather than silently leaving edges bare.
        if not infr.verifiers:
            raise Exception('Verifiers are needed to predict probabilities')

        # Construct pairwise features on edges in infr
        primary_task = 'match_state'

        # Edges already present in the primary-task cache are skipped
        match_task = infr.task_probs[primary_task]
        need_flags = [e not in match_task for e in edges]

        if any(need_flags):
            need_edges = ut.compress(edges, need_flags)
            infr.print(
                'There are {} edges without probabilities'.format(
                    len(need_edges)), 1)

            # Only recompute for the needed edges
            task_probs = infr._make_task_probs(need_edges)
            # Store task probs in internal data structure
            # FIXME: this is slow
            for task, probs in task_probs.items():
                # probs is a DataFrame-like object keyed by edge
                probs_dict = probs.to_dict(orient='index')
                if task not in infr.task_probs:
                    infr.task_probs[task] = probs_dict
                else:
                    # Merge new results into the existing per-task cache
                    infr.task_probs[task].update(probs_dict)

                # Set edge task attribute as well
                infr.set_edge_attrs(task, probs_dict)
Example #38
0
def testdata_humpbacks():
    """Open the humpbacks db and return it with up to 10 notched annot ids."""
    import ibeis
    ibs = ibeis.opendb(defaultdb='humpbacks')
    all_aids = ibs.get_valid_aids()
    notch_flags = ibs.depc.get('Has_Notch', all_aids, 'flag')
    aid_list = [aid for aid, flag in zip(all_aids, notch_flags) if flag]
    return ibs, aid_list[0:10]
Example #39
0
def testdata_humpbacks():
    """Load the humpbacks database and grab up to ten annots that have notches."""
    import ibeis
    ibs = ibeis.opendb(defaultdb='humpbacks')
    aids = ibs.get_valid_aids()
    flags = ibs.depc.get('Has_Notch', aids, 'flag')
    notched = [aid for aid, flag in zip(aids, flags) if flag]
    aid_list = notched[0:10]
    return ibs, aid_list
Example #40
0
 def inplace_filter_results(self, filter_pat):
     """Restrict stored grep results to lines matching ``filter_pat``,
     dropping files that end up with no surviving lines. Mutates self."""
     import utool as ut
     self.filter_pats.append(filter_pat)
     # Per-line match flags, grouped per file
     zipflags = self.pattern_filterflags(filter_pat)
     # A file survives when at least one of its lines matched
     file_flags = ut.lmap(any, zipflags)
     # Prune non-matching lines inside each file
     kept_lines = ut.zipcompress(self.found_lines_list, zipflags)
     kept_lxs = ut.zipcompress(self.found_lxs_list, zipflags)
     # Then prune files with nothing left, writing results back in place
     self.found_fpath_list = ut.compress(self.found_fpath_list, file_flags)
     self.found_lines_list = ut.compress(kept_lines, file_flags)
     self.found_lxs_list = ut.compress(kept_lxs, file_flags)
Example #41
0
def bzip(*args):
    """
    broadcasting zip. Only broadcasts on the first dimension

    Plain lists are wrapped in 1-d object arrays so that numpy broadcasting
    treats each list element as a single broadcastable item.

    args = [np.array([1, 2, 3, 4]), [[1, 2, 3]]]
    """
    containers = []
    for arg in args:
        if isinstance(arg, list):
            # Wrap the list in an object array of the same length
            wrapped = np.empty(len(arg), dtype=object)
            wrapped[:] = arg
            containers.append(wrapped)
        else:
            containers.append(arg)
    return np.broadcast(*containers)
Example #42
0
def get_diffranks(rank_mat, qaids):
    """ Find rows which scored differently over the various configs
    FIXME: duplicated
    """
    # A row is "diff" when not every config produced the same rank
    isdiff_flags = [not np.all(row == row[0]) for row in rank_mat]
    diff_aids = [qaid for qaid, isdiff in zip(qaids, isdiff_flags) if isdiff]
    diff_rank = rank_mat.compress(isdiff_flags, axis=0)
    diff_qxs = np.where(isdiff_flags)[0]
    return diff_aids, diff_rank, diff_qxs
Example #43
0
 def inplace_filter_results(self, filter_pat):
     """Filter the stored grep results in place by ``filter_pat``.

     Lines that do not match are dropped, and files left with zero matching
     lines are removed from all three parallel result lists.
     """
     import utool as ut
     self.filter_pats.append(filter_pat)
     # Line-level match flags for every found file
     flags_list = self.pattern_filterflags(filter_pat)
     # File-level flag: keep the file if any line survived
     keep_file = ut.lmap(any, flags_list)
     # First filter the lines within each file...
     lines = ut.zipcompress(self.found_lines_list, flags_list)
     lxs = ut.zipcompress(self.found_lxs_list, flags_list)
     # ...then drop the now-empty files from every parallel list
     self.found_fpath_list = ut.compress(self.found_fpath_list, keep_file)
     self.found_lines_list = ut.compress(lines, keep_file)
     self.found_lxs_list = ut.compress(lxs, keep_file)
Example #44
0
def get_diffranks(rank_mat, qaids):
    """ Find rows which scored differently over the various configs
    FIXME: duplicated
    """
    # Rows whose ranks are not constant across configs
    isdiff_flags = [not np.all(row == row[0]) for row in rank_mat]
    diff_qxs = np.where(isdiff_flags)[0]
    diff_aids = [qaids[qx] for qx in diff_qxs]
    diff_rank = rank_mat.compress(isdiff_flags, axis=0)
    return diff_aids, diff_rank, diff_qxs
Example #45
0
def convert_tests_from_ibeis_to_nose(module_list):
    """Convert enabled ibeis doctests into standalone nose-style test functions.

    For every module in ``module_list`` the enabled doctest tuples are
    extracted, deduplicated by source text, stripped of doctest-only
    boilerplate, and written out as ``test_*`` functions to
    ``ibeis/tests/test_autogen_nose_tests.py``.
    """
    import re
    # PARSE OUT TESTABLE DOCTESTTUPS
    testtup_list = []
    seen_ = set()

    topimport_list = []

    for module in module_list:
        mod_doctest_tup = ut.get_module_doctest_tup(module=module,
                                                    verbose=False,
                                                    allexamples=True)
        enabled_testtup_list, frame_fpath, all_testflags, module = mod_doctest_tup
        # Dedupe tests that share identical source across modules.
        # BUG FIX: seen_ was never updated, so the dedup was a no-op.
        flags = [tup.src not in seen_ for tup in enabled_testtup_list]
        enabled_testtup_list = ut.compress(enabled_testtup_list, flags)
        seen_.update(tup.src for tup in enabled_testtup_list)
        testtup_list.extend(enabled_testtup_list)
        if len(enabled_testtup_list) > 0:
            topimport_list.append('from %s import *  # NOQA' % (module.__name__,))

    print('Found %d test tups' % (len(testtup_list)))

    autogen_test_src_funcs = []
    for testtup in testtup_list:
        name = testtup.name
        num  = testtup.num
        src  = testtup.src
        want = testtup.want
        # Strip doctest-only boilerplate from the source
        src = re.sub('# ENABLE_DOCTEST\n', '', src)
        src = re.sub('from [^*]* import \* *# NOQA\n', '', src)
        src = re.sub(r'from [^*]* import \*\n', '', src)
        # Drop any interactive show/plot tail
        src = ut.str_between(src, None, 'ut.quit_if_noshow').rstrip('\n')
        src = ut.str_between(src, None, 'ut.show_if_requested').rstrip('\n')
        if want.endswith('\n'):
            want = want[:-1]
        if want:
            # Turn the expected doctest output into an assertion when the
            # snippet binds a `result` variable
            if '\nresult = ' in src:
                src += '\nassert str(result) == %r' % (want,)
        func_src = 'def test_%s_%d():\n' % (name.replace('.', '_'), num,) + ut.indent(src)
        autogen_test_src_funcs.append(func_src)

    autogen_test_src = '\n'.join(topimport_list) + '\n\n\n' + '\n\n\n'.join(autogen_test_src_funcs) + '\n'
    from ibeis import tests
    from os.path import join
    moddir = ut.get_module_dir(tests)
    ut.writeto(join(moddir, 'test_autogen_nose_tests.py'), autogen_test_src)
Example #46
0
    def predict_proba_df(verif, edges):
        """
        CommandLine:
            python -m wbia.algo.graph.demo DummyVerif.predict_edges

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.algo.graph.demo import *  # NOQA
            >>> from wbia.algo.graph import demo
            >>> import networkx as nx
            >>> kwargs = dict(num_pccs=40, size=2)
            >>> infr = demo.demodata_infr(**kwargs)
            >>> verif = infr.dummy_verif
            >>> edges = list(infr.graph.edges())
            >>> probs = verif.predict_proba_df(edges)
            >>> #print('scores = %r' % (scores,))
            >>> #hashid = ut.hash_data(scores)
            >>> #print('hashid = %r' % (hashid,))
            >>> #assert hashid == 'cdlkytilfeqgmtsihvhqwffmhczqmpil'
        """
        infr = verif.infr
        # Canonicalize edge tuples so cache lookups are consistent
        edges = list(it.starmap(verif.infr.e_, edges))
        prob_cache = infr.task_probs['match_state']
        # Edges not yet in the cache need dummy probabilities generated
        is_miss = np.array([e not in prob_cache for e in edges])
        # is_hit = ~is_miss
        if np.any(is_miss):
            miss_edges = ut.compress(edges, is_miss)
            miss_truths = [verif._get_truth(edge) for edge in miss_edges]
            grouped_edges = ut.group_items(miss_edges,
                                           miss_truths,
                                           sorted_=False)
            # Need to make this deterministic too
            states = [POSTV, NEGTV, INCMP]
            # Iterate groups in sorted key order so the RNG draws are reproducible
            for key in sorted(grouped_edges.keys()):
                group = grouped_edges[key]
                probs0 = randn(
                    shape=[len(group)],
                    rng=verif.rng,
                    a_max=1,
                    a_min=0,
                    **verif.dummy_params[key],
                )
                # Just randomly assign other probs
                probs1 = verif.rng.rand(len(group)) * (1 - probs0)
                probs2 = 1 - (probs0 + probs1)
                # The three per-state probabilities sum to 1 by construction
                for edge, probs in zip(group, zip(probs0, probs1, probs2)):
                    prob_cache[edge] = ut.dzip(states, probs)

        from wbia.algo.graph import nx_utils as nxu
        import pandas as pd

        # Assemble the (possibly freshly filled) cached probs for all edges
        probs = pd.DataFrame(
            ut.take(prob_cache, edges),
            index=nxu.ensure_multi_index(edges, ('aid1', 'aid2')),
        )
        return probs
def set_annot_lblannot_from_rowid(ibs, aid_list, lblannot_rowid_list, _lbltype):
    """ Sets items/lblannot_rowids of a list of annotations."""
    # Existing relationships of this lbltype for each annot
    alrids_list = ibs.get_annot_alrids_oftype(aid_list, ibs.lbltype_ids[_lbltype])
    has_relation = [len(alrids) > 0 for alrids in alrids_list]
    # Annots without an existing relationship get a fresh one added
    new_aids = ut.get_dirty_items(aid_list, has_relation)
    new_lblannot_rowids = ut.get_dirty_items(lblannot_rowid_list, has_relation)
    # Annots with an existing relationship have it updated in place
    existing_alrids_list = ut.compress(alrids_list, has_relation)
    existing_lblannot_rowids = ut.compress(lblannot_rowid_list, has_relation)
    # Assert each annot has only one relationship of this type
    ibsfuncs.assert_singleton_relationship(ibs, existing_alrids_list)
    existing_alrid_list = ut.flatten(existing_alrids_list)
    # Add the new relationships
    ibs.add_annot_relationship(new_aids, new_lblannot_rowids)
    # Set the old relationships
    ibs.set_alr_lblannot_rowids(existing_alrid_list, existing_lblannot_rowids)
Example #48
0
 def get_other_nids(self):
     """Return unique nids on the aids that are not nid1, nid2, or unknown."""
     ibs = self.ibs
     all_nid_list = ibs.get_annot_name_rowids(self.all_aid_list)
     unique_nid_list = ut.unique_ordered(all_nid_list)
     is_unknown = ibs.is_nid_unknown(unique_nid_list)
     other_nid_list = [
         nid for nid, unknown in zip(unique_nid_list, is_unknown)
         if not unknown and nid != self.nid1 and nid != self.nid2
     ]
     return other_nid_list
Example #49
0
def filter_and_relabel(labels, label_gids, min_imgs_per_occurence, occur_unixtimes=None):
    """
    Removes clusters with too few members.
    Relabels clusters-labels such that label 0 has the most members

    Args:
        labels: cluster labels (unused here; kept for interface compatibility)
        label_gids (list): image ids for each cluster label
        min_imgs_per_occurence (int): minimum cluster size to keep
        occur_unixtimes (list, optional): one timestamp per cluster; when
            given, surviving occurrences are reordered oldest-first.

    Returns:
        tuple: (occur_ids, occur_gids)
    """
    label_nGids = np.array(list(map(len, label_gids)))
    label_isvalid = label_nGids >= min_imgs_per_occurence
    occur_gids = [gids for gids, valid in zip(label_gids, label_isvalid) if valid]
    if occur_unixtimes is not None:
        occur_unixtimes = [t for t, valid in zip(occur_unixtimes, label_isvalid)
                           if valid]
        unixtime_arr = np.array(occur_unixtimes)
        # Reorder occurrences so the oldest has the lowest number.
        # BUG FIX: previously indexed into the UNFILTERED label_gids with
        # argsort indices computed on the filtered timestamps; must take
        # from the filtered occur_gids instead.
        occur_gids = [occur_gids[ix] for ix in unixtime_arr.argsort()]
    occur_ids = list(range(len(occur_gids)))
    return occur_ids, occur_gids
Example #50
0
def set_annot_lblannot_from_rowid(ibs, aid_list, lblannot_rowid_list, _lbltype):
    """ Sets items/lblannot_rowids of a list of annotations."""
    # Look up this lbltype's existing relationships per annot
    alrids_list = ibs.get_annot_alrids_oftype(aid_list, ibs.lbltype_ids[_lbltype])
    setflag_list = [len(alrids) > 0 for alrids in alrids_list]
    # Split the inputs: no existing relationship -> add, otherwise -> set
    add_aids = ut.get_dirty_items(aid_list, setflag_list)
    add_lblannot_rowids = ut.get_dirty_items(lblannot_rowid_list, setflag_list)
    set_alrids_list = ut.compress(alrids_list, setflag_list)
    set_lblannot_rowids = ut.compress(lblannot_rowid_list, setflag_list)
    # Assert each annot has only one relationship of this type
    ibsfuncs.assert_singleton_relationship(ibs, set_alrids_list)
    set_alrid_list = ut.flatten(set_alrids_list)
    # Create the missing relationships, then update the existing ones
    ibs.add_annot_relationship(add_aids, add_lblannot_rowids)
    ibs.set_alr_lblannot_rowids(set_alrid_list, set_lblannot_rowids)
Example #51
0
def check_chip_external_storage(ibs, cid_list):
    """Verify every chip's external file exists; purge missing chips and raise."""
    chip_fpath_list = get_chip_fpath(ibs, cid_list, check_external_storage=False)
    missing_flags = [not exists(cfpath) for cfpath in chip_fpath_list]
    if any(missing_flags):
        invalid_cids = [cid for cid, missing in zip(cid_list, missing_flags)
                        if missing]
        print('ERROR: %d CHIPS DO NOT EXIST' % (len(invalid_cids)))
        print('ATTEMPING TO FIX %d / %d non-existing chip paths' % (len(invalid_cids), len(cid_list)))
        # Delete the stale rows so a retry recomputes them
        ibs.delete_chips(invalid_cids)
        raise controller_inject.ExternalStorageException('NON-EXISTING EXTRENAL STORAGE ERROR. REQUIRES RECOMPUTE. TRY AGAIN')
    return chip_fpath_list
Example #52
0
def filter_and_relabel(labels, label_gids, min_imgs_per_occurence, occur_unixtimes=None):
    """
    Removes clusters with too few members.
    Relabels clusters-labels such that label 0 has the most members

    Args:
        labels: cluster labels (unused; kept for interface compatibility)
        label_gids (list): image ids belonging to each cluster label
        min_imgs_per_occurence (int): minimum cluster size to survive
        occur_unixtimes (list, optional): per-cluster timestamps; if given,
            surviving occurrences are sorted so the oldest gets id 0.

    Returns:
        tuple: (occur_ids, occur_gids)
    """
    cluster_sizes = np.array([len(gids) for gids in label_gids])
    keep_mask = cluster_sizes >= min_imgs_per_occurence
    occur_gids = [gids for gids, keep in zip(label_gids, keep_mask) if keep]
    if occur_unixtimes is not None:
        kept_times = [t for t, keep in zip(occur_unixtimes, keep_mask) if keep]
        # Reorder occurrences so the oldest has the lowest number.
        # BUG FIX: the argsort indices refer to the filtered list, so they
        # must index occur_gids — not the original unfiltered label_gids.
        sortx = np.array(kept_times).argsort()
        occur_gids = [occur_gids[ix] for ix in sortx]
    occur_ids = list(range(len(occur_gids)))
    return occur_ids, occur_gids
Example #53
0
 def get_cards_in_hand(player, valid_types=None, invert=False):
     """Return the cards in the player's hand, optionally filtered by type.

     With ``valid_types`` given, keeps cards whose types are a superset of
     ``valid_types`` (or the complement of that set when ``invert`` is True).
     """
     hand = player.hand
     if valid_types is None:
         return hand
     flags = [ut.is_superset(card.types, valid_types) for card in hand]
     if invert:
         flags = [not flag for flag in flags]
     return [card for card, flag in zip(hand, flags) if flag]
Example #54
0
 def get_other_nids(self):
     """Collect the name rowids on the aids besides nid1, nid2, and unknown."""
     ibs = self.ibs
     all_nid_list = ibs.get_annot_name_rowids(self.all_aid_list)
     unique_nid_list = ut.unique_ordered(all_nid_list)
     is_unknown = ibs.is_nid_unknown(unique_nid_list)
     other_nid_list = []
     for nid, unknown in zip(unique_nid_list, is_unknown):
         # Skip unknowns and the two names currently being compared
         if unknown or nid == self.nid1 or nid == self.nid2:
             continue
         other_nid_list.append(nid)
     return other_nid_list
Example #55
0
 def get_where(group, attr, cmp_, target):
     """Return the group's cards whose ``attr`` satisfies ``cmp_(val, target)``.

     ``cmp_`` may be a comparison callable or one of the operator strings
     '==', '<', '<=', '>', '>='.
     """
     if isinstance(cmp_, six.string_types):
         # Translate the operator string into its function
         op_lookup = {
             '==': op.eq,
             '<': op.lt, '<=': op.le,
             '>': op.gt, '>=': op.ge,
         }
         cmp_ = op_lookup[cmp_]
     attrs = group.get_attrs(attr)
     return [card for card, val in zip(group.cards, attrs)
             if cmp_(val, target)]
Example #56
0
 def handle_cache_misses(ibs, getter_func, rowid_list, ismiss_list, vals_list, cache_, kwargs):
     """Fill in ``vals_list`` and ``cache_`` for rowids flagged as cache misses.

     Calls ``getter_func`` once on just the missed rowids, then writes each
     result back into ``vals_list`` at its position and into ``cache_`` under
     its rowid. Mutates ``vals_list`` and ``cache_`` in place.
     """
     miss_indices = ut.list_where(ismiss_list)
     miss_rowids = ut.compress(rowid_list, ismiss_list)
     # One batched call covers every miss
     miss_vals = getter_func(ibs, miss_rowids, **kwargs)
     for index, rowid, val in zip(miss_indices, miss_rowids, miss_vals):
         vals_list[index] = val  # Output write
         cache_[rowid] = val  # Cache write
Example #57
0
def test_openworkdirs():
    """
    problems:
        PZ_DanExt_All
        PZ_DanExt_Test
        GZ_March2012
        Wildebeest_ONLY_MATCHES

    python dev.py --convert --dbdir /raid/work/PZ_Marianne --force-delete
    python dev.py --convert --dbdir /raid/work/SL_Siva --force-delete
    python dev.py --convert --dbdir /raid/work/PZ_SweatwaterSmall --force-delete
    """
    # Databases already known to open/convert cleanly
    canskip = [
        '/raid/work/NAUT_test2', '/raid/work/WD_Siva',
        '/raid/work/PZ_FlankHack', '/raid/work/PZ_Mothers',
        '/raid/work/GZ_Foals', '/raid/work/PZ_MTEST', '/raid/work/GIR_Tanya',
        '/raid/work/GZ_Siva', '/raid/work/Wildebeest', '/raid/work/sonograms',
        '/raid/work/MISC_Jan12', '/raid/work/GZ_Master0',
        '/raid/work/LF_OPTIMIZADAS_NI_V_E', '/raid/work/LF_Bajo_bonito',
        '/raid/work/Frogs', '/raid/work/GZ_ALL', '/raid/work/JAG_Kelly',
        '/raid/work/NAUT_test (copy)', '/raid/work/WS_hard',
        '/raid/work/WY_Toads', '/raid/work/NAUT_Dan',
        '/raid/work/LF_WEST_POINT_OPTIMIZADAS', '/raid/work/Seals',
        '/raid/work/Rhinos_Stewart', '/raid/work/Elephants_Stewart',
        '/raid/work/NAUT_test',
    ]
    import ibeis
    from ibeis.init import sysres
    import os
    import utool as ut  # NOQA
    from os.path import join
    from ibeis.dbio import ingest_hsdb
    import ibeis.other.dbinfo
    ibeis.other.dbinfo.rrr()
    workdir = sysres.get_workdir()
    # Every HotSpotter-style database under the workdir
    dbpath_list = [join(workdir, name) for name in os.listdir(workdir)]
    hsdb_list = [dbpath for dbpath in dbpath_list
                 if ingest_hsdb.is_hsdb(dbpath)]
    #is_ibs_cvt_list = np.array(list(map(is_succesful_convert, dbpath_list)))
    regen_cmds = []
    for hsdb_dpath in hsdb_list:
        if hsdb_dpath in canskip:
            continue
        try:
            ibs = ibeis.opendb(hsdb_dpath)  # NOQA
            print('Succesfully opened hsdb: ' + hsdb_dpath)
            print(ibs.get_dbinfo_str())
        except Exception as ex:
            ut.printex(ex, 'Failed to convert hsdb: ' + hsdb_dpath)
            # Record a command that would regenerate the failing db
            regen_cmds.append('python dev.py --convert --dbdir ' + hsdb_dpath)
    print('\n'.join(regen_cmds))
Example #58
0
def get_cmc_feasible_sequences(spell_list, max_avail_cmc):
    """Enumerate spell combinations whose total cmc fits the available mana."""
    # Get spells castable on their own
    feasible_spells = [spell for spell in spell_list
                       if spell.cmc <= max_avail_cmc]
    cmc_feasible_sequences = []
    # Try every combination size, keeping combos within the mana budget
    for size in range(1, len(feasible_spells) + 1):
        for combo in itertools.combinations(feasible_spells, size):
            total = sum([spell.cmc for spell in combo])
            if total <= max_avail_cmc:
                cmc_feasible_sequences.append(combo)
    return cmc_feasible_sequences
Example #59
0
 def dismiss_all(self, event=None):
     """ All unknown annotations are given DIFFERENT new names """
     ibs = self.ibs
     aids = self.all_aid_list
     # Select only the annotations that are currently unknown
     unknown_flags = ibs.is_aid_unknown(aids)
     unknown_aids = [aid for aid, flag in zip(aids, unknown_flags) if flag]
     # Rename annotations
     ibs.set_annot_names_to_different_new_names(unknown_aids)
     self.update_callback()
     self.backend_callback()
     self.show_page()