Example 1
def get_photobomber_map(ibs, aids, aid_to_nid=None):
    """
    Builds a map of which names photobomb other names.

    python -m wbia.gui.id_review_api --test-test_review_widget --show --db PZ_MTEST -a default:qindex=0

    >>> import wbia
    >>> dbdir = ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
    >>> ibs = wbia.opendb(dbdir=dbdir)
    >>> filter_kw = {
    >>>     'multiple': False,
    >>>     'minqual': 'good',
    >>>     'is_known': True,
    >>>     'min_pername': 2,
    >>>     'view': ['right'],
    >>> }
    >>> aids = ibs.filter_annots_general(ibs.get_valid_aids(), filter_kw=filter_kw)
    """
    ams_list = ibs.get_annotmatch_rowids_from_aid(aids)
    flags_list = ibs.unflat_map(
        ut.partial(ibs.get_annotmatch_prop, 'Photobomb'), ams_list)
    pb_ams = ut.zipcompress(ams_list, flags_list)
    has_pb_ams = [len(ams) > 0 for ams in pb_ams]
    pb_ams_ = ut.compress(pb_ams, has_pb_ams)
    # aids_ = ut.compress(aids, has_pb_ams)
    pb_ams_flat = ut.flatten(pb_ams_)

    pb_aids1_ = ibs.get_annotmatch_aid1(pb_ams_flat)
    pb_aids2_ = ibs.get_annotmatch_aid2(pb_ams_flat)

    pb_aid_pairs_ = list(zip(pb_aids1_, pb_aids2_))
    if aid_to_nid is None:
        pb_nid_pairs_ = ibs.unflat_map(ibs.get_annot_nids, pb_aid_pairs_)
    else:
        pb_nid_pairs_ = ibs.unflat_map(ut.partial(ut.take, aid_to_nid),
                                       pb_aid_pairs_)

    # invalid_aid_map = ut.ddict(set)
    # for aid1, aid2 in pb_aid_pairs_:
    #    if aid1 != aid2:
    #        invalid_aid_map[aid1].add(aid2)
    #        invalid_aid_map[aid2].add(aid1)

    invalid_nid_map = ut.ddict(set)
    for nid1, nid2 in pb_nid_pairs_:
        if nid1 != nid2:
            invalid_nid_map[nid1].add(nid2)
            invalid_nid_map[nid2].add(nid1)

    return invalid_nid_map
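The heart of the function above is the symmetric invalidity map built in the final loop. A minimal self-contained sketch of that pattern, with a made-up pair list and collections.defaultdict standing in for ut.ddict (which wraps it):

from collections import defaultdict

# Hypothetical photobomb name pairs standing in for pb_nid_pairs_
pb_nid_pairs = [(1, 2), (2, 3), (1, 1), (3, 1)]

# Build a symmetric map: each name points to the names it photobombs with
invalid_nid_map = defaultdict(set)
for nid1, nid2 in pb_nid_pairs:
    if nid1 != nid2:
        invalid_nid_map[nid1].add(nid2)
        invalid_nid_map[nid2].add(nid1)

print(dict(invalid_nid_map))  # {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}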
Example 2
    def _make_lazy_dict(self):
        """
        CommandLine:
            python -m wbia._wbia_object ObjectScalar0D._make_lazy_dict

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia._wbia_object import *  # NOQA
            >>> import wbia
            >>> ibs = wbia.opendb('testdb1')
            >>> annots = ibs.annots()
            >>> subset = annots.take([0, 2, 5])
            >>> scalar = annots[0]
            >>> assert scalar.obj1d._attrs == annots._attrs
            >>> self = scalar
            >>> print(dir(self))
            >>> metadata = self._make_lazy_dict()
            >>> print('metadata = %r' % (metadata,))
            >>> aid = metadata['aid']
            >>> print('aid = %r' % (aid,))
        """
        metadata = ut.LazyDict()
        for attr in self.obj1d.__vector_attributes__():
            metadata[attr] = ut.partial(getattr, self, attr)
        return metadata
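ut.partial(getattr, self, attr) stores an unevaluated attribute lookup, and ut.LazyDict calls it on first access. A sketch of the same deferral using functools.partial (which ut.partial behaves like) and explicit calls; the Annot class is made up:

from functools import partial

class Annot:
    aid = 42
    name = 'zebra_a'

obj = Annot()
# Bind each attribute lookup without evaluating it yet
metadata = {attr: partial(getattr, obj, attr) for attr in ['aid', 'name']}

# Nothing is fetched until the thunk is called, which is what ut.LazyDict
# does on key access
print(metadata['aid']())   # 42
print(metadata['name']())  # 'zebra_a'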
Example 3
def color_by_nids(graph, unique_nids=None, ibs=None, nid2_color_=None):
    """ Colors edges and nodes by nid """
    # TODO use ut.color_nodes
    import plottool as pt

    ensure_graph_nid_labels(graph, unique_nids, ibs=ibs)
    node_to_nid = nx.get_node_attributes(graph, 'nid')
    unique_nids = ut.unique(node_to_nid.values())
    ncolors = len(unique_nids)
    if ncolors == 1:
        unique_colors = [pt.UNKNOWN_PURP]
    else:
        if nid2_color_ is not None:
            unique_colors = pt.distinct_colors(ncolors + len(nid2_color_) * 2)
        else:
            unique_colors = pt.distinct_colors(ncolors)
    # Find edges and aids strictly between two nids
    nid_to_color = dict(zip(unique_nids, unique_colors))
    if nid2_color_ is not None:
        # HACK NEED TO ENSURE COLORS ARE NOT REUSED
        nid_to_color.update(nid2_color_)
    edge_aids = list(graph.edges())
    edge_nids = ut.unflat_take(node_to_nid, edge_aids)
    flags = [nids[0] == nids[1] for nids in edge_nids]
    flagged_edge_aids = ut.compress(edge_aids, flags)
    flagged_edge_nids = ut.compress(edge_nids, flags)
    flagged_edge_colors = [nid_to_color[nids[0]] for nids in flagged_edge_nids]
    edge_to_color = dict(zip(flagged_edge_aids, flagged_edge_colors))
    node_to_color = ut.map_dict_vals(ut.partial(ut.take, nid_to_color),
                                     node_to_nid)
    nx.set_edge_attributes(graph, 'color', edge_to_color)
    nx.set_node_attributes(graph, 'color', node_to_color)
Example 4
def make_adj_matrix(G):
    # nodes and node2_idx were missing from this snippet; restored from the
    # equivalent nx_make_adj_matrix below
    import utool as ut
    import numpy as np
    nodes = list(G.nodes())
    node2_idx = ut.make_index_lookup(nodes)
    edges = list(G.edges())
    edge2_idx = ut.partial(ut.dict_take, node2_idx)
    uv_list = ut.lmap(edge2_idx, edges)
    A = np.zeros((len(nodes), len(nodes)))
    A[tuple(np.array(uv_list).T)] = 1
    return A
Example 5
def nx_make_adj_matrix(G):
    import utool as ut
    nodes = list(G.nodes())
    node2_idx = ut.make_index_lookup(nodes)
    edges = list(G.edges())
    edge2_idx = ut.partial(ut.dict_take, node2_idx)
    uv_list = ut.lmap(edge2_idx, edges)
    A = np.zeros((len(nodes), len(nodes)))
    A[tuple(np.array(uv_list).T)] = 1
    return A
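A quick usage sketch of the same adjacency construction (this also covers make_adj_matrix in Example 4, which is identical), with plain-Python stand-ins for the ut helpers: a dict comprehension replaces ut.make_index_lookup, and a list comprehension replaces the ut.partial(ut.dict_take, node2_idx) mapper. Note that, as in the function above, only the (u, v) direction is written:

import networkx as nx
import numpy as np

G = nx.Graph([(0, 1), (1, 2), (2, 0)])
nodes = list(G.nodes())
node2_idx = {node: idx for idx, node in enumerate(nodes)}
# Map each edge (u, v) to its index pair
uv_list = [[node2_idx[u], node2_idx[v]] for u, v in G.edges()]
A = np.zeros((len(nodes), len(nodes)))
A[tuple(np.array(uv_list).T)] = 1
print(A)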
Example 6
    def _make_cfgstr(extr, edges):
        ibs = extr.ibs
        edge_uuids = ibs.unflat_map(ibs.get_annot_visual_uuids, edges)
        edge_hashid = ut.hashid_arr(edge_uuids, 'edges')

        _cfg_lbl = ut.partial(ut.repr2, si=True, itemsep='', kvsep=':')
        match_configclass = ibs.depc_annot.configclass_dict['pairwise_match']

        cfgstr = '_'.join([
            edge_hashid,
            _cfg_lbl(extr.match_config),
            _cfg_lbl(extr.pairfeat_cfg),
            'global(' + _cfg_lbl(extr.global_keys) + ')',
            'pairwise_match_version=%r' % (match_configclass().version,)
        ])
        return cfgstr
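The _cfg_lbl partial pre-binds formatting options so every config dict is serialized the same way before joining into a cache key. A sketch of that idea using json and hashlib as stand-ins for ut.repr2 and ut.hashid_arr; the config dicts are made up:

from functools import partial
import hashlib
import json

# Stand-in for ut.repr2 with fixed options: a deterministic, sorted
# serialization of a config dict
_cfg_lbl = partial(json.dumps, sort_keys=True, separators=(',', ':'))

match_config = {'ratio_thresh': 0.625, 'sv_on': True}
pairfeat_cfg = {'summary_ops': ['sum', 'mean']}

# Stand-in for ut.hashid_arr: hash whatever identifies the edges
edge_hashid = 'edges-' + hashlib.sha1(b'edge-uuids-go-here').hexdigest()[:8]

cfgstr = '_'.join([
    edge_hashid,
    _cfg_lbl(match_config),
    _cfg_lbl(pairfeat_cfg),
])
print(cfgstr)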
Example 7
def add_split(dataset, key, idxs):
    print('[dataset] adding split %r' % (key,))
    # Build subset filenames
    ut.ensuredir(dataset.split_dpath)
    ext = dataset._ext
    fmtdict = dict(key=key, ext=ext, size=len(idxs))
    fmtstr = dataset.get_split_fmtstr(forward=True)
    splitset = {
        type_: join(dataset.split_dpath, fmtstr.format(type_=type_, **fmtdict))
        for type_ in ['data', 'labels', 'metadata']
    }
    # Partition data into the subset
    part_dict = {
        'data': dataset.data.take(idxs, axis=0),
        'labels': dataset.labels.take(idxs, axis=0),
    }
    if dataset.metadata is not None:
        taker = ut.partial(ut.take, index_list=idxs)
        part_dict['metadata'] = ut.map_dict_vals(taker, dataset.metadata)
    # Write splitset data to files
    for type_ in part_dict.keys():
        ut.save_data(splitset[type_], part_dict[type_])
    # Register filenames with dataset
    dataset.fpath_dict[key] = splitset
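The metadata branch maps an index-bound taker over every dict value. A self-contained sketch with minimal stand-ins for ut.take and ut.map_dict_vals:

from functools import partial
import numpy as np

idxs = [0, 2]
metadata = {
    'fname': ['a.png', 'b.png', 'c.png'],
    'gps': np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]]),
}

def take(items, index_list):
    # Minimal stand-in for ut.take: index a sequence by a list of indices
    return [items[ix] for ix in index_list]

taker = partial(take, index_list=idxs)
# Minimal stand-in for ut.map_dict_vals: apply a function to every value
part = {key: taker(val) for key, val in metadata.items()}
print(part['fname'])  # ['a.png', 'c.png']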
Example 8
def rhombicuboctahedron():
    import vtk
    # First, you need to store the vertex locations.

    import numpy as np
    fu = 1  # full unit
    hu = .5  # half unit
    d = np.sqrt((fu ** 2) / 2)  # diag
    hh = hu + d  # half height

    # left view faces us

    import utool as ut
    import six
    import itertools
    counter = ut.partial(six.next, itertools.count(0))

    vertex_locations = vtk.vtkPoints()
    vertex_locations.SetNumberOfPoints(24)

    p1, p2, p3 = np.array([
        (-hu, -hu, hh),
        ( hu, -hu, hh),
        ( hu,  hu, hh),
        (-hu,  hu, hh),
    ]).T
    plist = [p1, p2, p3]

    # three of the six main faces
    #perms = list(itertools.permutations((0, 1, 2), 3))
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]

    vertex_array = []

    # VERTEXES
    # left, up, back
    vplist = ['L', 'U', 'B', 'R', 'D', 'F']
    vpdict = {}
    print('perms = %r' % (perms,))
    for x in range(3):
        vp = vplist[x]
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    # three more of the six main faces
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]
    plist[-1] = -plist[-1]
    # right, down, front
    print('perms = %r' % (perms,))
    for x in range(3):
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vp = vplist[x + 3]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)

    polygon_faces = vtk.vtkCellArray()

    face_dict = {
        'L': [vpdict['L'][0], vpdict['L'][1], vpdict['L'][2], vpdict['L'][3]],
        'D': [vpdict['D'][0], vpdict['D'][1], vpdict['D'][2], vpdict['D'][3]],
        'U': [vpdict['U'][0], vpdict['U'][1], vpdict['U'][2], vpdict['U'][3]],
        'F': [vpdict['F'][0], vpdict['F'][1], vpdict['F'][2], vpdict['F'][3]],
        'R': [vpdict['R'][0], vpdict['R'][1], vpdict['R'][2], vpdict['R'][3]],
        'B': [vpdict['B'][0], vpdict['B'][1], vpdict['B'][2], vpdict['B'][3]],
        'FL': [ vpdict['L'][0], vpdict['L'][3], vpdict['F'][2], vpdict['F'][3], ],
        'BL': [ vpdict['L'][1], vpdict['L'][2], vpdict['B'][2], vpdict['B'][3], ],
        'UL': [ vpdict['L'][2], vpdict['L'][3], vpdict['U'][3], vpdict['U'][2], ],
        'DL': [ vpdict['L'][0], vpdict['L'][1], vpdict['D'][2], vpdict['D'][3], ],
        'UFL': [ vpdict['L'][3], vpdict['F'][2], vpdict['U'][3], ],
        'DFL': [ vpdict['L'][0], vpdict['F'][3], vpdict['D'][3], ],
        'UBL': [ vpdict['L'][2], vpdict['B'][2], vpdict['U'][2], ],
        'DBL': [ vpdict['L'][1], vpdict['B'][3], vpdict['D'][2], ],
        'UFR': [ vpdict['R'][3], vpdict['F'][1], vpdict['U'][0], ],
        'DFR': [ vpdict['R'][0], vpdict['F'][0], vpdict['D'][0], ],
        'UBR': [ vpdict['R'][2], vpdict['B'][1], vpdict['U'][1], ],
        'DBR': [ vpdict['R'][1], vpdict['B'][0], vpdict['D'][1], ],
        'FR': [ vpdict['R'][3], vpdict['R'][0], vpdict['F'][0], vpdict['F'][1], ],
        'BR': [ vpdict['R'][2], vpdict['R'][1], vpdict['B'][0], vpdict['B'][1], ],
        'UR': [ vpdict['R'][3], vpdict['R'][2], vpdict['U'][1], vpdict['U'][0], ],
        'DR': [ vpdict['R'][1], vpdict['R'][0], vpdict['D'][0], vpdict['D'][1], ],
        'DF': [ vpdict['F'][0], vpdict['F'][3], vpdict['D'][3], vpdict['D'][0], ],
        'DB': [ vpdict['B'][3], vpdict['B'][0], vpdict['D'][1], vpdict['D'][2], ],
        'UF': [ vpdict['F'][1], vpdict['F'][2], vpdict['U'][3], vpdict['U'][0], ],
        'UB': [ vpdict['B'][2], vpdict['B'][1], vpdict['U'][1], vpdict['U'][2], ],
    }

    for key, vert_ids in face_dict.items():
        #if key != 'L':
        #    continue
        if len(vert_ids) == 4:
            q = vtk.vtkQuad()
        else:
            q = vtk.vtkTriangle()
        for count, idx in enumerate(vert_ids):
            q.GetPointIds().SetId(count, idx)
        polygon_faces.InsertNextCell(q)

    # Next you create a vtkPolyData to store your face and vertex
    # information that represents your polyhedron.
    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)
    pd.SetPolys(polygon_faces)

    face_stream = vtk.vtkIdList()
    face_stream.InsertNextId(polygon_faces.GetNumberOfCells())
    vertex_list = vtk.vtkIdList()

    polygon_faces.InitTraversal()
    while polygon_faces.GetNextCell(vertex_list) == 1:
        face_stream.InsertNextId(vertex_list.GetNumberOfIds())

        for j in range(vertex_list.GetNumberOfIds()):
            face_stream.InsertNextId(vertex_list.GetId(j))

    ug = vtk.vtkUnstructuredGrid()
    ug.SetPoints(vertex_locations)
    ug.InsertNextCell(vtk.VTK_POLYHEDRON, face_stream)

    #writer = vtk.vtkUnstructuredGridWriter()
    #writer.SetFileName("rhombicuboctahedron.vtk")
    ##writer.SetInputData(ug)
    #writer.SetInput(ug)
    #writer.Write()

    mapper = vtk.vtkDataSetMapper()
    mapper.SetInput(ug)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    if 1:
        # Read the image data from a file
        import utool as ut

        textureCoords = vtk.vtkFloatArray()
        textureCoords.SetNumberOfComponents(3)
        #coords = ut.take(vertex_array, face_dict['L'])
        #for coord in coords:
        #    textureCoords.InsertNextTuple(tuple(coord))
        textureCoords.InsertNextTuple((0, 0, 0))
        textureCoords.InsertNextTuple((1, 0, 0))
        textureCoords.InsertNextTuple((1, 1, 0))
        textureCoords.InsertNextTuple((0, 1, 0))

        # Create texture object
        fpath = ut.grab_test_imgpath('zebra.png')
        reader = vtk.vtkPNGReader()
        reader.SetFileName(fpath)

        texture = vtk.vtkTexture()
        texture.SetInput(reader.GetOutput())
        texture.RepeatOff()
        texture.InterpolateOff()

        ptdat = pd.GetPointData()
        ptdat.SetTCoords(textureCoords)

        actor.SetTexture(texture)

    ren = vtk.vtkRenderer()
    ren.AddActor(actor)

    renw = vtk.vtkRenderWindow()
    renw.AddRenderer(ren)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renw)

    ren.ResetCamera()
    renw.Render()
    iren.Start()
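The counter near the top of the function is a compact id generator used to assign consecutive vertex ids. A tiny demonstration of the idiom (six.next is plain next on Python 3, and ut.partial behaves like functools.partial):

from functools import partial
import itertools

# Each call returns the next integer id
counter = partial(next, itertools.count(0))

ids = [counter() for _ in range(4)]
print(ids)        # [0, 1, 2, 3]
print(counter())  # 4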
Example 9
def printex(ex, msg='[!?] Caught exception', prefix=None, key_list=[],
            locals_=None, iswarning=False, tb=TB, pad_stdout=True, N=0,
            use_stdout=False, reraise=False, msg_=None, keys=None,
            colored=None):
    """
    Prints (and/or logs) an exception with relevant info

    Args:
        ex (Exception): exception to print
        msg (str): a message to display to the user
        keys (None): a list of strings denoting variables or expressions of interest
        iswarning (bool): prints as a warning rather than an error if True (defaults to False)
        tb (bool): if True prints the traceback in the error message
        pad_stdout (bool): separate the error message from the rest of stdout with newlines
        prefix (None):
        locals_ (None):
        N (int):
        use_stdout (bool):
        reraise (bool):
        msg_ (None):
        key_list (list): DEPRECATED, use keys

    Returns:
        None
    """
    import utool as ut
    if isinstance(ex, MemoryError):
        ut.print_resource_usage()
    if keys is not None:
        # shorthand for key_list
        key_list = keys
    # Get error prefix and local info
    if prefix is None:
        prefix = get_caller_prefix(aserror=True, N=N)
    if locals_ is None:
        locals_ = get_caller_locals(N=N)
    # build exception message
    if msg is True:
        key_list = get_caller_locals()
        msg = msg_
    exstr = formatex(ex, msg, prefix, key_list, locals_, iswarning, tb=tb, colored=colored)
    # get requested print function
    if use_stdout:
        def print_func(*args):
            msg = ', '.join(list(map(six.text_type, args)))
            sys.stdout.write(msg + '\n')
            sys.stdout.flush()
    else:
        print_func = ut.partial(ut.colorprint, color='yellow' if iswarning else 'red')
        # print_func = print
    if pad_stdout:
        print_func('\n+------\n')
    # print the exception
    print_func(exstr)
    if pad_stdout:
        print_func('\nL______\n')
    # If you don't know where an error is coming from, raise-all
    if (reraise and not iswarning) or RAISE_ALL:
        sys.stdout.flush()
        sys.stderr.flush()
        raise ex
    if ut.get_argflag('--exit-on-error'):
        print("WARNING: don't use this flag. Some errors are meant to be caught")
        ut.print_traceback()
        print('REQUESTED EXIT ON ERROR')
        sys.exit(1)
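The print_func selection shows another common ut.partial use: pre-binding options onto a printer. A sketch with a hypothetical colorprint standing in for ut.colorprint:

from functools import partial
import sys

def colorprint(msg, color='red'):
    # Minimal stand-in for ut.colorprint using ANSI escape codes
    codes = {'red': '\x1b[31m', 'yellow': '\x1b[33m'}
    sys.stdout.write(codes.get(color, '') + msg + '\x1b[0m\n')

iswarning = True
print_func = partial(colorprint, color='yellow' if iswarning else 'red')
print_func('something went wrong')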
Example 10
def monkey_to_str_columns(self):
    frame = self.tr_frame
    highlight_func = 'max'
    highlight_func = ut.partial(np.argmax, axis=1)
    highlight_cols = self.highlight_cols

    perrow_colxs = highlight_func(frame[highlight_cols].values)
    n_rows = len(perrow_colxs)
    n_cols = len(highlight_cols)
    shape = (n_rows, n_cols)
    flat_idxs = np.ravel_multi_index((np.arange(n_rows), perrow_colxs), shape)
    flags2d = np.zeros(shape, dtype=np.int32)
    flags2d.ravel()[flat_idxs] = 1

    # np.unravel_index(flat_idxs, shape)

    def color_func(val, level):
        if level:
            return ut.color_text(val, 'red')
        else:
            return val

    _make_fixed_width = pd.formats.format._make_fixed_width
    frame = self.tr_frame
    str_index = self._get_formatted_index(frame)
    str_columns = self._get_formatted_column_labels(frame)
    if self.header:
        stringified = []
        for i, c in enumerate(frame):
            cheader = str_columns[i]
            max_colwidth = max(self.col_space or 0,
                               *(self.adj.len(x) for x in cheader))
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(fmt_values,
                                           self.justify,
                                           minimum=max_colwidth,
                                           adj=self.adj)
            max_len = max(np.max([self.adj.len(x) for x in fmt_values]),
                          max_colwidth)
            cheader = self.adj.justify(cheader, max_len, mode=self.justify)

            # Apply custom coloring
            # cflags = flags2d.T[i]
            # fmt_values = [color_func(val, level) for val, level in zip(fmt_values, cflags)]

            stringified.append(cheader + fmt_values)
    else:
        stringified = []
        for i, c in enumerate(frame):
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(fmt_values,
                                           self.justify,
                                           minimum=(self.col_space or 0),
                                           adj=self.adj)

            stringified.append(fmt_values)

    strcols = stringified
    if self.index:
        strcols.insert(0, str_index)

    # Add ... to signal truncated
    truncate_h = self.truncate_h
    truncate_v = self.truncate_v

    if truncate_h:
        col_num = self.tr_col_num
        # infer from column header
        col_width = self.adj.len(strcols[self.tr_size_col][0])
        strcols.insert(self.tr_col_num + 1,
                       ['...'.center(col_width)] * (len(str_index)))
    if truncate_v:
        n_header_rows = len(str_index) - len(frame)
        row_num = self.tr_row_num
        for ix, col in enumerate(strcols):
            # infer from above row
            cwidth = self.adj.len(strcols[ix][row_num])
            is_dot_col = False
            if truncate_h:
                is_dot_col = ix == col_num + 1
            if cwidth > 3 or is_dot_col:
                my_str = '...'
            else:
                my_str = '..'

            if ix == 0:
                dot_mode = 'left'
            elif is_dot_col:
                cwidth = self.adj.len(strcols[self.tr_size_col][0])
                dot_mode = 'center'
            else:
                dot_mode = 'right'
            dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
            strcols[ix].insert(row_num + n_header_rows, dot_str)

    for cx_ in highlight_cols:
        cx = cx_ + bool(self.header)
        col = strcols[cx]
        for rx, val in enumerate(col[1:], start=1):
            strcols[cx][rx] = color_func(val, flags2d[rx - 1, cx - 1])

    return strcols
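The flags2d construction at the top is a pure numpy trick: flag each row's argmax by converting (row, argmax-col) pairs into flat indices. A standalone sketch:

import numpy as np

values = np.array([[0.2, 0.9, 0.1],
                   [0.7, 0.3, 0.5]])
# Per-row column index of the maximum (the bound highlight_func above)
perrow_colxs = np.argmax(values, axis=1)
n_rows, n_cols = values.shape
# Convert (row, argmax-col) pairs to flat indices, then flag them
flat_idxs = np.ravel_multi_index((np.arange(n_rows), perrow_colxs), values.shape)
flags2d = np.zeros(values.shape, dtype=np.int32)
flags2d.ravel()[flat_idxs] = 1
print(flags2d)
# [[0 1 0]
#  [1 0 0]]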
Example 11
    def show_graph(
            infr,
            graph=None,
            use_image=False,
            update_attrs=True,
            with_colorbar=False,
            pnum=(1, 1, 1),
            zoomable=True,
            pickable=False,
            **kwargs,
    ):
        r"""
        Args:
            infr (?):
            graph (None): (default = None)
            use_image (bool): (default = False)
            update_attrs (bool): (default = True)
            with_colorbar (bool): (default = False)
            pnum (tuple):  plot number(default = (1, 1, 1))
            zoomable (bool): (default = True)
            pickable (bool): (default = False)
            **kwargs: verbose, with_labels, fnum, layout, ax, pos, img_dict,
                      title, layoutkw, framewidth, modify_ax, as_directed,
                      hacknoedge, hacknode, node_labels, arrow_width, fontsize,
                      fontweight, fontname, fontfamilty, fontproperties

        CommandLine:
            python -m wbia.algo.graph.mixin_viz GraphVisualization.show_graph --show

        Example:
            >>> # xdoctest: +REQUIRES(module:pygraphviz)
            >>> # ENABLE_DOCTEST
            >>> from wbia.algo.graph.mixin_viz import *  # NOQA
            >>> from wbia.algo.graph import demo
            >>> import wbia.plottool as pt
            >>> infr = demo.demodata_infr(ccs=ut.estarmap(
            >>>    range, [(1, 6), (6, 10), (10, 13), (13, 15), (15, 16),
            >>>            (17, 20)]))
            >>> pnum_ = pt.make_pnum_nextgen(nRows=1, nCols=3)
            >>> infr.show_graph(show_cand=True, simple_labels=True, pickable=True, fnum=1, pnum=pnum_())
            >>> infr.add_feedback((1, 5), INCMP)
            >>> infr.add_feedback((14, 18), INCMP)
            >>> infr.refresh_candidate_edges()
            >>> infr.show_graph(show_cand=True, simple_labels=True, pickable=True, fnum=1, pnum=pnum_())
            >>> infr.add_feedback((17, 18), NEGTV)  # add inconsistency
            >>> infr.apply_nondynamic_update()
            >>> infr.show_graph(show_cand=True, simple_labels=True, pickable=True, fnum=1, pnum=pnum_())
            >>> ut.show_if_requested()
        """
        import wbia.plottool as pt

        if graph is None:
            graph = infr.graph
        # kwargs['fontsize'] = kwargs.get('fontsize', 8)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            # default_update_kw = ut.get_func_kwargs(infr.update_visual_attrs)
            # update_kw = ut.update_existing(default_update_kw, kwargs)
            # infr.update_visual_attrs(**update_kw)
            if update_attrs:
                infr.update_visual_attrs(graph=graph, **kwargs)
            verbose = kwargs.pop('verbose', infr.verbose)
            pt.show_nx(
                graph,
                layout='custom',
                as_directed=False,
                modify_ax=False,
                use_image=use_image,
                pnum=pnum,
                verbose=verbose,
                **kwargs,
            )
            if zoomable:
                pt.zoom_factory()
                pt.pan_factory(pt.gca())

        # if with_colorbar:
        #     # Draw a colorbar
        #     _normal_ticks = np.linspace(0, 1, num=11)
        #     _normal_scores = np.linspace(0, 1, num=500)
        #     _normal_colors = infr.get_colored_weights(_normal_scores)
        #     cb = pt.colorbar(_normal_scores, _normal_colors, lbl='weights',
        #                      ticklabels=_normal_ticks)

        #     # point to threshold location
        #     thresh = None
        #     if thresh is not None:
        #         xy = (1, thresh)
        #         xytext = (2.5, .3 if thresh < .5 else .7)
        #         cb.ax.annotate('threshold', xy=xy, xytext=xytext,
        #                        arrowprops=dict(
        #                            alpha=.5, fc="0.6",
        #                            connectionstyle="angle3,angleA=90,angleB=0"),)

        # infr.graph
        if graph.graph.get('dark_background', None):
            pt.dark_background(force=True)

        if pickable:
            fig = pt.gcf()
            fig.canvas.mpl_connect('pick_event', ut.partial(on_pick,
                                                            infr=infr))
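The pick_event hookup at the end is a general matplotlib idiom: functools.partial (which ut.partial wraps) lets extra context ride along with the callback. A minimal headless sketch, with a hypothetical on_pick and a string standing in for the infr object:

from functools import partial
import matplotlib
matplotlib.use('Agg')  # headless backend for the sketch
import matplotlib.pyplot as plt

def on_pick(event, infr=None):
    # Extra context arrives through the partial-bound keyword
    print('picked artist %r with context %r' % (event.artist, infr))

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 0], 'o', picker=5)
fig.canvas.mpl_connect('pick_event', partial(on_pick, infr='my-inference-object'))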
Example 12
    def conditional_knn(nnindexer, qfx2_vec, num_neighbors, invalid_axs):
        """
            >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
            >>> qreq_ = ibeis.testdata_qreq_(defaultdb='seaturtles')
            >>> qreq_.load_indexer()
            >>> qfx2_vec = qreq_.ibs.get_annot_vecs(qreq_.qaids[0])
            >>> num_neighbors = 2
            >>> nnindexer = qreq_.indexer
            >>> ibs = qreq_.ibs
            >>> qaid = 1
            >>> qencid = ibs.get_annot_encounter_text([qaid])[0]
            >>> ax2_encid = np.array(ibs.get_annot_encounter_text(nnindexer.ax2_aid))
            >>> invalid_axs = np.where(ax2_encid == qencid)[0]
        """
        #import ibeis
        import itertools

        def in1d_shape(arr1, arr2):
            return np.in1d(arr1, arr2).reshape(arr1.shape)

        get_neighbors = ut.partial(nnindexer.flann.nn_index,
                                   checks=nnindexer.checks,
                                   cores=nnindexer.cores)

        # Alloc space for final results
        K = num_neighbors
        shape = (len(qfx2_vec), K)
        qfx2_idx = np.full(shape, -1, dtype=np.int32)
        qfx2_rawdist = np.full(shape, np.nan, dtype=np.float64)
        qfx2_truek = np.full(shape, -1, dtype=np.int32)

        # Make a set of temporary indexes and loop variables
        limit = None
        limit = 4
        K_ = K
        tx2_qfx = np.arange(len(qfx2_vec))
        tx2_vec = qfx2_vec
        iter_count = 0
        for iter_count in itertools.count():
            if limit is not None and iter_count >= limit:
                break
            # Find a set of neighbors
            (tx2_idx, tx2_rawdist) = get_neighbors(tx2_vec, K_)
            tx2_idx = vt.atleast_nd(tx2_idx, 2)
            tx2_rawdist = vt.atleast_nd(tx2_rawdist, 2)
            tx2_ax = nnindexer.get_nn_axs(tx2_idx)
            # Check to see if they meet the criteria
            tx2_invalid = in1d_shape(tx2_ax, invalid_axs)
            tx2_valid = np.logical_not(tx2_invalid)
            tx2_num_valid = tx2_valid.sum(axis=1)
            tx2_notdone = tx2_num_valid < K
            tx2_done = np.logical_not(tx2_notdone)

            # Move completely valid queries into the results
            if np.any(tx2_done):
                done_qfx = tx2_qfx.compress(tx2_done, axis=0)
                # Need to parse which columns are the completed ones
                done_valid_ = tx2_valid.compress(tx2_done, axis=0)
                done_rawdist_ = tx2_rawdist.compress(tx2_done, axis=0)
                done_idx_ = tx2_idx.compress(tx2_done, axis=0)
                # Get the complete valid indices
                rowxs, colxs = np.where(done_valid_)
                unique_rows, groupxs = vt.group_indices(rowxs)
                first_k_groupxs = [groupx[0:K] for groupx in groupxs]
                chosen_xs = np.hstack(first_k_groupxs)
                multi_index = (rowxs.take(chosen_xs), colxs.take(chosen_xs))
                flat_xs = np.ravel_multi_index(multi_index, done_valid_.shape)
                done_rawdist = done_rawdist_.take(flat_xs).reshape((-1, K))
                done_idx = done_idx_.take(flat_xs).reshape((-1, K))
                # Write done results in output
                qfx2_idx[done_qfx, :] = done_idx
                qfx2_rawdist[done_qfx, :] = done_rawdist
                qfx2_truek[done_qfx, :] = vt.apply_grouping(
                    colxs, first_k_groupxs)
            if np.all(tx2_done):
                break
            K_increase = (K - tx2_num_valid.min())
            K_ += K_increase
            tx2_qfx = tx2_qfx.compress(tx2_notdone, axis=0)
            tx2_vec = tx2_vec.compress(tx2_notdone, axis=0)

        if nnindexer.max_distance_sqrd is not None:
            qfx2_dist = np.divide(qfx2_rawdist, nnindexer.max_distance_sqrd)
        else:
            qfx2_dist = qfx2_rawdist
        return (qfx2_idx, qfx2_dist, iter_count)
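The get_neighbors partial binds the search parameters once so the retry loop only varies the vectors and K. A sketch with a hypothetical nn_index standing in for flann.nn_index (the real pyflann call has the same shape contract but more options):

from functools import partial
import numpy as np

def nn_index(vecs, K, checks=32, cores=1):
    # Hypothetical stand-in: fake neighbor ids and distances with the
    # same (n, K) shapes the real indexer returns
    n = len(vecs)
    idx = np.tile(np.arange(K), (n, 1))
    dist = np.ones((n, K))
    return idx, dist

# Bind the search parameters once; later calls only vary (vecs, K)
get_neighbors = partial(nn_index, checks=128, cores=4)
idx, dist = get_neighbors(np.zeros((3, 8)), 2)
print(idx.shape, dist.shape)  # (3, 2) (3, 2)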
Example 13
def get_annotmatch_subgraph(ibs):
    r"""
    http://bokeh.pydata.org/en/latest/
    https://github.com/jsexauer/networkx_viewer

    TODO: Need a special visualization
        In the web I need:
            * graph of annotations matches.
            * can move them around.
            * edit lines between them.
            * http://stackoverflow.com/questions/15373530/web-graph-visualization-tool

            This should  share functionality with a name view.

    Args:
        ibs (IBEISController):  ibeis controller object

    CommandLine:
        python -m ibeis.annotmatch_funcs --exec-get_annotmatch_subgraph --show

        # Networkx example
        python -m ibeis.viz.viz_graph --test-show_chipmatch_graph:0 --show

    Ignore:

        from ibeis import viz

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.annotmatch_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> result = get_annotmatch_subgraph(ibs)
        >>> ut.show_if_requested()
    """
    #import ibeis
    #ibs = ibeis.opendb(db='PZ_MTEST')
    #rowids = ibs._get_all_annotmatch_rowids()
    #aids1 = ibs.get_annotmatch_aid1(rowids)
    #aids2 = ibs.get_annotmatch_aid2(rowids)
    #
    #
    nids = ibs.get_valid_nids()
    nids = nids[0:5]
    aids_list = ibs.get_name_aids(nids)
    import itertools
    unflat_edges = (list(itertools.product(aids, aids)) for aids in aids_list)
    aid_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]]
    aids1 = ut.get_list_column(aid_pairs, 0)
    aids2 = ut.get_list_column(aid_pairs, 1)

    # Enumerate annotmatch properties
    rng = np.random.RandomState(0)
    edge_props = {
        'weight': rng.rand(len(aids1)),
        'reviewer_confidence': rng.rand(len(aids1)),
        'algo_confidence': rng.rand(len(aids1)),
    }

    # Remove data that does not need to be visualized
    # (dont show all the aids if you dont have to)
    thresh = .5
    flags = edge_props['weight'] > thresh
    aids1_ = ut.compress(aids1, flags)
    aids2_ = ut.compress(aids2, flags)
    chosen_props = ut.dict_subset(edge_props, ['weight'])
    edge_props = ut.map_dict_vals(ut.partial(ut.compress, flag_list=flags), chosen_props)

    edge_keys = list(edge_props.keys())
    edge_vals = ut.dict_take(edge_props, edge_keys)
    edge_attr_list = [dict(zip(edge_keys, vals_)) for vals_ in zip(*edge_vals)]

    unique_aids = list(set(aids1_ + aids2_))
    # Make a graph between the chips
    nodes = unique_aids
    edges = list(zip(aids1_, aids2_, edge_attr_list))
    import networkx as nx
    graph = nx.DiGraph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)
    from ibeis.viz import viz_graph
    fnum = None
    #zoom = kwargs.get('zoom', .4)
    viz_graph.viz_netx_chipgraph(ibs, graph, fnum=fnum, with_images=True, augment_graph=False)
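The thresholding-and-graph-building portion is easy to isolate. A runnable sketch with the same seeded random weights and compress-by-flags step, using a list comprehension in place of ut.compress:

import itertools
import networkx as nx
import numpy as np

aids = [1, 2, 3]
aid_pairs = [t for t in itertools.product(aids, aids) if t[0] != t[1]]
rng = np.random.RandomState(0)
weights = rng.rand(len(aid_pairs))

# Keep only edges above the threshold, mirroring the compress step above
thresh = .5
edges = [(u, v, {'weight': w})
         for (u, v), w in zip(aid_pairs, weights) if w > thresh]

graph = nx.DiGraph()
graph.add_nodes_from(aids)
graph.add_edges_from(edges)
print(graph.number_of_edges())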
Example 14
def monkey_to_str_columns(self):
    import numpy as np
    import pandas as pd
    import utool as ut
    frame = self.tr_frame
    highlight_func = 'max'
    highlight_func = ut.partial(np.argmax, axis=1)
    highlight_cols = self.highlight_cols

    perrow_colxs = highlight_func(frame[highlight_cols].values)
    n_rows = len(perrow_colxs)
    n_cols = len(highlight_cols)
    shape = (n_rows, n_cols)
    flat_idxs = np.ravel_multi_index((np.arange(n_rows), perrow_colxs), shape)
    flags2d = np.zeros(shape, dtype=np.int32)
    flags2d.ravel()[flat_idxs] = 1
    # np.unravel_index(flat_idxs, shape)

    def color_func(val, level):
        if level:
            return ut.color_text(val, 'red')
        else:
            return val

    _make_fixed_width = pd.formats.format._make_fixed_width
    frame = self.tr_frame
    str_index = self._get_formatted_index(frame)
    str_columns = self._get_formatted_column_labels(frame)
    if self.header:
        stringified = []
        for i, c in enumerate(frame):
            cheader = str_columns[i]
            max_colwidth = max(self.col_space or 0, *(self.adj.len(x)
                                                      for x in cheader))
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(fmt_values, self.justify,
                                           minimum=max_colwidth,
                                           adj=self.adj)
            max_len = max(np.max([self.adj.len(x) for x in fmt_values]),
                          max_colwidth)
            cheader = self.adj.justify(cheader, max_len, mode=self.justify)

            # Apply custom coloring
            # cflags = flags2d.T[i]
            # fmt_values = [color_func(val, level) for val, level in zip(fmt_values, cflags)]

            stringified.append(cheader + fmt_values)
    else:
        stringified = []
        for i, c in enumerate(frame):
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(fmt_values, self.justify,
                                           minimum=(self.col_space or 0),
                                           adj=self.adj)

            stringified.append(fmt_values)

    strcols = stringified
    if self.index:
        strcols.insert(0, str_index)

    # Add ... to signal truncated
    truncate_h = self.truncate_h
    truncate_v = self.truncate_v

    if truncate_h:
        col_num = self.tr_col_num
        # infer from column header
        col_width = self.adj.len(strcols[self.tr_size_col][0])
        strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] *
                       (len(str_index)))
    if truncate_v:
        n_header_rows = len(str_index) - len(frame)
        row_num = self.tr_row_num
        for ix, col in enumerate(strcols):
            # infer from above row
            cwidth = self.adj.len(strcols[ix][row_num])
            is_dot_col = False
            if truncate_h:
                is_dot_col = ix == col_num + 1
            if cwidth > 3 or is_dot_col:
                my_str = '...'
            else:
                my_str = '..'

            if ix == 0:
                dot_mode = 'left'
            elif is_dot_col:
                cwidth = self.adj.len(strcols[self.tr_size_col][0])
                dot_mode = 'center'
            else:
                dot_mode = 'right'
            dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
            strcols[ix].insert(row_num + n_header_rows, dot_str)

    for cx_ in highlight_cols:
        cx = cx_ + bool(self.header)
        col = strcols[cx]
        for rx, val in enumerate(col[1:], start=1):
            strcols[cx][rx] = color_func(val, flags2d[rx - 1, cx - 1])

    return strcols
Example 15
def to_string_monkey(df, highlight_cols=None):
    """  monkey patch to pandas to highlight the maximum value in specified
    cols of a row

    df = pd.DataFrame(
        np.array([[ 0.87031269,  0.86886931,  0.86842073,  0.91981975],
                  [ 0.34196218,  0.34289191,  0.34206377,  0.34252863],
                  [ 0.34827074,  0.34829214,  0.35032833,  0.28857126],
                  [ 0.76979453,  0.77214855,  0.77547518,  0.38850962]]),
        columns=['sum(fgweights)', 'sum(weighted_ratio)', 'len(matches)', 'score_lnbnn_1vM'],
        index=['match_state(match-v-rest)', 'match_state(nomatch-v-rest)', 'match_state(notcomp-v-rest)', 'photobomb_state']
    )
    highlight_cols = 'all'

    ut.editfile(pd.formats.printing.adjoin)

    """
    import pandas as pd
    import utool as ut
    import numpy as np
    import six
    if isinstance(highlight_cols, six.string_types) and highlight_cols == 'all':
        highlight_cols = np.arange(len(df.columns))
    # kwds = dict(buf=None, columns=None, col_space=None, header=True,
    #             index=True, na_rep='NaN', formatters=None,
    #             float_format=None, sparsify=None, index_names=True,
    #             justify=None, line_width=None, max_rows=None,
    #             max_cols=None, show_dimensions=False)
    # self = pd.formats.format.DataFrameFormatter(df, **kwds)
    self = pd.formats.format.DataFrameFormatter(df)
    self.highlight_cols = highlight_cols
    ut.inject_func_as_method(self, monkey_to_str_columns, '_to_str_columns', override=True, force=True)

    def strip_ansi(text):
        import re
        ansi_escape = re.compile(r'\x1b[^m]*m')
        return ansi_escape.sub('', text)

    def justify_ansi(self, texts, max_len, mode='right'):
        if mode == 'left':
            return [x.ljust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
        elif mode == 'center':
            return [x.center(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
        else:
            return [x.rjust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
    ut.inject_func_as_method(self.adj, justify_ansi, 'justify', override=True, force=True)

    def strlen_ansii(self, text):
        return pd.compat.strlen(strip_ansi(text), encoding=self.encoding)
    ut.inject_func_as_method(self.adj, strlen_ansii, 'len', override=True, force=True)

    if False:
        strlen = ut.partial(strlen_ansii, self.adj)  # NOQA
        justfunc = ut.partial(justify_ansi, self.adj)  # NOQA
        # Essentially what to_string does
        strcols = monkey_to_str_columns(self)
        # texts = strcols[2]
        space = 1
        lists = strcols
        str_ = self.adj.adjoin(space, *lists)
        print(str_)
        print(strip_ansi(str_))
    self.to_string()
    result = self.buf.getvalue()
    # hack because adjoin is not working correctly with injected strlen
    result = '\n'.join([x.rstrip() for x in result.split('\n')])
    return result
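The ANSI-aware justification above is the crux of the monkey patch: justify by visible width, compensating for invisible escape codes. A standalone sketch of that padding arithmetic:

import re

ansi_escape = re.compile(r'\x1b[^m]*m')

def strip_ansi(text):
    return ansi_escape.sub('', text)

def rjust_ansi(text, max_len):
    # Pad by the *visible* length: widen max_len by however many invisible
    # escape characters the string carries
    return text.rjust(max_len + (len(text) - len(strip_ansi(text))))

plain = '0.91'
colored = '\x1b[31m0.91\x1b[0m'
print(repr(rjust_ansi(plain, 8)))    # '    0.91'
print(repr(rjust_ansi(colored, 8)))  # padded to the same visible width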
Example 16
def make_review_api(ibs, cm_list, review_cfg, qreq_=None):
    """
    Builds columns which are displayable in a ColumnListTableWidget

    CommandLine:
        python -m ibeis.gui.id_review_api --test-test_review_widget --show
        python -m ibeis.gui.id_review_api --test-make_review_api

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.gui.id_review_api import *  # NOQA
        >>> import ibeis
        >>> import guitool as gt
        >>> from ibeis.gui import id_review_api
        >>> cm_list, qreq_ = ibeis.main_helpers.testdata_cmlist()
        >>> tblname = 'chipmatch'
        >>> name_scoring = False
        >>> ranks_top = 5
        >>> review_cfg = dict(ranks_top=ranks_top, name_scoring=name_scoring)
        >>> review_api = make_review_api(qreq_.ibs, cm_list, review_cfg, qreq_=qreq_)
        >>> print('review_api = %r' % (review_api,))
    """
    # TODO: Add in timedelta to column info
    if ut.VERBOSE:
        print('[inspect] make_review_api')

    review_edges = get_review_edges(cm_list, ibs=ibs, review_cfg=review_cfg)
    # Get extra info
    (qaids, daids, scores, ranks) = review_edges

    RES_THUMB_TEXT = 'ResThumb'  # NOQA
    QUERY_THUMB_TEXT = 'querythumb'
    MATCH_THUMB_TEXT = 'MatchThumb'

    col_name_list = [
        'result_index',
        'score',
        REVIEWED_STATUS_TEXT,
    ]

    if review_cfg.get('show_chips', True):
        col_name_list += [
            MATCHED_STATUS_TEXT,
            QUERY_THUMB_TEXT,
        ]

    col_name_list += [
        RES_THUMB_TEXT,
        'qaid',
        'aid',
        'rank',
        'timedelta',
        'dnGt',
        'qnGt',
        'tags',
        'qname',
        'name',
    ]

    col_types_dict = dict([
        ('qaid',       int),
        ('aid',        int),
        ('dnGt',      int),
        ('qnGt',      int),
        ('timedelta',   float),
        #('review',     'BUTTON'),
        (MATCHED_STATUS_TEXT, str),
        (REVIEWED_STATUS_TEXT, str),
        (QUERY_THUMB_TEXT, 'PIXMAP'),
        (RES_THUMB_TEXT,   'PIXMAP'),
        ('qname',      str),
        ('name',       str),
        ('score',      float),
        ('rank',       int),
        ('truth',      bool),
        ('opt',        int),
        ('result_index',  int),
    ])
    timedelta_list = np.array(ut.take_column(ibs.get_unflat_annots_timedelta_list(list(zip(qaids, daids))), 0))
    # TODO: make a display role
    #timediff_list = [ut.get_posix_timedelta_str(t, year=True, approx=True) for t in (timedelta_list * 60 * 60)]

    def get_pair_tags(edge):
        aid1, aid2 = edge
        assert not ut.isiterable(aid1), 'aid1=%r, aid2=%r' % (aid1, aid2)
        assert not ut.isiterable(aid2), 'aid1=%r, aid2=%r' % (aid1, aid2)
        am_rowids = ibs.get_annotmatch_rowid_from_undirected_superkey(
            [aid1], [aid2])
        tag_text = ibs.get_annotmatch_tag_text(am_rowids)[0]
        if tag_text is None:
            tag_text = ''
        return str(tag_text)

    col_getter_dict = dict([
        ('qaid',       np.array(qaids)),
        ('aid',        np.array(daids)),
        ('dnGt',      ibs.get_annot_num_groundtruth),
        ('qnGt',      ibs.get_annot_num_groundtruth),
        ('timedelta', np.array(timedelta_list)),
        #('review',     lambda rowid: get_buttontup),
        (MATCHED_STATUS_TEXT,  partial(get_match_status, ibs)),
        (REVIEWED_STATUS_TEXT,  partial(get_reviewed_status, ibs)),
        (QUERY_THUMB_TEXT, ibs.get_annot_chip_thumbtup),
        (RES_THUMB_TEXT,   ibs.get_annot_chip_thumbtup),
        ('qname',      ibs.get_annot_names),
        ('name',       ibs.get_annot_names),
        ('score',      np.array(scores)),
        ('rank',       np.array(ranks)),
        ('result_index',       np.arange(len(ranks))),
        ('tags', get_pair_tags),
        #lambda aid_pair: ibs.get_annotmatch_tag_text(ibs.get_annotmatch_rowid_from_undirected_superkey(ut.ensure_iterable(aid_pair[0]), ut.ensure_iterable(aid_pair[1])))[0]),
        #('truth',     truths),
        #('opt',       opts),
    ])

    # default is 100
    col_width_dict = {
        'score': 75,
        REVIEWED_STATUS_TEXT: 75,
        MATCHED_STATUS_TEXT: 75,
        'rank': 42,
        'qaid': 42,
        'aid': 42,
        'result_index': 42,
        'qname': 60,
        'name': 60,
        'dnGt': 50,
        'timedelta': 75,
        'tags': 75,
        'qnGt': 50,
    }

    USE_MATCH_THUMBS = 1
    if USE_MATCH_THUMBS:

        def get_match_thumbtup(ibs, qaid2_cm, qaids, daids, index, qreq_=None,
                               thumbsize=(128, 128), match_thumbtup_cache={}):
            daid = daids[index]
            qaid = qaids[index]
            cm = qaid2_cm[qaid]
            assert cm.qaid == qaid, 'aids do not agree'

            OLD = False
            if OLD:
                fpath = ensure_match_img(ibs, cm, daid, qreq_=qreq_,
                                         match_thumbtup_cache=match_thumbtup_cache)
                if isinstance(thumbsize, int):
                    thumbsize = (thumbsize, thumbsize)
                thumbtup = (ut.augpath(fpath, 'thumb_%d,%d' % thumbsize), fpath, thumbsize,
                            [], [])
                return thumbtup
            else:
                # Hacky new way of drawing
                fpath, func, func2 = make_ensure_match_img_nosql_func(qreq_, cm, daid)
                #match_thumbdir = ibs.get_match_thumbdir()
                #match_thumb_fname = get_match_thumb_fname(cm, daid, qreq_)
                #fpath = ut.unixjoin(match_thumbdir, match_thumb_fname)
                thumbdat = {
                    'fpath': fpath,
                    'thread_func': func,
                    'main_func': func2,
                    #'args': (ibs, cm, daid),
                    #'kwargs': dict(qreq_=qreq_,
                    #               match_thumbtup_cache=match_thumbtup_cache)
                }
                return thumbdat

        col_name_list.insert(col_name_list.index('qaid'),
                             MATCH_THUMB_TEXT)
        col_types_dict[MATCH_THUMB_TEXT] = 'PIXMAP'
        #col_types_dict[MATCH_THUMB_TEXT] = CustomMatchThumbDelegate
        qaid2_cm = {cm.qaid: cm for cm in cm_list}
        get_match_thumbtup_ = partial(get_match_thumbtup, ibs, qaid2_cm,
                                      qaids, daids, qreq_=qreq_,
                                      match_thumbtup_cache={})
        col_getter_dict[MATCH_THUMB_TEXT] = get_match_thumbtup_

    col_bgrole_dict = {
        MATCHED_STATUS_TEXT : partial(get_match_status_bgrole, ibs),
        REVIEWED_STATUS_TEXT: partial(get_reviewed_status_bgrole, ibs),
    }
    # TODO: remove ider dict.
    # it is massively unuseful
    col_ider_dict = {
        MATCHED_STATUS_TEXT     : ('qaid', 'aid'),
        REVIEWED_STATUS_TEXT    : ('qaid', 'aid'),
        'tags'    : ('qaid', 'aid'),
        QUERY_THUMB_TEXT : ('qaid'),
        RES_THUMB_TEXT : ('aid'),
        'dnGt'      : ('aid'),
        'qnGt'      : ('qaid'),
        'qname'      : ('qaid'),
        'name'       : ('aid'),
    }
    col_setter_dict = {
        'qname': ibs.set_annot_names,
        'name': ibs.set_annot_names
    }
    editable_colnames =  ['truth', 'notes', 'qname', 'name', 'opt']

    sortby = 'score'

    def get_thumb_size():
        return ibs.cfg.other_cfg.thumb_size

    col_display_role_func_dict = {
        'timedelta': ut.partial(ut.get_posix_timedelta_str, year=True, approx=2),
    }

    if not review_cfg.get('show_chips', True):
        del col_getter_dict[QUERY_THUMB_TEXT]
        del col_getter_dict[RES_THUMB_TEXT]
        del col_types_dict[RES_THUMB_TEXT]
        del col_types_dict[QUERY_THUMB_TEXT]
        del col_ider_dict[RES_THUMB_TEXT]
        del col_ider_dict[QUERY_THUMB_TEXT]
        # del col_bgrole_dict[RES_THUMB_TEXT]
        # del col_bgrole_dict[QUERY_THUMB_TEXT]

    # Insert info into dict
    review_api = gt.CustomAPI(
        col_name_list=col_name_list,
        col_types_dict=col_types_dict,
        col_getter_dict=col_getter_dict,
        col_bgrole_dict=col_bgrole_dict,
        col_ider_dict=col_ider_dict,
        col_setter_dict=col_setter_dict,
        editable_colnames=editable_colnames,
        col_display_role_func_dict=col_display_role_func_dict,
        sortby=sortby,
        get_thumb_size=get_thumb_size,
        sort_reverse=True,
        col_width_dict=col_width_dict)
    #review_api.review_edges = review_edges
    return review_api
Example 17
    def parse_column_tuples(
        self,
        col_name_list,
        col_types_dict,
        col_getter_dict,
        col_bgrole_dict,
        col_ider_dict,
        col_setter_dict,
        editable_colnames,
        sortby,
        sort_reverse=True,
        strict=False,
        **kwargs,
    ):
        """
        parses simple lists into information suitable for making guitool headers
        """
        # Unpack the column tuples into names, getters, and types
        if not strict:
            # sloppy colname definitions
            flag_list = [colname in col_getter_dict for colname in col_name_list]
            if not all(flag_list):
                invalid_colnames = ut.compress(col_name_list, ut.not_list(flag_list))
                logger.info(
                    '[api_item_widget] Warning: colnames=%r have no getters'
                    % (invalid_colnames,)
                )
                col_name_list = ut.compress(col_name_list, flag_list)
            # sloppy type inference
            for colname in col_name_list:
                getter_ = col_getter_dict[colname]
                if colname not in col_types_dict:
                    type_ = ut.get_homogenous_list_type(getter_)
                    if type_ is not None:
                        col_types_dict[colname] = type_
        # sloppy kwargs.
        # FIXME: explicitly list col_nice_dict
        col_nice_dict = kwargs.get('col_nice_dict', {})
        self.col_nice_list = [col_nice_dict.get(name, name) for name in col_name_list]

        self.col_name_list = col_name_list
        self.col_type_list = [
            col_types_dict.get(colname, str) for colname in col_name_list
        ]
        # First col is always a getter
        self.col_getter_list = [
            col_getter_dict.get(colname, str) for colname in col_name_list
        ]
        # Get number of rows / columns
        self.nCols = len(self.col_getter_list)
        if self.nCols == 0:
            self.nRows = 0
        else:
            for getter in self.col_getter_list:
                if ut.isiterable(getter):
                    break
                getter = None
            # FIXME
            assert getter is not None, 'at least one getter must be an array/list'
            self.nRows = len(getter)

        # self.nRows = 0 if self.nCols == 0 else len(self.col_getter_list[0])  # FIXME
        # Init iders to default and then overwrite based on dict inputs
        self.col_ider_list = [None] * self.nCols  # ut.alloc_nones(self.nCols)
        # for colname, ider_colnames in six.iteritems(col_ider_dict):
        # import utool
        # utool.embed()
        colname2_colx = ut.make_index_lookup(self.col_name_list)
        for colname, ider_colnames in six.iteritems(col_ider_dict):
            if colname not in colname2_colx:
                continue
            # for colname in self.col_name_list:
            ider_colnames = col_ider_dict[colname]
            try:
                colx = colname2_colx[colname]
                # Col iders might have tuple input
                ider_cols = self._uinput_1to1(self.col_name_list.index, ider_colnames)
                col_ider = self._uinput_1to1(lambda c: ut.partial(self.get, c), ider_cols)
                self.col_ider_list[colx] = col_ider
                del col_ider
                del ider_cols
                del colx
                del colname
            except Exception as ex:
                ut.printex(
                    ex,
                    keys=['colname', 'ider_colnames', 'colx', 'col_ider', 'ider_cols'],
                )
                raise
        # Init setters to data, and then overwrite based on dict inputs
        self.col_setter_list = list(self.col_getter_list)
        for colname, col_setter in six.iteritems(col_setter_dict):
            colx = colname2_colx[colname]
            self.col_setter_list[colx] = col_setter
        # Init bgrole_getters to None, and then overwrite based on dict inputs
        self.col_bgrole_getter_list = [
            col_bgrole_dict.get(colname, None) for colname in self.col_name_list
        ]
        # Mark editable columns
        self.col_edit_list = [name in editable_colnames for name in col_name_list]
        # Mark the sort column index
        if sortby is None:
            self.col_sort_index = 0
        elif ut.is_str(sortby):
            self.col_sort_index = self.col_name_list.index(sortby)
        else:
            self.col_sort_index = sortby
        self.col_sort_reverse = sort_reverse

        # Hacks for tree widget
        self._iders = kwargs.get('iders', None)
        col_level_dict = kwargs.get('col_level_dict', None)
        if col_level_dict is None:
            self.col_level_list = None
        else:
            self.col_level_list = ut.take(col_level_dict, col_name_list)
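The col_ider construction binds one column index per getter via ut.partial. A minimal sketch of that pattern with a hypothetical get(col, row) accessor and a plain dict standing in for ut.make_index_lookup:

from functools import partial

col_name_list = ['qaid', 'aid', 'score']
# ut.make_index_lookup: column name -> column index
colname2_colx = {name: ix for ix, name in enumerate(col_name_list)}

def get(col, row, table=None):
    return table[row][col]

table = [(1, 2, 0.9), (3, 4, 0.1)]
# Bind each column index once, as the col_ider construction above does
col_getters = [partial(get, colx, table=table) for colx in range(len(col_name_list))]
print(col_getters[colname2_colx['score']](0))  # 0.9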
Example 18
    def do(*cmd_list, **kwargs):
        import utool as ut
        import time
        import six
        import sys
        verbose = kwargs.get('verbose', False)
        orig_print = globals()['print']
        print = ut.partial(orig_print, file=kwargs.get('file', sys.stdout))
        # print('Running xctrl.do script')
        if verbose:
            print('Executing x do: %s' % (ut.repr4(cmd_list),))
        debug = False

        cmdkw = dict(verbose=False, quiet=True, silence=True)
        # http://askubuntu.com/questions/455762/xbindkeys-wont-work-properly
        # Make things work even if other keys are pressed
        defaultsleep = 0.0
        sleeptime = kwargs.get('sleeptime', defaultsleep)
        time.sleep(.05)
        out, err, ret = ut.cmd('xset r off', **cmdkw)
        if debug:
            print('----------')
            print('xset r off')
            print('ret = %r' % (ret,))
            print('err = %r' % (err,))
            print('out = %r' % (out,))

        memory = {}

        tmpverbose = 0
        for count, item in enumerate(cmd_list):
            # print('item = %r' % (item,))
            sleeptime = kwargs.get('sleeptime', defaultsleep)
            if tmpverbose:
                print('moving on')
            tmpverbose = 0
            nocommand = 0

            assert isinstance(item, tuple)
            assert len(item) >= 2
            xcmd, key_ = item[0:2]
            if len(item) >= 3:
                if isinstance(item[2], six.string_types) and item[2].endswith('?'):
                    sleeptime = float(item[2][:-1])
                    tmpverbose = 1
                    print('special command sleep')
                    print('sleeptime = %r' % (sleeptime,))
                else:
                    sleeptime = float(item[2])

            if xcmd == 'focus':
                key_ = str(key_)
                if key_.startswith('$'):
                    key_ = memory[key_[1:]]
                pattern = key_
                win_id = XCtrl.find_window_id(pattern, method='mru')
                if win_id is None:
                    args = ['wmctrl', '-xa', pattern]
                else:
                    args = ['wmctrl', '-ia', hex(win_id)]
            elif xcmd == 'focus_id':
                key_ = str(key_)
                if key_.startswith('$'):
                    key_ = memory[key_[1:]]
                # handle both a remembered int id and a numeric string
                args = ['wmctrl', '-ia', hex(int(key_))]
            elif xcmd == 'remember_window_id':
                out, err, ret = ut.cmd('xdotool getwindowfocus', **cmdkw)
                memory[key_] = int(out.strip())
                nocommand = True
                args = []
            elif xcmd == 'remember_window_name':
                out, err, ret = ut.cmd('xdotool getwindowfocus getwindowname', **cmdkw)
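                # NOTE: the pipes module is deprecated (removed in Python 3.13);
                # shlex.quote is the modern replacement for pipes.quote.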
                import pipes
                memory[key_] = pipes.quote(out.strip())
                nocommand = True
                args = []
            elif xcmd == 'type':
                args = [
                    'xdotool',
                    'keyup', '--window', '0', '7',
                    'type', '--clearmodifiers',
                    '--window', '0', str(key_)
                ]
            elif xcmd == 'type2':
                import pipes
                args = [
                    'xdotool', 'type', pipes.quote(str(key_))
                ]
            elif xcmd == 'xset-r-on':
                args = ['xset', 'r', 'on']
            elif xcmd == 'xset-r-off':
                args = ['xset', 'r', 'off']
            else:
                args = ['xdotool', str(xcmd), str(key_)]

            if verbose or tmpverbose:
                print('\n\n# Step %d' % (count,))
                print(args, ' '.join(args))

            if nocommand:
                continue
            # print('args = %r' % (args,))
            out, err, ret = ut.cmd(*args, **cmdkw)
            if debug:
                print('---- ' + xcmd + ' ------')
                print(' '.join(args))
                print('ret = %r' % (ret,))
                print('err = %r' % (err,))
                print('out = %r' % (out,))

            if sleeptime > 0:
                time.sleep(sleeptime)

        out, err, ret = ut.cmd('xset r on', **cmdkw)
        if debug:
            print('----------')
            print('xset r on')
            print('ret = %r' % (ret,))
            print('err = %r' % (err,))
            print('out = %r' % (out,))
Example 21
def draw_feat_scoresep(testres, f=None, disttype=None):
    r"""
    SeeAlso:
        ibeis.algo.hots.scorenorm.train_featscore_normalizer

    CommandLine:
        python -m ibeis --tf TestResult.draw_feat_scoresep --show
        python -m ibeis --tf TestResult.draw_feat_scoresep --show -t default:sv_on=[True,False]
        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1
        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --disttype=L2_sift,fg
        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --disttype=L2_sift
        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True --namemode=True
        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True --namemode=False

        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=L2_sift
        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=L2_sift -t best:SV=False

        utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1
        utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --fsvx=1:2
        utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --fsvx=0:1

        utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 -t best:lnbnn_on=False,bar_l2_on=True  --fsvx=0:1

        # We want to query the oxford annots tagged as query,
        # and we want the database to contain
        # K correct images per query, as well as the distractors

        python -m ibeis --tf TestResult.draw_feat_scoresep  --show --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,minqual=ok
        python -m ibeis --tf TestResult.draw_feat_scoresep  --show --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,minqual=good

        python -m ibeis --tf get_annotcfg_list  --db PZ_Master1 -a timectrl --acfginfo --verbtd  --veryverbtd --nocache-aid

        python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=ratio

    Example:
        >>> # SCRIPT
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> disttype = ut.get_argval('--disttype', type_=list, default=None)
        >>> ibs, testres = main_helpers.testdata_expts(
        >>>     defaultdb='PZ_MTEST', a=['timectrl'], t=['best'])
        >>> f = ut.get_argval(('--filt', '-f'), type_=list, default=[''])
        >>> testres.draw_feat_scoresep(f=f)
        >>> ut.show_if_requested()
    """
    print('[testres] draw_feat_scoresep')
    import numpy as np
    import six
    import vtool as vt
    import plottool as pt

    def load_feat_scores(qreq_, qaids):
        import ibeis  # NOQA
        from os.path import dirname, join  # NOQA
        # HACKY CACHE
        cfgstr = qreq_.get_cfgstr(with_input=True)
        cache_dir = join(dirname(dirname(ibeis.__file__)),
                         'TMP_FEATSCORE_CACHE')
        namemode = ut.get_argval('--namemode', default=True)
        fsvx = ut.get_argval('--fsvx',
                             type_='fuzzy_subset',
                             default=slice(None, None, None))
        threshx = ut.get_argval('--threshx', type_=int, default=None)
        thresh = ut.get_argval('--thresh', type_=float, default=.9)
        num = ut.get_argval('--num', type_=int, default=1)
        cfg_components = [
            cfgstr, disttype, namemode, fsvx, threshx, thresh, f, num
        ]
        cache_cfgstr = ','.join(ut.lmap(six.text_type, cfg_components))
        cache_hashid = ut.hashstr27(cache_cfgstr + '_v1')
        cache_name = ('get_cfgx_feat_scores_' + cache_hashid)
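        # key_argx=[] means the disk cache key is derived solely from
        # cache_name (which already hashes cfgstr and the CLI options),
        # not from the runtime arguments.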

        @ut.cached_func(cache_name,
                        cache_dir=cache_dir,
                        key_argx=[],
                        use_cache=True)
        def get_cfgx_feat_scores(qreq_, qaids):
            from ibeis.algo.hots import scorenorm
            cm_list = qreq_.execute(qaids)
            # print('Done loading cached chipmatches')
            tup = scorenorm.get_training_featscores(qreq_,
                                                    cm_list,
                                                    disttype,
                                                    namemode,
                                                    fsvx,
                                                    threshx,
                                                    thresh,
                                                    num=num)
            # print(ut.depth_profile(tup))
            tp_scores, tn_scores, scorecfg = tup
            return tp_scores, tn_scores, scorecfg

        tp_scores, tn_scores, scorecfg = get_cfgx_feat_scores(qreq_, qaids)
        return tp_scores, tn_scores, scorecfg

    valid_case_pos = testres.case_sample2(filt_cfg=f, return_mask=False)
    cfgx2_valid_qxs = ut.group_items(valid_case_pos.T[0], valid_case_pos.T[1])
    test_qaids = testres.get_test_qaids()
    cfgx2_valid_qaids = ut.map_dict_vals(ut.partial(ut.take, test_qaids),
                                         cfgx2_valid_qxs)

    join_acfgs = True
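    # When join_acfgs is True, scores from pipeline configs in the same
    # annot-config group are pooled before fitting; otherwise each config
    # index forms its own singleton group.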

    # TODO: option to average over pipeline configurations
    if join_acfgs:
        groupxs = testres.get_cfgx_groupxs()
    else:
        groupxs = list(zip(range(len(testres.cfgx2_qreq_))))
    grouped_qreqs = ut.apply_grouping(testres.cfgx2_qreq_, groupxs)

    grouped_scores = []
    for cfgxs, qreq_group in zip(groupxs, grouped_qreqs):
        # testres.print_pcfg_info()
        score_group = []
        for cfgx, qreq_ in zip(cfgxs, qreq_group):
            print('Loading cached chipmatches')
            qaids = cfgx2_valid_qaids[cfgx]
            tp_scores, tn_scores, scorecfg = load_feat_scores(qreq_, qaids)
            score_group.append((tp_scores, tn_scores, scorecfg))
        grouped_scores.append(score_group)

    cfgx2_shortlbl = testres.get_short_cfglbls(join_acfgs=join_acfgs)
    for score_group, lbl in zip(grouped_scores, cfgx2_shortlbl):
        tp_scores = np.hstack(ut.take_column(score_group, 0))
        tn_scores = np.hstack(ut.take_column(score_group, 1))
        scorecfg = '+++'.join(ut.unique(ut.take_column(score_group, 2)))
        # TODO: learn this score normalizer as a model
        # encoder = vt.ScoreNormalizer(adjust=4, monotonize=False)
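        # Fit a monotonic score -> match-probability normalizer from the
        # partitioned true-positive / true-negative feature scores.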
        encoder = vt.ScoreNormalizer(adjust=2, monotonize=True)
        encoder.fit_partitioned(tp_scores, tn_scores, verbose=False)
        figtitle = 'Feature Scores: %s, %s' % (scorecfg, lbl)
        fnum = None

        vizkw = {}
        sephack = ut.get_argflag('--sephack')
        if not sephack:
            vizkw['target_tpr'] = .95
            vizkw['score_range'] = (0, 1.0)

        encoder.visualize(
            figtitle=figtitle,
            fnum=fnum,
            with_scores=False,
            #with_prebayes=True,
            with_prebayes=False,
            with_roc=True,
            with_postbayes=False,
            #with_postbayes=True,
            **vizkw)
        icon = testres.ibs.get_database_icon()
        if icon is not None:
            pt.overlay_icon(icon, coords=(1, 0), bbox_alignment=(1, 0))

        if ut.get_argflag('--contextadjust'):
            pt.adjust_subplots(left=.1, bottom=.25, wspace=.2, hspace=.2)
            pt.adjust_subplots(use_argv=True)
    return encoder
Example 22
    def _dynamic_test_callback(infr, edge, decision, prev_decision, user_id):
        was_gt_pos = infr.test_gt_pos_graph.has_edge(*edge)

        # prev_decision = infr.get_edge_attr(edge, 'decision', default=UNREV)
        # prev_decision = list(infr.edge_decision_from([edge]))[0]

        true_decision = infr.edge_truth[edge]

        was_within_pred = infr.pos_graph.are_nodes_connected(*edge)
        was_within_gt = infr.test_gt_pos_graph.are_nodes_connected(*edge)
        was_reviewed = prev_decision != UNREV
        is_within_gt = was_within_gt
        was_correct = prev_decision == true_decision

        is_correct = true_decision == decision
        # print('prev_decision = {!r}'.format(prev_decision))
        # print('decision = {!r}'.format(decision))
        # print('true_decision = {!r}'.format(true_decision))

        def test_print(x, **kw):
            infr.print('[ACTION] ' + x, level=2, **kw)

        if 0:
            num = infr.recover_graph.number_of_components()
            old_data = infr.get_nonvisual_edge_data(edge)
            # print('old_data = %s' % (ut.repr4(old_data, stritems=True),))
            print('n_prev_reviews = %r' % (old_data['num_reviews'],))
            print('prev_decision = %r' % (prev_decision,))
            print('decision = %r' % (decision,))
            print('was_gt_pos = %r' % (was_gt_pos,))
            print('was_within_pred = %r' % (was_within_pred,))
            print('was_within_gt = %r' % (was_within_gt,))
            print('num inconsistent = %r' % (num,))
            # is_recovering = infr.is_recovering()

        if decision == POSTV:
            if is_correct:
                if not was_gt_pos:
                    infr.test_gt_pos_graph.add_edge(*edge)
        elif was_gt_pos:
            test_print("UNDID GOOD POSITIVE EDGE", color='darkred')
            infr.test_gt_pos_graph.remove_edge(*edge)
            is_within_gt = infr.test_gt_pos_graph.are_nodes_connected(*edge)

        split_gt = is_within_gt != was_within_gt
        if split_gt:
            test_print("SPLIT A GOOD MERGE", color='darkred')
            infr.test_state['n_true_merges'] -= 1

        confusion = infr.test_state['confusion']
        if confusion is None:
            # initialize dynamic confusion matrix
            # import pandas as pd
            states = (POSTV, NEGTV, INCMP, UNREV, UNKWN)
            confusion = {r: {c: 0 for c in states} for r in states}
            # pandas takes a really long time doing this
            # confusion = pd.DataFrame(columns=states, index=states)
            # confusion[:] = 0
            # confusion.index.name = 'real'
            # confusion.columns.name = 'pred'
            infr.test_state['confusion'] = confusion

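        # Rows index the true decision; columns index the predicted decision.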
        if was_reviewed:
            confusion[true_decision][prev_decision] -= 1
        confusion[true_decision][decision] += 1

        test_action = None
        action_color = None

        if is_correct:
            # CORRECT DECISION
            if was_reviewed:
                if prev_decision == decision:
                    test_action = 'correct duplicate'
                    action_color = 'darkyellow'
                else:
                    infr.mistake_edges.remove(edge)
                    test_action = 'correction'
                    action_color = 'darkgreen'
                    if decision == POSTV:
                        if not was_within_gt:
                            test_action = 'correction redid merge'
                            action_color = 'darkgreen'
                            infr.test_state['n_true_merges'] += 1
            else:
                if decision == POSTV:
                    if not was_within_gt:
                        test_action = 'correct merge'
                        action_color = 'darkgreen'
                        infr.test_state['n_true_merges'] += 1
                    else:
                        test_action = 'correct redundant positive'
                        action_color = 'darkblue'
                else:
                    if decision == NEGTV:
                        test_action = 'correct negative'
                        action_color = 'teal'
                    else:
                        test_action = 'correct uninferrable'
                        action_color = 'teal'
        else:
            action_color = 'darkred'
            # INCORRECT DECISION
            infr.mistake_edges.add(edge)
            if was_reviewed:
                if prev_decision == decision:
                    test_action = 'incorrect duplicate'
                elif was_correct:
                    test_action = 'incorrect undid good edge'
            else:
                if decision == POSTV:
                    if was_within_pred:
                        test_action = 'incorrect redundant merge'
                    else:
                        test_action = 'incorrect new merge'
                else:
                    test_action = 'incorrect new mistake'

        infr.test_state['test_action'] = test_action
        infr.test_state['pred_decision'] = decision
        infr.test_state['true_decision'] = true_decision
        infr.test_state['user_id'] = user_id
        infr.test_state['recovering'] = (infr.recover_graph.has_node(edge[0]) or
                                         infr.recover_graph.has_node(edge[1]))

        infr.test_state['n_decision'] += 1
        if user_id.startswith('algo'):
            infr.test_state['n_algo'] += 1
        elif user_id.startswith('user') or user_id == 'oracle':
            infr.test_state['n_manual'] += 1
        else:
            raise AssertionError('unknown user_id=%r' % (user_id,))

        assert test_action is not None, 'what happened?'
        test_print(test_action, color=action_color)
Example 23
def rhombicuboctahedron():
    import vtk

    # First, you need to store the vertex locations.

    import numpy as np

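    # d is the outward offset of a 45-degree beveled edge face; hh is the
    # distance from the center to each of the six axis-aligned square faces.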
    fu = 1  # full unit
    hu = 0.5  # half unit
    d = np.sqrt((fu**2) / 2)  # diag
    hh = hu + d  # half height

    # left view faces us

    import utool as ut
    import six
    import itertools

    counter = ut.partial(six.next, itertools.count(0))

    vertex_locations = vtk.vtkPoints()
    vertex_locations.SetNumberOfPoints(24)

    p1, p2, p3 = np.array([(-hu, -hu, hh), (hu, -hu, hh), (hu, hu, hh),
                           (-hu, hu, hh)]).T
    plist = [p1, p2, p3]

    # three of the six main faces
    # perms = list(itertools.permutations((0, 1, 2), 3))
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]

    vertex_array = []

    # VERTEXES
    # left, up, back
    vplist = ['L', 'U', 'B', 'R', 'D', 'F']
    vpdict = {}
    print('perms = %r' % (perms, ))
    for x in range(3):
        vp = vplist[x]
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    # three more of the six main faces
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]
    plist[-1] = -plist[-1]
    # right, down, front
    print('perms = %r' % (perms, ))
    for x in range(3):
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vp = vplist[x + 3]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    polygon_faces = vtk.vtkCellArray()

    face_dict = {
        'L': [vpdict['L'][0], vpdict['L'][1], vpdict['L'][2], vpdict['L'][3]],
        'D': [vpdict['D'][0], vpdict['D'][1], vpdict['D'][2], vpdict['D'][3]],
        'U': [vpdict['U'][0], vpdict['U'][1], vpdict['U'][2], vpdict['U'][3]],
        'F': [vpdict['F'][0], vpdict['F'][1], vpdict['F'][2], vpdict['F'][3]],
        'R': [vpdict['R'][0], vpdict['R'][1], vpdict['R'][2], vpdict['R'][3]],
        'B': [vpdict['B'][0], vpdict['B'][1], vpdict['B'][2], vpdict['B'][3]],
        'FL': [vpdict['L'][0], vpdict['L'][3], vpdict['F'][2], vpdict['F'][3]],
        'BL': [vpdict['L'][1], vpdict['L'][2], vpdict['B'][2], vpdict['B'][3]],
        'UL': [vpdict['L'][2], vpdict['L'][3], vpdict['U'][3], vpdict['U'][2]],
        'DL': [vpdict['L'][0], vpdict['L'][1], vpdict['D'][2], vpdict['D'][3]],
        'UFL': [vpdict['L'][3], vpdict['F'][2], vpdict['U'][3]],
        'DFL': [vpdict['L'][0], vpdict['F'][3], vpdict['D'][3]],
        'UBL': [vpdict['L'][2], vpdict['B'][2], vpdict['U'][2]],
        'DBL': [vpdict['L'][1], vpdict['B'][3], vpdict['D'][2]],
        'UFR': [vpdict['R'][3], vpdict['F'][1], vpdict['U'][0]],
        'DFR': [vpdict['R'][0], vpdict['F'][0], vpdict['D'][0]],
        'UBR': [vpdict['R'][2], vpdict['B'][1], vpdict['U'][1]],
        'DBR': [vpdict['R'][1], vpdict['B'][0], vpdict['D'][1]],
        'FR': [vpdict['R'][3], vpdict['R'][0], vpdict['F'][0], vpdict['F'][1]],
        'BR': [vpdict['R'][2], vpdict['R'][1], vpdict['B'][0], vpdict['B'][1]],
        'UR': [vpdict['R'][3], vpdict['R'][2], vpdict['U'][1], vpdict['U'][0]],
        'DR': [vpdict['R'][1], vpdict['R'][0], vpdict['D'][0], vpdict['D'][1]],
        'DF': [vpdict['F'][0], vpdict['F'][3], vpdict['D'][3], vpdict['D'][0]],
        'DB': [vpdict['B'][3], vpdict['B'][0], vpdict['D'][1], vpdict['D'][2]],
        'UF': [vpdict['F'][1], vpdict['F'][2], vpdict['U'][3], vpdict['U'][0]],
        'UB': [vpdict['B'][2], vpdict['B'][1], vpdict['U'][1], vpdict['U'][2]],
    }

    for key, vert_ids in face_dict.items():
        # if key != 'L':
        #    continue
        if len(vert_ids) == 4:
            q = vtk.vtkQuad()
        else:
            q = vtk.vtkTriangle()
        for count, idx in enumerate(vert_ids):
            q.GetPointIds().SetId(count, idx)
        polygon_faces.InsertNextCell(q)

    # Next, create a vtkPolyData to store the face and vertex information
    # that represents the polyhedron.
    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)
    pd.SetPolys(polygon_faces)

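    # A VTK_POLYHEDRON cell is defined by a "face stream": the number of
    # faces, then for each face its vertex count followed by its point ids.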
    face_stream = vtk.vtkIdList()
    face_stream.InsertNextId(polygon_faces.GetNumberOfCells())
    vertex_list = vtk.vtkIdList()

    polygon_faces.InitTraversal()
    while polygon_faces.GetNextCell(vertex_list) == 1:
        face_stream.InsertNextId(vertex_list.GetNumberOfIds())

        for j in range(vertex_list.GetNumberOfIds()):
            face_stream.InsertNextId(vertex_list.GetId(j))

    ug = vtk.vtkUnstructuredGrid()
    ug.SetPoints(vertex_locations)
    ug.InsertNextCell(vtk.VTK_POLYHEDRON, face_stream)

    # writer = vtk.vtkUnstructuredGridWriter()
    # writer.SetFileName("rhombicuboctahedron.vtk")
    # # writer.SetInputData(ug)
    # writer.SetInput(ug)
    # writer.Write()

    mapper = vtk.vtkDataSetMapper()
    # NOTE: SetInput is the VTK 5 API; on VTK >= 6 use mapper.SetInputData(ug)
    mapper.SetInput(ug)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    if 1:
        # Read the image data from a file

        textureCoords = vtk.vtkFloatArray()
        textureCoords.SetNumberOfComponents(3)
        # coords = ut.take(vertex_array, face_dict['L'])
        # for coord in coords:
        #    textureCoords.InsertNextTuple(tuple(coord))
        textureCoords.InsertNextTuple((0, 0, 0))
        textureCoords.InsertNextTuple((1, 0, 0))
        textureCoords.InsertNextTuple((1, 1, 0))
        textureCoords.InsertNextTuple((0, 1, 0))

        # Create texture object
        fpath = ut.grab_test_imgpath('zebra.png')
        reader = vtk.vtkPNGReader()
        reader.SetFileName(fpath)

        texture = vtk.vtkTexture()
        # NOTE: VTK 5 API; on VTK >= 6 use
        # texture.SetInputConnection(reader.GetOutputPort())
        texture.SetInput(reader.GetOutput())
        texture.RepeatOff()
        texture.InterpolateOff()

        ptdat = pd.GetPointData()
        ptdat.SetTCoords(textureCoords)

        actor.SetTexture(texture)

    ren = vtk.vtkRenderer()
    ren.AddActor(actor)

    renw = vtk.vtkRenderWindow()
    renw.AddRenderer(ren)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renw)

    ren.ResetCamera()
    renw.Render()
    iren.Start()
Example 24
    def show_exi_graph(inputs, inter=None):
        """
        CommandLine:
            python -m dtool.input_helpers TableInput.show_exi_graph --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from dtool.input_helpers import *  # NOQA
            >>> from dtool.example_depcache2 import *  # NOQA
            >>> depc = testdata_depc3()
            >>> import plottool as pt
            >>> # table = depc['smk_match']
            >>> table = depc['neighbs']
            >>> inputs = table.rootmost_inputs
            >>> print('inputs = %r' % (inputs,))
            >>> from plottool.interactions import ExpandableInteraction
            >>> inter = ExpandableInteraction(nCols=1)
            >>> inputs.show_exi_graph(inter=inter)
            >>> # FIXME; Expanding inputs can overspecify inputs
            >>> #inputs = inputs.expand_input(2)
            >>> #print('inputs = %r' % (inputs,))
            >>> #inputs.show_exi_graph(inter=inter)
            >>> #inputs = inputs.expand_input(1)
            >>> #inputs = inputs.expand_input(3)
            >>> #inputs = inputs.expand_input(2)
            >>> #inputs = inputs.expand_input(2)
            >>> #inputs = inputs.expand_input(1)
            >>> #print('inputs = %r' % (inputs,))
            >>> #inputs.show_exi_graph(inter=inter)
            >>> inter.start()
            >>> ut.show_if_requested()
        """
        import plottool as pt
        from plottool.interactions import ExpandableInteraction
        autostart = inter is None
        if inter is None:
            inter = ExpandableInteraction()
        tablename = inputs.table.tablename

        exi_graph = inputs.exi_graph.copy()
        recolor_exi_graph(exi_graph, inputs.exi_nodes())

        # Add numbering to indicate the input order
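        # (multi-inputs are marked with a trailing '*' after their index)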
        node_dict = ut.nx_node_dict(exi_graph)
        for count, rmi in enumerate(inputs.rmi_list, start=0):
            if rmi.ismulti:
                node_dict[rmi.node]['label'] += ' #%d*' % (count, )
            else:
                node_dict[rmi.node]['label'] += ' #%d' % (count, )

        plot_kw = {'fontname': 'Ubuntu'}
        #inter.append_plot(
        #    ut.partial(pt.show_nx, G, title='Dependency Subgraph (%s)' % (tablename), **plot_kw))
        inter.append_plot(
            ut.partial(pt.show_nx,
                       exi_graph,
                       title='Expanded Input (%s)' % (tablename, ),
                       **plot_kw))
        if autostart:
            inter.start()
        return inter
Example 25
    def conditional_knn(nnindexer, qfx2_vec, num_neighbors, invalid_axs):
        """
            >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
            >>> import ibeis
            >>> qreq_ = ibeis.testdata_qreq_(defaultdb='seaturtles')
            >>> qreq_.load_indexer()
            >>> qfx2_vec = qreq_.ibs.get_annot_vecs(qreq_.qaids[0])
            >>> num_neighbors = 2
            >>> nnindexer = qreq_.indexer
            >>> ibs = qreq_.ibs
            >>> qaid = 1
            >>> qencid = ibs.get_annot_encounter_text([qaid])[0]
            >>> ax2_encid = np.array(ibs.get_annot_encounter_text(nnindexer.ax2_aid))
            >>> invalid_axs = np.where(ax2_encid == qencid)[0]
        """
        import itertools

        def in1d_shape(arr1, arr2):
            return np.in1d(arr1, arr2).reshape(arr1.shape)

        get_neighbors = ut.partial(nnindexer.flann.nn_index,
                                   checks=nnindexer.checks,
                                   cores=nnindexer.cores)

        # Alloc space for final results
        K = num_neighbors
        shape = (len(qfx2_vec), K)
        qfx2_idx = np.full(shape, -1, dtype=np.int32)
        qfx2_rawdist = np.full(shape, np.nan, dtype=np.float64)
        qfx2_truek = np.full(shape, -1, dtype=np.int32)

        # Make a set of temporary indexes and loop variables
        # Cap the number of K-expansion iterations
        limit = 4
        K_ = K
        tx2_qfx = np.arange(len(qfx2_vec))
        tx2_vec = qfx2_vec
        iter_count = 0
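        # Re-query with a larger K_ each pass until every remaining row has at
        # least K neighbors outside invalid_axs, or the iteration cap is hit.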
        for iter_count in itertools.count():
            if limit is not None and iter_count >= limit:
                break
            # Find a set of neighbors
            (tx2_idx, tx2_rawdist) = get_neighbors(tx2_vec, K_)
            tx2_idx = vt.atleast_nd(tx2_idx, 2)
            tx2_rawdist = vt.atleast_nd(tx2_rawdist, 2)
            tx2_ax = nnindexer.get_nn_axs(tx2_idx)
            # Check to see if they meet the criteria
            tx2_invalid = in1d_shape(tx2_ax, invalid_axs)
            tx2_valid = np.logical_not(tx2_invalid)
            tx2_num_valid = tx2_valid.sum(axis=1)
            tx2_notdone = tx2_num_valid < K
            tx2_done = np.logical_not(tx2_notdone)

            # Move completely valid queries into the results
            if np.any(tx2_done):
                done_qfx = tx2_qfx.compress(tx2_done, axis=0)
                # Need to parse which columns are the completed ones
                done_valid_ = tx2_valid.compress(tx2_done, axis=0)
                done_rawdist_ = tx2_rawdist.compress(tx2_done, axis=0)
                done_idx_ = tx2_idx.compress(tx2_done, axis=0)
                # Get the completed valid indices
                rowxs, colxs = np.where(done_valid_)
                unique_rows, groupxs = vt.group_indices(rowxs)
                first_k_groupxs = [groupx[0:K] for groupx in groupxs]
                chosen_xs = np.hstack(first_k_groupxs)
                multi_index = (rowxs.take(chosen_xs), colxs.take(chosen_xs))
                flat_xs = np.ravel_multi_index(multi_index, done_valid_.shape)
                done_rawdist = done_rawdist_.take(flat_xs).reshape((-1, K))
                done_idx = done_idx_.take(flat_xs).reshape((-1, K))
                # Write done results in output
                qfx2_idx[done_qfx, :] = done_idx
                qfx2_rawdist[done_qfx, :] = done_rawdist
                qfx2_truek[done_qfx, :] = vt.apply_grouping(
                    colxs, first_k_groupxs)
            if np.all(tx2_done):
                break
            K_increase = (K - tx2_num_valid.min())
            K_ += K_increase
            tx2_qfx = tx2_qfx.compress(tx2_notdone, axis=0)
            tx2_vec = tx2_vec.compress(tx2_notdone, axis=0)

        if nnindexer.max_distance_sqrd is not None:
            qfx2_dist = np.divide(qfx2_rawdist, nnindexer.max_distance_sqrd)
        else:
            qfx2_dist = qfx2_rawdist
        return (qfx2_idx, qfx2_dist, iter_count)