Example 1
def visualize_vocab_word(ibs, invassign, wx, fnum=None):
    """

    Example:
        >>> from ibeis.new_annots import *  # NOQA
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> ibs, aid_list, vocab = testdata_vocab()
        >>> #aid_list = aid_list[0:1]
        >>> fstack = StackedFeatures(ibs, aid_list)
        >>> nAssign = 2
        >>> invassign = fstack.inverted_assignment(vocab, nAssign)
        >>> sortx = ut.argsort(invassign.num_list)[::-1]
        >>> wx_list = ut.take(invassign.wx_list, sortx)
        >>> wx = wx_list[0]
        >>> visualize_vocab_word(ibs, invassign, wx)
    """
    import plottool as pt
    pt.qt4ensure()
    vecs = invassign.get_vecs(wx)
    word = invassign.vocab.wx2_word[wx]

    word_patches = invassign.get_patches(wx)
    average_patch = np.mean(word_patches, axis=0)

    # Render the vocab word (the cluster center) rather than the local mean
    #average_vec = vecs.mean(axis=0)
    average_vec = word

    with_sift = True
    fnum = pt.ensure_fnum(fnum)
    if with_sift:
        patch_img = pt.render_sift_on_patch(average_patch, average_vec)
        #sift_word_patches = [pt.render_sift_on_patch(patch, vec) for patch, vec in ut.ProgIter(list(zip(word_patches, vecs)))]
        #stacked_patches = vt.stack_square_images(word_patches)
        #stacked_patches = vt.stack_square_images(sift_word_patches)
    else:
        patch_img = average_patch
    stacked_patches = vt.stack_square_images(word_patches)
    solidbar = np.zeros((patch_img.shape[0], int(patch_img.shape[1] * .1), 3),
                        dtype=patch_img.dtype)
    border_color = (100, 10, 10)  # bgr, darkblue
    if ut.is_float(solidbar):
        solidbar[:, :, :] = (np.array(border_color) / 255)[None, None]
    else:
        solidbar[:, :, :] = np.array(border_color)[None, None]
    word_img = vt.stack_image_list([patch_img, solidbar, stacked_patches],
                                   vert=False,
                                   modifysize=True)
    pt.imshow(word_img, fnum=fnum)
    #pt.imshow(patch_img, pnum=(1, 2, 1), fnum=fnum)
    #patch_size = 64
    #half_size = patch_size / 2
    #pt.imshow(stacked_patches, pnum=(1, 2, 2), fnum=fnum)
    pt.iup()
Example 2
def plot_gps_html(gps_list):
    """ Plots gps coordinates on a map projection

    InstallBasemap:
        sudo apt-get install libgeos-dev
        pip install git+https://github.com/matplotlib/basemap
        http://matplotlib.org/basemap/users/examples.html

        pip install gmplot

        sudo apt-get install netcdf-bin
        sudo apt-get install libnetcdf-dev
        pip install netCDF4

    Ignore:
        pip install git+git://github.com/myuser/foo.git@v123

    Example:
        >>> from ibeis.algo.preproc.preproc_occurrence import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> images = ibs.images()
        >>> # Setup GPS points to draw
        >>> print('Setup GPS points')
        >>> gps_list_ = np.array(images.gps2)
        >>> unixtime_list_ = np.array(images.unixtime2)
        >>> has_gps = np.all(np.logical_not(np.isnan(gps_list_)), axis=1)
        >>> has_unixtime = np.logical_not(np.isnan(unixtime_list_))
        >>> isvalid = np.logical_and(has_gps, has_unixtime)
        >>> gps_list = gps_list_.compress(isvalid, axis=0)
        >>> unixtime_list = unixtime_list_.compress(isvalid)  # NOQA
        >>> plot_gps_html(gps_list)
    """
    import plottool as pt
    import gmplot
    import matplotlib as mpl
    import vtool as vt
    pt.qt4ensure()

    lat = gps_list.T[0]
    lon = gps_list.T[1]

    # Get extent of the GPS points
    bbox = vt.bbox_from_verts(gps_list)
    centerx, centery = vt.bbox_center(bbox)

    gmap = gmplot.GoogleMapPlotter(centerx, centery, 13)
    color = mpl.colors.rgb2hex(pt.ORANGE)
    gmap.scatter(lat, lon, color=color, size=100, marker=False)
    gmap.draw("mymap.html")
    ut.startfile('mymap.html')
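
A minimal usage sketch, assuming gps_list is the (N, 2) array of (lat, lon) rows the function expects; the coordinates below are made up for illustration:

import numpy as np
# Hypothetical (lat, lon) points; any (N, 2) float array works
gps_list = np.array([[-1.373, 36.982],
                     [-1.375, 36.990],
                     [-1.380, 36.975]])
plot_gps_html(gps_list)  # writes mymap.html and opens it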
Example 3
def gridsearch_ratio_thresh(matches):
    import sklearn
    import sklearn.metrics
    import sklearn.model_selection
    import vtool as vt
    # Param search for vsone
    import plottool as pt
    pt.qt4ensure()

    skf = sklearn.model_selection.StratifiedKFold(n_splits=10,
                                                  random_state=119372)

    y = np.array([m.annot1['nid'] == m.annot2['nid'] for m in matches])

    basis = {'ratio_thresh': np.linspace(.6, .7, 50).tolist()}
    grid = ut.all_dict_combinations(basis)
    xdata = np.array(ut.take_column(grid, 'ratio_thresh'))

    def _ratio_thresh(y_true, match_list):
        # Try to find the optimal ratio threshold
        auc_list = []
        for cfgdict in ut.ProgIter(grid, lbl='gridsearch'):
            y_score = [
                match.fs.compress(match.ratio_test_flags(cfgdict)).sum()
                for match in match_list
            ]
            auc = sklearn.metrics.roc_auc_score(y_true, y_score)
            auc_list.append(auc)
        auc_list = np.array(auc_list)
        return auc_list

    auc_list = _ratio_thresh(y, matches)
    pt.plot(xdata, auc_list)
    subx, suby = vt.argsubmaxima(auc_list, xdata)
    best_ratio_thresh = subx[suby.argmax()]

    skf_results = []
    y_true = y
    for train_idx, test_idx in skf.split(matches, y):
        match_list_ = ut.take(matches, train_idx)
        y_true = y.take(train_idx)
        auc_list = _ratio_thresh(y_true, match_list_)
        subx, suby = vt.argsubmaxima(auc_list, xdata, maxima_thresh=.8)
        best_ratio_thresh = subx[suby.argmax()]
        skf_results.append(best_ratio_thresh)
    print('mean(skf_results) = %r' % (np.mean(skf_results),))
    import utool
    utool.embed()
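
A hedged usage sketch: the function assumes each match object exposes annot1['nid'], annot2['nid'], an fs score array, and a ratio_test_flags(cfgdict) method (vtool-style matches); how those matches are built is pipeline-specific, so the helper below is hypothetical:

# make_vsone_matches is a hypothetical stand-in for whatever builds the
# vtool-style match objects in your pipeline
matches = make_vsone_matches(ibs, aid_pairs)
gridsearch_ratio_thresh(matches)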
Example 4
def fix_splits_interaction(ibs):
    """
    python -m ibeis fix_splits_interaction --show

    Example:
        >>> # DISABLE_DOCTEST GGR
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> dbdir = '/media/danger/GGR/GGR-IBEIS'
        >>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
        >>> ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False)
        >>> import guitool as gt
        >>> gt.ensure_qtapp()
        >>> win = fix_splits_interaction(ibs)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> gt.qtapp_loop(qwin=win)
    """
    split_props = {'splitcase', 'photobomb'}
    all_annot_groups = ibs._annot_groups(ibs.group_annots_by_name(ibs.get_valid_aids())[0])
    all_has_split = [len(split_props.intersection(ut.flatten(tags))) > 0 for tags in all_annot_groups.match_tags]
    tosplit_annots = ut.compress(all_annot_groups.annots_list, all_has_split)

    tosplit_annots = ut.take(tosplit_annots, ut.argsort(ut.lmap(len, tosplit_annots)))[::-1]
    if ut.get_argflag('--reverse'):
        tosplit_annots = tosplit_annots[::-1]
    print('len(tosplit_annots) = %r' % (len(tosplit_annots),))
    aids_list = [a.aids for a in tosplit_annots]

    from ibeis.algo.graph import graph_iden
    from ibeis.viz import viz_graph2
    import guitool as gt
    import plottool as pt
    pt.qt4ensure()
    gt.ensure_qtapp()

    for aids in ut.InteractiveIter(aids_list):
        infr = graph_iden.AnnotInference(ibs, aids)
        infr.initialize_graph()
        win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False,
                                          init_mode='rereview')
        win.populate_edge_model()
        win.show()
    return win
Example 5
def test_visualize_vocab_interact():
    """
    python -m ibeis.new_annots --exec-test_visualize_vocab_interact --show

    Example:
        >>> from ibeis.new_annots import *  # NOQA
        >>> test_visualize_vocab_interact()
        >>> ut.show_if_requested()
    """
    import plottool as pt
    pt.qt4ensure()
    ibs, aid_list, vocab = testdata_vocab()
    #aid_list = aid_list[0:1]
    fstack = StackedFeatures(ibs, aid_list)
    nAssign = 2
    invassign = fstack.inverted_assignment(vocab, nAssign)
    sortx = ut.argsort(invassign.num_list)[::-1]
    wx_list = ut.take(invassign.wx_list, sortx)
    wx = wx_list[0]
    fnum = 1
    for wx in ut.InteractiveIter(wx_list):
        visualize_vocab_word(ibs, invassign, wx, fnum)
Example 6
def show_data_image(data_uri_order, i, offset_list, all_kpts, all_vecs):
    """
    i = 12
    """
    import vtool as vt
    from os.path import join
    imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
    gpath = join(imgdir, data_uri_order[i] + '.jpg')
    image = vt.imread(gpath)
    import plottool as pt
    pt.qt4ensure()
    # pt.imshow(image)
    l = offset_list[i]
    r = offset_list[i + 1]
    kpts = all_kpts[l:r]
    vecs = all_vecs[l:r]
    pt.interact_keypoints.ishow_keypoints(image,
                                          kpts,
                                          vecs,
                                          ori=False,
                                          ell_alpha=.4,
                                          color='distinct')
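
A usage sketch matching the i = 12 note in the docstring; it assumes the Oxford arrays (data_uri_order, offset_list, all_kpts, all_vecs) were already loaded by the surrounding script:

# Show the keypoints/descriptors attributed to the 12th Oxford image
# (all four arrays are assumed to be in scope already)
show_data_image(data_uri_order, 12, offset_list, all_kpts, all_vecs)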
Example 7
def sanity_checks(offset_list, Y_list, query_annots, ibs):
    nfeat_list = np.diff(offset_list)
    for Y, nfeat in ut.ProgIter(zip(Y_list, nfeat_list), 'checking'):
        assert nfeat == sum(ut.lmap(len, Y.fxs_list))

    if False:
        # Visualize queries
        # Look at the standard query images here
        # http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf
        from ibeis.viz import viz_chip
        import plottool as pt
        pt.qt4ensure()
        fnum = 1
        pnum_ = pt.make_pnum_nextgen(len(query_annots.aids) // 5, 5)
        for aid in ut.ProgIter(query_annots.aids):
            pnum = pnum_()
            viz_chip.show_chip(ibs,
                               aid,
                               in_image=True,
                               annote=False,
                               notitle=True,
                               draw_lbls=False,
                               fnum=fnum,
                               pnum=pnum)
Example 8
def test_mser():
    import cv2
    import vtool as vt
    import plottool as pt
    import numpy as np
    pt.qt4ensure()

    class Keypoints(ut.NiceRepr):
        """
        Convenience class for dealing with keypoints
        """
        def __init__(self, kparr, info=None):
            self.kparr = kparr
            if info is None:
                info = {}
            self.info = info

        def add_info(self, key, val):
            self.info[key] = val

        def __nice__(self):
            return ' ' + str(len(self.kparr))

        @property
        def scale(self):
            return vt.get_scales(self.kparr)

        @property
        def eccentricity(self):
            return vt.get_kpts_eccentricity(self.kparr)

        def compress(self, flags, inplace=False):
            subarr = self.kparr.compress(flags, axis=0)
            info = {
                key: ut.compress(val, flags)
                for key, val in self.info.items()
            }
            return Keypoints(subarr, info)

    img_fpath = ut.grab_test_imgpath(
        ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    # http://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html#gsc.tab=0
    # http://stackoverflow.com/questions/17647500/exact-meaning-of-the-parameters-given-to-initialize-mser-in-opencv-2-4-x
    factory = cv2.MSER_create
    img_area = np.product(np.array(vt.get_size(imgGray)))
    _max_area = (img_area // 10)
    _delta = 8
    _min_diversity = .5

    extractor = factory(_delta=_delta,
                        _max_area=_max_area,
                        _min_diversity=_min_diversity)
    # bboxes are x,y,w,h
    regions, bboxes = extractor.detectRegions(imgGray)
    # ellipse definition from [Fitzgibbon95]
    # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
    # ell = [c_x, c_y, R_x, R_y, theta]
    # (cx, cy) = conic center
    # Rx and Ry = conic radii
    # theta is the counterclockwise angle
    fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

    # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
    invVR_mats = []
    for ell in fitz_ellipses:
        ((cx, cy), (dx, dy), degrees) = ell
        theta = np.radians(degrees)  # opencv reports degrees; convert to radians
        # Convert diameters to radii
        rx = dx / 2
        ry = dy / 2
        S = vt.scale_mat3x3(rx, ry)
        T = vt.translation_mat3x3(cx, cy)
        R = vt.rotation_mat3x3(theta)
        invVR = T.dot(R.dot(S))
        invVR_mats.append(invVR)
    invVR_mats = np.array(invVR_mats)
    #_oris = vt.get_invVR_mats_oris(invVR_mats)
    kpts2_ = vt.flatten_invV_mats_to_kpts(invVR_mats)

    self = Keypoints(kpts2_)
    self.add_info('regions', regions)
    flags = (self.eccentricity < .9)
    #flags = self.scale < np.mean(self.scale)
    #flags = self.scale < np.median(self.scale)
    self = self.compress(flags)
    import plottool as pt
    #pt.interact_keypoints.ishow_keypoints(imgBGR, self.kparr, None, ell_alpha=.4, color='distinct', fnum=2)
    #import plottool as pt
    vis = imgBGR.copy()

    for region in self.info['regions']:
        vis[region.T[1], region.T[0], :] = 0

    #regions, bbox = mser.detectRegions(gray)
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in self.info['regions']]
    #cv2.polylines(vis, hulls, 1, (0, 255, 0))
    #for region in self.info['regions']:
    #    ell = cv2.fitEllipse(region)
    #    cv2.ellipse(vis, ell, (255))
    pt.interact_keypoints.ishow_keypoints(vis,
                                          self.kparr,
                                          None,
                                          ell_alpha=.4,
                                          color='distinct',
                                          fnum=2)
    #pt.imshow(vis, fnum=2)
    pt.update()

    #extractor = extract_factory['DAISY']()

    #desc_type_to_dtype = {
    #    cv2.CV_8U: np.uint8,
    #    cv2.CV_8s: np.uint,
    #}
    #def alloc_desc(extractor):
    #    desc_type = extractor.descriptorType()
    #    desc_size = extractor.descriptorSize()
    #    dtype = desc_type_to_dtype[desc_type]
    #    shape = (len(cv2_kpts), desc_size)
    #    desc = np.empty(shape, dtype=dtype)
    #    return desc

    #ut.search_module(cv2, 'array', recursive=True)
    #ut.search_module(cv2, 'freak', recursive=True)
    #ut.search_module(cv2, 'out', recursive=True)

    #cv2_kpts = cv2_kpts[0:2]

    #for key, factory in just_desc_factory_.items():
    #    extractor = factory()
    #    desc = alloc_desc(extractor)
    #    desc = extractor.compute(imgGray, cv2_kpts)
    #    feats[key] = (desc,)
    #    #extractor.compute(imgGray, cv2_kpts, desc)
    #    pass
    #kpts = np.array(list(map(from_cv2_kpts, cv2_kpts)))

    #orb = cv2.ORB()
    #kp1, des1 = orb.detectAndCompute(imgGray, None)
    #blober = cv2.SimpleBlobDetector_create()
    #haris_kpts = cv2.cornerHarris(imgGray, 2, 3, 0.04)

    #[name for name in dir(cv2) if 'mat' in name.lower()]
    #[name for name in dir(cv2.xfeatures2d) if 'desc' in name.lower()]

    #[name for name in dir(cv2) if 'detect' in name.lower()]
    #[name for name in dir(cv2) if 'extract' in name.lower()]
    #[name for name in dir(cv2) if 'ellip' in name.lower()]

    #sift = cv2.xfeatures2d.SIFT_create()
    #cv2_kpts = sift.detect(imgGray)
    #desc = sift.compute(imgGray, cv2_kpts)[1]

    #freak = cv2.xfeatures2d.FREAK_create()
    #cv2_kpts = freak.detect(imgGray)
    #desc = freak.compute(imgGray, cv2_kpts)[1]
    pass
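
For reference, the invVR construction above is the standard map taking the unit circle onto a fitted ellipse (scale, then rotate, then translate); a self-contained numpy sketch of the same composition, without the vtool helpers:

import numpy as np

def ellipse_to_affine(cx, cy, rx, ry, theta):
    # Scale the unit circle to the ellipse radii, rotate by theta,
    # then translate to the ellipse center
    S = np.array([[rx, 0., 0.], [0., ry, 0.], [0., 0., 1.]])
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])
    T = np.array([[1., 0., cx], [0., 1., cy], [0., 0., 1.]])
    return T.dot(R.dot(S))

A = ellipse_to_affine(10, 20, 4, 2, np.radians(30))
assert np.allclose(A.dot([0, 0, 1])[:2], [10, 20])  # circle center -> ellipse center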
Example 9
def scalespace():
    r"""
    THIS DOES NOT SHOW A REAL SCALE SPACE PYRAMID YET. FIXME.

    Returns:
        ?: imgBGRA_warped

    CommandLine:
        python -m ibeis.scripts.specialdraw scalespace --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> imgBGRA_warped = scalespace()
        >>> result = ('imgBGRA_warped = %s' % (ut.repr2(imgBGRA_warped),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import numpy as np
    # import matplotlib.pyplot as plt
    import cv2
    import vtool as vt
    import plottool as pt
    pt.qt4ensure()

    #imgBGR = vt.imread(ut.grab_test_imgpath('lena.png'))
    imgBGR = vt.imread(ut.grab_test_imgpath('zebra.png'))
    # imgBGR = vt.imread(ut.grab_test_imgpath('carl.jpg'))

    # Convert to colored intensity image
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    imgBGR = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
    imgRaw = imgBGR

    # TODO: stack images in pyramid # border?
    initial_sigma = 1.6
    num_intervals = 4

    def makepyramid_octave(imgRaw, level, num_intervals):
        # Downsample image to take sigma to a power of level
        step = (2**(level))
        img_level = imgRaw[::step, ::step]
        # Compute interval relative scales
        interval = np.array(list(range(num_intervals)))
        relative_scales = (2**((interval / num_intervals)))
        sigma_intervals = initial_sigma * relative_scales
        octave_intervals = []
        for sigma in sigma_intervals:
            sizex = int(6. * sigma + 1.) + int(1 - (int(6. * sigma + 1.) % 2))
            ksize = (sizex, sizex)
            img_blur = cv2.GaussianBlur(img_level,
                                        ksize,
                                        sigmaX=sigma,
                                        sigmaY=sigma,
                                        borderType=cv2.BORDER_REPLICATE)
            octave_intervals.append(img_blur)
        return octave_intervals

    pyramid = []
    num_octaves = 4
    for level in range(num_octaves):
        octave = makepyramid_octave(imgRaw, level, num_intervals)
        pyramid.append(octave)

    def makewarp(imgBGR):
        # hack a projection matrix using a dummy homography
        imgBGRA = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2BGRA)
        imgBGRA[:, :, 3] = .87 * 255  # hack alpha
        imgBGRA = vt.pad_image(imgBGRA, 2, value=[0, 0, 255, 255])
        size = np.array(vt.get_size(imgBGRA))
        pts1 = np.array([(0, 0), (0, 1), (1, 1), (1, 0)]) * size
        x_adjust = .15
        y_adjust = .5
        pts2 = np.array([(x_adjust, 0), (0, 1 - y_adjust), (1, 1 - y_adjust),
                         (1 - x_adjust, 0)]) * size
        H = cv2.findHomography(pts1, pts2)[0]

        dsize = np.array(vt.bbox_from_verts(pts2)[2:4]).astype(np.int)
        warpkw = dict(flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
        imgBGRA_warped = cv2.warpPerspective(imgBGRA, H, tuple(dsize),
                                             **warpkw)
        return imgBGRA_warped

    framesize = (700, 500)
    steps = np.array([.04, .03, .02, .01]) * 1.3

    numintervals = 4
    octave_ty_starts = [1.0]
    for i in range(1, 4):
        prev_ty = octave_ty_starts[-1]
        prev_base = pyramid[i - 1][0]
        next_ty = prev_ty - ((prev_base.shape[0] / framesize[1]) / 2 +
                             (numintervals - 1) * (steps[i - 1]))
        octave_ty_starts.append(next_ty)

    def temprange(stop, step, num):
        return [stop - (x * step) for x in range(num)]

    layers = []
    for i in range(0, 4):
        ty_start = octave_ty_starts[i]
        step = steps[i]
        intervals = pyramid[i]
        ty_range = temprange(ty_start, step, numintervals)
        nextpart = [
            vt.embed_in_square_image(makewarp(interval),
                                     framesize,
                                     img_origin=(.5, .5),
                                     target_origin=(.5, ty / 2))
            for ty, interval in zip(ty_range, intervals)
        ]
        layers += nextpart

    for layer in layers:
        pt.imshow(layer)

    pt.plt.grid(False)
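
Aside: the sizex expression in makepyramid_octave rounds 6*sigma + 1 to an odd integer because cv2.GaussianBlur requires odd kernel dimensions; a quick standalone check of the formula:

for sigma in [1.6, 2.0, 2.5]:
    sizex = int(6. * sigma + 1.) + int(1 - (int(6. * sigma + 1.) % 2))
    assert sizex % 2 == 1  # GaussianBlur needs an odd ksize
    print(sigma, sizex)    # 1.6 -> 11, 2.0 -> 13, 2.5 -> 17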
Example 10
def detect_sharks(ibs, gids):
    #import ibeis
    #ibs = ibeis.opendb('WS_ALL')
    config = {
        'algo':
        'yolo',
        'sensitivity':
        0.2,
        'config_filepath':
        ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg'),
        'weight_filepath':
        ut.truepath(
            '~/work/WS_ALL/localizer_backup/detect.yolo.2.39000.weights'),
        'class_filepath':
        ut.truepath(
            '~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg.classes'),
    }
    depc = ibs.depc_image

    #imgsets = ibs.imagesets(text='Injured Sharks')
    #images = ibs.images(imgsets.gids[0])
    images = ibs.images(gids)
    images = images.compress([ext not in ['.gif'] for ext in images.exts])
    gid_list = images.gids

    # result is a tuple:
    # (score, bbox_list, theta_list, conf_list, class_list)
    results_list = depc.get_property('localizations',
                                     gid_list,
                                     None,
                                     config=config)

    results_list2 = []
    multi_gids = []
    failed_gids = []

    for gid, res in zip(gid_list, results_list):
        score, bbox_list, theta_list, conf_list, class_list = res
        if len(bbox_list) == 0:
            failed_gids.append(gid)
        elif len(bbox_list) == 1:
            results_list2.append((gid, bbox_list, theta_list))
        elif len(bbox_list) > 1:
            # Keep only the highest-confidence detection
            multi_gids.append(gid)
            idx = conf_list.argmax()
            res2 = (gid, bbox_list[idx:idx + 1], theta_list[idx:idx + 1])
            results_list2.append(res2)

    # Tag problem images once the lists have been populated
    #ibs.set_image_imagesettext(failed_gids, ['Fixme'] * len(failed_gids))
    ibs.set_image_imagesettext(multi_gids, ['Fixme2'] * len(multi_gids))

    ut.dict_hist([t[1].shape[0] for t in results_list])

    localized_imgs = ibs.images(ut.take_column(results_list2, 0))
    assert all([len(a) == 1 for a in localized_imgs.aids])
    old_annots = ibs.annots(ut.flatten(localized_imgs.aids))
    #old_tags = old_annots.case_tags

    # Override old bboxes
    import numpy as np
    bboxes = np.array(ut.take_column(results_list2, 1))[:, 0, :]
    ibs.set_annot_bboxes(old_annots.aids, bboxes)

    if False:
        import plottool as pt
        pt.qt4ensure()

        inter = pt.MultiImageInteraction(
            ibs.get_image_paths(ut.take_column(results_list2, 0)),
            bboxes_list=ut.take_column(results_list2, 1))
        inter.dump_to_disk('shark_loc', num=50, prefix='shark_loc')
        inter.start()

        inter = pt.MultiImageInteraction(ibs.get_image_paths(failed_gids))
        inter.start()

        inter = pt.MultiImageInteraction(ibs.get_image_paths(multi_gids))
        inter.start()
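
A hedged invocation sketch; the localizer config paths hard-coded in detect_sharks are machine-specific, so this only runs where those weight files exist:

import ibeis
# 'WS_ALL' is the database named in the commented-out lines at the top
ibs = ibeis.opendb('WS_ALL')
detect_sharks(ibs, ibs.get_valid_gids())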
Example 11
def get_injured_sharks():
    """
    >>> from ibeis.scripts.getshark import *  # NOQA
    """
    import requests
    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury',
        'net',
        'hook',
        'trunc',
        'damage',
        'scar',
        'nicks',
        'bite',
    ]

    injury_keys = [
        key for key in key_list if any([pat in key for pat in injury_patterns])
    ]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, 'vals')
    print(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
    print(ut.repr3(nice_key_hist))

    key_to_urls = {
        key: ut.take_column(vals, 'url')
        for key, vals in keyed_images.items()
    }
    overlaps = {}
    import itertools
    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            #print('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    print('num_all = %r' % (num_all, ))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            cat = 'other_injury'
            cat_to_keys[cat].append(key)

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    print(ut.repr3(cat_to_keys))
    print(ut.repr3(cat_hist))

    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items()
                       for val in vals])

    #ingestset = {
    #    '__class__': 'ImageSet',
    #    'images': ut.ddict(dict)
    #}
    #for key, key_imgs in keyed_images.items():
    #    for imgdict in key_imgs:
    #        url = imgdict['url']
    #        encid = imgdict['correspondingEncounterNumber']
    #        # Make structure
    #        encdict = encounters[encid]
    #        encdict['__class__'] = 'Encounter'
    #        imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #        imgdict['__class__'] = 'Image'
    #        cat = key_to_cat[key]
    #        annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #        annotdict['__class__'] = 'Annotation'

    #        # Ensure structures exist
    #        encdict['images'] = encdict.get('images', [])
    #        imgdict['annots'] = imgdict.get('annots', [])

    #        # Add an image to this encounter
    #        encdict['images'].append(imgdict)
    #        # Add an annotation to this image
    #        imgdict['annots'].append(annotdict)

    ##http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    #get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    #resp = requests.get(get_enc_url)
    #print(ut.repr3(encdict))
    #print(ut.repr3(encounters))

    # Download the files to the local disk
    #fpath_list =

    all_urls = ut.unique(
        ut.take_column(
            ut.flatten(
                ut.dict_subset(keyed_images,
                               ut.flatten(cat_to_keys.values())).values()),
            'url'))

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA
    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(zip(all_urls, fname_list),
                                  lbl='downloading imgs',
                                  freq=1):
        fpath = ut.grab_file_url(url,
                                 download_dir=dldir,
                                 fname=fname,
                                 verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    #url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            #url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'
    # Combine duplicate tags

    hashid_list = [
        ut.get_file_uuid(fpath_, stride=8)
        for fpath_ in ut.ProgIter(fpath_list, bs=True)
    ]
    groupxs = ut.group_indices(hashid_list)[1]

    # Group properties by duplicate images
    #groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [
        ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
        for info_ in ut.apply_grouping(info_list, groupxs)
    ]

    encid_list_ = [
        ut.unique(info_['correspondingEncounterNumber'])[0]
        for info_ in info_list_
    ]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists({
        'gpath': fpath_list_,
        'url': url_list_,
        'encid': encid_list_,
        'key': keys_list_,
        'cat': cats_list_,
    })

    #for info_ in ut.apply_grouping(info_list, groupxs):
    #    info = ut.dict_accum(*info_)
    #    info = ut.map_dict_vals(ut.flatten, info)
    #    x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #    if len(x) > 1:
    #        info = info.copy()
    #        del info['keys']
    #        print(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import ibeis
    ibs = ibeis.opendb('WS_Injury', allow_newdir=True)

    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list

    failed_flags = ut.flag_None_items(clist['gid'])
    print('# failed %s' % (sum(failed_flags),))
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    #ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    #ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(clist['gid'],
                                                 adjust_percent=0.01,
                                                 tags_list=clist['tags'])
        aid_list

    import plottool as pt
    from ibeis import core_annots
    pt.qt4ensure()
    #annots = ibs.annots()
    #aids = [1, 2]
    #ibs.depc_annot.get('hog', aids , 'hog')
    #ibs.depc_annot.get('chip', aids, 'img')
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    #print(len(groupxs))

    #if False:
    #groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    #print(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #    # FIX
    #    for fpath, fname in zip(fpath_list, fname_list):
    #        if ut.checkpath(fpath):
    #            ut.move(fpath, join(dirname(fpath), fname))
    #            print('fpath = %r' % (fpath,))

    #import ibeis
    #from ibeis.dbio import ingest_dataset
    #dbdir = ibeis.sysres.lookup_dbdir('WS_ALL')
    #self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import plottool as pt
        import pandas as pd
        import numpy as np
        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import plottool as pt
            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [
                lbl.set_horizontalalignment('left')
                for lbl in ax.get_xticklabels()
            ]

            #xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            #pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [
                lbl.set_horizontalalignment('right')
                for lbl in ax.get_yticklabels()
            ]
            [
                lbl.set_verticalalignment('center')
                for lbl in ax.get_yticklabels()
            ]
            #[lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        #df = df.sort(axis=0)
        #df = df.sort(axis=1)

        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        vmax = mat[(1 - np.eye(len(mat))).astype(np.bool)].max()
        import matplotlib.colors
        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid(False)
        label_ticks(label_texts)
        fig.tight_layout()

    #overlap_df = pd.DataFrame.from_dict(overlap_img_list)

    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import plottool as pt
    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool as vt
    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """

        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
        fd, hog_image = hog(image,
                            orientations=8,
                            pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1),
                            visualise=True)

        fig, (ax1, ax2) = pt.plt.subplots(1,
                                          2,
                                          figsize=(8, 4),
                                          sharex=True,
                                          sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                        in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box-forced')
        pt.plt.show()
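
The duplicate-image grouping above keys on ut.get_file_uuid (a strided content hash); a minimal standard-library sketch of the same dedup-by-content-hash idea, assuming fpath_list as in the function body (a full hash rather than utool's strided uuid):

import hashlib

def file_hash(fpath, blocksize=2 ** 20):
    # Hash file contents so byte-identical downloads collapse together
    h = hashlib.sha1()
    with open(fpath, 'rb') as f:
        for chunk in iter(lambda: f.read(blocksize), b''):
            h.update(chunk)
    return h.hexdigest()

groups = {}
for fpath in fpath_list:
    groups.setdefault(file_hash(fpath), []).append(fpath)
duplicates = [ps for ps in groups.values() if len(ps) > 1]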
Example 12
def dans_splits(ibs):
    """
    python -m ibeis dans_splits --show

    Example:
        >>> # DISABLE_DOCTEST GGR
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> dbdir = '/media/danger/GGR/GGR-IBEIS'
        >>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
        >>> ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False)
        >>> import guitool as gt
        >>> gt.ensure_qtapp()
        >>> win = dans_splits(ibs)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> gt.qtapp_loop(qwin=win)
    """
    #pair = 9262, 932

    dans_aids = [26548, 2190, 9418, 29965, 14738, 26600, 3039, 2742, 8249,
                 20154, 8572, 4504, 34941, 4040, 7436, 31866, 28291,
                 16009, 7378, 14453, 2590, 2738, 22442, 26483, 21640, 19003,
                 13630, 25395, 20015, 14948, 21429, 19740, 7908, 23583, 14301,
                 26912, 30613, 19719, 21887, 8838, 16184, 9181, 8649, 8276,
                 14678, 21950, 4925, 13766, 12673, 8417, 2018, 22434, 21149,
                 14884, 5596, 8276, 14650, 1355, 21725, 21889, 26376, 2867,
                 6906, 4890, 21524, 6690, 14738, 1823, 35525, 9045, 31723,
                 2406, 5298, 15627, 31933, 19535, 9137, 21002, 2448,
                 32454, 12615, 31755, 20015, 24573, 32001, 23637, 3192, 3197,
                 8702, 1240, 5596, 33473, 23874, 9558, 9245, 23570, 33075,
                 23721,  24012, 33405, 23791, 19498, 33149, 9558, 4971,
                 34183, 24853, 9321, 23691, 9723, 9236, 9723,  21078,
                 32300, 8700, 15334, 6050, 23277, 31164, 14103,
                 21231, 8007, 10388, 33387, 4319, 26880, 8007, 31164,
                 32300, 32140]

    is_hybrid = [7123, 7166, 7157, 7158]  # NOQA
    needs_mask = [26836, 29742]  # NOQA
    justfine = [19862]  # NOQA

    annots = ibs.annots(dans_aids)
    unique_nids = ut.unique(annots.nids)
    grouped_aids = ibs.get_name_aids(unique_nids)
    annot_groups = ibs._annot_groups(grouped_aids)

    split_props = {'splitcase', 'photobomb'}
    needs_tag = [len(split_props.intersection(ut.flatten(tags))) == 0 for tags in annot_groups.match_tags]
    num_needs_tag = sum(needs_tag)
    num_had_split = len(needs_tag) - num_needs_tag
    print('num_had_split = %r' % (num_had_split,))
    print('num_needs_tag = %r' % (num_needs_tag,))

    #all_annot_groups = ibs._annot_groups(ibs.group_annots_by_name(ibs.get_valid_aids())[0])
    #all_has_split = [len(split_props.intersection(ut.flatten(tags))) > 0 for tags in all_annot_groups.match_tags]
    #num_nondan = sum(all_has_split) - num_had_split
    #print('num_nondan = %r' % (num_nondan,))

    from ibeis.algo.graph import graph_iden
    from ibeis.viz import viz_graph2
    import guitool as gt
    import plottool as pt
    pt.qt4ensure()
    gt.ensure_qtapp()

    aids_list = ut.compress(grouped_aids, needs_tag)
    aids_list = [a for a in aids_list if len(a) > 1]
    print('len(aids_list) = %r' % (len(aids_list),))

    for aids in aids_list:
        infr = graph_iden.AnnotInference(ibs, aids)
        infr.initialize_graph()
        win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False,
                                          init_mode='rereview')
        win.populate_edge_model()
        win.show()
        return win
    assert False