Example #1
def testdata_imglist(shape=(32, 32, 3)):
    """
    Returns 4 colored 32x32 test images: one is a structured ramp of
    increasing numbers, one is a cartoon face drawn with lines, and two are
    complex images of people.

    CommandLine:
        python -m ibeis_cnn.utils --test-testdata_imglist --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.utils import *  # NOQA
        >>> (img_list, width, height, channels) = testdata_imglist()
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> pt.imshow(img_list[0], pnum=(2, 2, 1))
        >>> pt.imshow(img_list[1], pnum=(2, 2, 2))
        >>> pt.imshow(img_list[2], pnum=(2, 2, 3))
        >>> pt.imshow(img_list[3], pnum=(2, 2, 4))
        >>> ut.show_if_requested()

    """
    import vtool as vt
    x = 32
    height, width, channels = shape
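    # img0 is a synthetic ramp; the uint8 cast wraps values mod 256, giving repeating gradients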
    img0 = np.arange(x**2 * 3, dtype=np.uint8).reshape(x, x, 3)
    img1 = vt.imread(ut.grab_test_imgpath('jeff.png'))
    img2 = vt.imread(ut.grab_test_imgpath('carl.jpg'))
    img3 = vt.imread(ut.grab_test_imgpath('lena.png'))
    img_list = [
        vt.padded_resize(img, (width, height))
        for img in [img0, img1, img2, img3]
    ]
    return img_list, width, height, channels
Example #2
def simple_iterative_test():
    r"""
    CommandLine:
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test --show

    Example:
        >>> # GUI_DOCTEST
        >>> from pyhesaff.tests.test_pyhesaff_simple_iterative import *  # NOQA
        >>> result = simple_iterative_test()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    lena_fpath = ut.grab_test_imgpath('lena.png')
    carl_fpath = ut.grab_test_imgpath('carl.jpg')
    grace_fpath = ut.grab_test_imgpath('grace.jpg')
    ada_fpath = ut.grab_test_imgpath('ada.jpg')

    fig = plt.figure()
    ax = fig.add_subplot(2, 2, 1)
    test_detect_then_show(ax, lena_fpath)

    ax = fig.add_subplot(2, 2, 2)
    test_detect_then_show(ax, carl_fpath)

    ax = fig.add_subplot(2, 2, 3)
    test_detect_then_show(ax, grace_fpath)

    ax = fig.add_subplot(2, 2, 4)
    test_detect_then_show(ax, ada_fpath)
Example #3
def simple_iterative_test():
    r"""
    CommandLine:
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test --show

    Example:
        >>> # GUI_DOCTEST
        >>> from pyhesaff.tests.test_pyhesaff_simple_iterative import *  # NOQA
        >>> result = simple_iterative_test()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    lena_fpath  = ut.grab_test_imgpath('lena.png')
    carl_fpath  = ut.grab_test_imgpath('carl.jpg')
    grace_fpath = ut.grab_test_imgpath('grace.jpg')
    ada_fpath   = ut.grab_test_imgpath('ada.jpg')

    fig = plt.figure()
    ax = fig.add_subplot(2, 2, 1)
    test_detect_then_show(ax, lena_fpath)

    ax = fig.add_subplot(2, 2, 2)
    test_detect_then_show(ax, carl_fpath)

    ax = fig.add_subplot(2, 2, 3)
    test_detect_then_show(ax, grace_fpath)

    ax = fig.add_subplot(2, 2, 4)
    test_detect_then_show(ax, ada_fpath)
Example #4
def test_cv2_flann():
    """
    Ignore:
        [name for name in dir(cv2) if 'create' in name.lower()]
        [name for name in dir(cv2) if 'stereo' in name.lower()]

        ut.grab_zipped_url('https://priithon.googlecode.com/archive/a6117f5e81ec00abcfb037f0f9da2937bb2ea47f.tar.gz', download_dir='.')
    """
    import cv2
    from vtool.tests import dummy
    import plottool as pt
    import vtool as vt
    img1 = vt.imread(ut.grab_test_imgpath('easy1.png'))
    img2 = vt.imread(ut.grab_test_imgpath('easy2.png'))
    # StereoBM and DualTVL1 both expect single-channel uint8 images of the
    # same size, so convert to grayscale and resize first
    img1, img2 = vt.convert_image_list_colorspace([img1, img2], 'gray', src_colorspace='bgr')
    img2 = vt.resize(img2, img1.shape[0:2][::-1])

    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1, img2)
    pt.imshow(disparity)
    pt.show()

    #cv2.estima

    flow = cv2.createOptFlow_DualTVL1()
    # calc returns the computed flow field; pass None to let OpenCV allocate it
    flow_field = flow.calc(img1, img2, None)  # NOQA

    orb = cv2.ORB_create()
    kp1, vecs1 = orb.detectAndCompute(img1, None)
    kp2, vecs2 = orb.detectAndCompute(img2, None)

    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
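    # NOTE: these generic factory functions are OpenCV 2.x APIs; OpenCV 3+
    # removed them in favor of cv2.xfeatures2d.SIFT_create()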

    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)

    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)

    out = img1.copy()
    cv2.drawKeypoints(img1, kp1, outImage=out)
    pt.imshow(out)

    vecs1 = dummy.testdata_dummy_sift(10)
    vecs2 = dummy.testdata_dummy_sift(10)  # NOQA

    FLANN_INDEX_KDTREE = 0  # bug: flann enums are missing
    #flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)  # NOQA

    cv2.flann.Index(vecs1, index_params)

    #cv2.FlannBasedMatcher(flann_params)

    cv2.flann.Index(vecs1, index_params)  # NOQA
Example #5
def draw_demo():
    r"""
    CommandLine:
        python -m plottool.interact_impaint --exec-draw_demo --show

    Example:
        >>> # SCRIPT
        >>> from plottool.interact_impaint import *  # NOQA
        >>> result = draw_demo()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    fpath = ut.grab_test_imgpath('zebra.png')
    img = vt.imread(fpath)
    mask = impaint_mask2(img)
    print('mask = %r' % (mask,))
    print('mask.sum() = %r' % (mask.sum(),))
    if False:
        plt.imshow(vt.blend_images_multiply(img, mask))
        ax = plt.gca()
        ax.grid(False)
        ax.set_xticks([])
        ax.set_yticks([])
Example #6
def draw_demo():
    r"""
    CommandLine:
        python -m plottool.interact_impaint --exec-draw_demo --show

    Example:
        >>> # SCRIPT
        >>> from plottool.interact_impaint import *  # NOQA
        >>> result = draw_demo()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    fpath = ut.grab_test_imgpath('zebra.png')
    img = vt.imread(fpath)
    mask = impaint_mask2(img)
    print('mask = %r' % (mask, ))
    print('mask.sum() = %r' % (mask.sum(), ))
    if False:
        plt.imshow(vt.blend_images_multiply(img, mask))
        ax = plt.gca()
        ax.grid(False)
        ax.set_xticks([])
        ax.set_yticks([])
Example #7
 def thumb_getter(id_, thumbsize=128):
     """ Thumb getters must conform to thumbtup structure """
     if id_ not in imgname_list:
         return {
             'fpath': id_ + '.jpg',
             'thread_func': thread_func,
             'main_func': lambda: (id_, ),
         }
     # print(id_)
     if id_ == 'doesnotexist.jpg':
         return None
     else:
         img_path = ut.grab_test_imgpath(id_, verbose=False)
         img_size = vt.open_image_size(img_path)
     thumb_path = join(guitool_test_thumbdir,
                       ut.hashstr(str(img_path)) + '.jpg')
     if id_ == 'carl.jpg':
         bbox_list = [(10, 10, 200, 200)]
         theta_list = [0]
     elif id_ == 'lena.png':
         # bbox_list = [(10, 10, 200, 200)]
         bbox_list = [None]
         theta_list = [None]
     else:
         bbox_list = []
         theta_list = []
     interest_list = [False]
     thumbtup = (thumb_path, img_path, img_size, bbox_list, theta_list,
                 interest_list)
     # print('thumbtup = %r' % (thumbtup,))
     return thumbtup
Example #8
def _test_buffered_generator_img():
    """
    Test for buffering image read calls

    CONCLUSIONS:
        Use buffer   when bgtime is bigger, but comparable to fgtime
        Use buffer   when fgtime < bgtime and (fgtime + bgtime) is large
        Use generate when fgtime > bgtime and (fgtime + bgtime) is large
        Use serial   when fgtime is bigger and all parts are comparatively small

        Buffer size should be roughly bgtime / fgtime
        (e.g. a 40ms background read with 10ms of foreground work suggests
        a buffer of about 4)

        Buffering also has a much more even and regular cpu demand.
        It also demands fewer cpus (I think).


    CommandLine:
        python -m utool.util_parallel --test-_test_buffered_generator_img

    Example:
        >>> import utool as ut
        >>> from utool.util_parallel import *  # NOQA
        >>> from utool.util_parallel import _test_buffered_generator_img  # NOQA
        >>> from utool.util_parallel import _test_buffered_generator_general2  # NOQA
        >>> _test_buffered_generator_img()
    """
    import utool as ut
    #import vtool as vt
    args = [
        ut.grab_test_imgpath(key)
        for key in ut.util_grabdata.get_valid_test_imgkeys()
    ]

    import vtool as vt
    #import cv2
    #func = cv2.imread
    # bgfunc is the background task: read each image path from disk
    bgfunc = vt.imread
    def sleepfunc_bufwin(x, niters=10):
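        # stand-in for foreground work; its runtime determines fgtime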
        #import cv2
        for z in range(niters):
            # operate on image in some capacity
            x.cumsum()
        for z in range(2):
            x**1.1
        return x

    target_looptime = 60.0
    #target_looptime = 20.0
    #target_looptime = 10.0
    #target_looptime = 5.0
    serial_cheat = 1
    _test_buffered_generator_general2(bgfunc,
                                      args,
                                      sleepfunc_bufwin,
                                      target_looptime,
                                      serial_cheat,
                                      buffer_size=4,
                                      show_serial=False)
Example #9
def testdata_matcher(fname1='easy1.png', fname2='easy2.png'):
    """"
    fname1 = 'easy1.png'
    fname2 = 'hard3.png'

    python -m vtool.test_constrained_matching --test-visualize_matches --show

    Args:
        fname1 (str): (default = 'easy1.png')
        fname2 (str): (default = 'easy2.png')

    Returns:
        ?: testtup

    CommandLine:
        python -m vtool.test_constrained_matching --test-testdata_matcher

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.test_constrained_matching import *  # NOQA
        >>> fname1 = 'easy1.png'
        >>> fname2 = 'easy2.png'
        >>> testtup = testdata_matcher(fname1, fname2)
        >>> result = ('testtup = %s' % (str(testtup),))
        >>> print(result)
    """
    import utool as ut
    #import vtool as vt
    from vtool import image as gtool
    from vtool import features as feattool
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    featkw = dict(rotation_invariance=True)
    kpts1, vecs1 = feattool.extract_features(fpath1, **featkw)
    kpts2, vecs2 = feattool.extract_features(fpath2, **featkw)
    #if featkw['rotation_invariance']:
    #    print('ori stats 1 ' + ut.get_stats_str(vt.get_oris(kpts2)))
    #    print('ori stats 2 ' + ut.get_stats_str(vt.get_oris(kpts1)))
    rchip1 = gtool.imread(fpath1)
    rchip2 = gtool.imread(fpath2)
    #chip1_shape = vt.gtool.open_image_size(fpath1)
    chip2_shape = gtool.open_image_size(fpath2)
    dlen_sqrd2 = chip2_shape[0]**2 + chip2_shape[1]**2
    testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)
    return testtup
Example #10
def testdata_matcher(fname1='easy1.png', fname2='easy2.png'):
    """"
    fname1 = 'easy1.png'
    fname2 = 'hard3.png'

    python -m vtool.test_constrained_matching --test-visualize_matches --show

    Args:
        fname1 (str): (default = 'easy1.png')
        fname2 (str): (default = 'easy2.png')

    Returns:
        ?: testtup

    CommandLine:
        python -m vtool.test_constrained_matching --test-testdata_matcher

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.test_constrained_matching import *  # NOQA
        >>> fname1 = 'easy1.png'
        >>> fname2 = 'easy2.png'
        >>> testtup = testdata_matcher(fname1, fname2)
        >>> result = ('testtup = %s' % (str(testtup),))
        >>> print(result)
    """
    import utool as ut
    #import vtool as vt
    from vtool import image as gtool
    from vtool import features as feattool
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    featkw = dict(rotation_invariance=True)
    kpts1, vecs1 = feattool.extract_features(fpath1, **featkw)
    kpts2, vecs2 = feattool.extract_features(fpath2, **featkw)
    #if featkw['rotation_invariance']:
    #    print('ori stats 1 ' + ut.get_stats_str(vt.get_oris(kpts2)))
    #    print('ori stats 2 ' + ut.get_stats_str(vt.get_oris(kpts1)))
    rchip1 = gtool.imread(fpath1)
    rchip2 = gtool.imread(fpath2)
    #chip1_shape = vt.gtool.open_image_size(fpath1)
    chip2_shape = gtool.open_image_size(fpath2)
    dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1] ** 2
    testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)
    return testtup
Example #11
def testdata_kpts():
    import utool as ut
    import vtool as vt
    import pyhesaff
    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='star.png'))
    kwargs = ut.parse_dict_from_argv(pyhesaff.get_hesaff_default_params())
    (kpts, vecs) = pyhesaff.detect_feats(img_fpath, **kwargs)
    imgBGR = vt.imread(img_fpath)
    return kpts, vecs, imgBGR
Example #12
def testdata_blend(scale=128):
    import vtool as vt
    img_fpath = ut.grab_test_imgpath('lena.png')
    img1 = vt.imread(img_fpath)
    rng = np.random.RandomState(0)
    img2 = vt.perlin_noise(img1.shape[0:2], scale=scale, rng=rng)[None, :].T
    img1 = vt.rectify_to_float01(img1)
    img2 = vt.rectify_to_float01(img2)
    return img1, img2
Example #13
def testdata_blend(scale=128):
    import vtool as vt
    img_fpath = ut.grab_test_imgpath('lena.png')
    img1 = vt.imread(img_fpath)
    rng = np.random.RandomState(0)
    img2 = vt.perlin_noise(img1.shape[0:2], scale=scale, rng=rng)[None, :].T
    img1 = vt.rectify_to_float01(img1)
    img2 = vt.rectify_to_float01(img2)
    return img1, img2
Example #14
def get_dummy_test_vars1(fname1='easy1.png', fname2='easy2.png'):
    import utool as ut
    from vtool import image as gtool
    from vtool import features as feattool
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    kpts1, vecs1 = feattool.extract_features(fpath1)
    kpts2, vecs2 = feattool.extract_features(fpath2)
    chip1 = gtool.imread(fpath1)
    chip2 = gtool.imread(fpath2)
    #chip1_shape = vt.gtool.open_image_size(fpath1)
    #chip2_shape = gtool.open_image_size(fpath2)
    #dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1]
    #testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)
    import vtool as vt
    checks = 800
    flann_params = {
        'algorithm': 'kdtree',
        'trees': 8
    }
    #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2
    pseudo_max_dist_sqrd = 2 * (512 ** 2)
    flann = vt.flann_cache(vecs1, flann_params=flann_params)
    import pyflann
    try:
        fx2_to_fx1, _fx2_to_dist = flann.nn_index(vecs2, num_neighbors=2, checks=checks)
    except pyflann.FLANNException:
        print('vecs1.shape = %r' % (vecs1.shape,))
        print('vecs2.shape = %r' % (vecs2.shape,))
        print('vecs1.dtype = %r' % (vecs1.dtype,))
        print('vecs2.dtype = %r' % (vecs2.dtype,))
        raise
    fx2_to_dist = np.divide(_fx2_to_dist, pseudo_max_dist_sqrd)
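    # Lowe's ratio test: keep a match only if its nearest neighbor is
    # significantly closer than the second nearest neighbor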
    fx2_to_ratio = np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1])
    ratio_thresh = .625
    fx2_to_isvalid = fx2_to_ratio < ratio_thresh
    fx2_m = np.where(fx2_to_isvalid)[0]
    fx1_m = fx2_to_fx1.T[0].take(fx2_m)
    #fs_RAT = np.subtract(1.0, fx2_to_ratio.take(fx2_m))
    fm_RAT = np.vstack((fx1_m, fx2_m)).T
    fm = fm_RAT
    return chip1, chip2, kpts1, kpts2, fm
Example #15
def get_dummy_test_vars1(fname1='easy1.png', fname2='easy2.png'):
    import utool as ut
    from vtool import image as gtool
    from vtool import features as feattool
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    kpts1, vecs1 = feattool.extract_features(fpath1)
    kpts2, vecs2 = feattool.extract_features(fpath2)
    chip1 = gtool.imread(fpath1)
    chip2 = gtool.imread(fpath2)
    #chip1_shape = vt.gtool.open_image_size(fpath1)
    #chip2_shape = gtool.open_image_size(fpath2)
    #dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1]
    #testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)
    import vtool as vt
    checks = 800
    flann_params = {'algorithm': 'kdtree', 'trees': 8}
    #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2
    pseudo_max_dist_sqrd = 2 * (512**2)
    flann = vt.flann_cache(vecs1, flann_params=flann_params)
    import pyflann
    try:
        fx2_to_fx1, _fx2_to_dist = flann.nn_index(vecs2,
                                                  num_neighbors=2,
                                                  checks=checks)
    except pyflann.FLANNException:
        print('vecs1.shape = %r' % (vecs1.shape, ))
        print('vecs2.shape = %r' % (vecs2.shape, ))
        print('vecs1.dtype = %r' % (vecs1.dtype, ))
        print('vecs2.dtype = %r' % (vecs2.dtype, ))
        raise
    fx2_to_dist = np.divide(_fx2_to_dist, pseudo_max_dist_sqrd)
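    # Lowe's ratio test: keep a match only if its nearest neighbor is
    # significantly closer than the second nearest neighbor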
    fx2_to_ratio = np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1])
    ratio_thresh = .625
    fx2_to_isvalid = fx2_to_ratio < ratio_thresh
    fx2_m = np.where(fx2_to_isvalid)[0]
    fx1_m = fx2_to_fx1.T[0].take(fx2_m)
    #fs_RAT = np.subtract(1.0, fx2_to_ratio.take(fx2_m))
    fm_RAT = np.vstack((fx1_m, fx2_m)).T
    fm = fm_RAT
    return chip1, chip2, kpts1, kpts2, fm
Example #16
def simple_iterative_test():
    r"""
    CommandLine:
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test
        python -m pyhesaff.tests.test_pyhesaff_simple_iterative --test-simple_iterative_test --show

    Example:
        >>> # GUI_DOCTEST
        >>> from pyhesaff.tests.test_pyhesaff_simple_iterative import *  # NOQA
        >>> result = simple_iterative_test()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import pyhesaff
    fpath_list = [
        ut.grab_test_imgpath('lena.png'),
        ut.grab_test_imgpath('carl.jpg'),
        ut.grab_test_imgpath('grace.jpg'),
        ut.grab_test_imgpath('ada.jpg'),
    ]
    kpts_list = []

    for img_fpath in fpath_list:
        kpts, vecs = pyhesaff.detect_feats(img_fpath)
        print('img_fpath=%r' % img_fpath)
        print('kpts=%s' % (ut.truncate_str(repr(kpts)), ))
        print('vecs=%s' % (ut.truncate_str(repr(vecs)), ))
        assert len(kpts) == len(vecs)
        assert len(kpts) > 0, 'no keypoints were detected!'
        kpts_list.append(kpts)

    if ut.show_was_requested():
        import matplotlib as mpl
        from matplotlib import pyplot as plt
        fig = plt.figure()
        for i, (fpath, kpts) in enumerate(zip(fpath_list, kpts_list), start=1):
            ax = fig.add_subplot(2, 2, i)
            img = mpl.image.imread(fpath)
            plt.imshow(img)
            _xs, _ys = kpts.T[0:2]
            ax.plot(_xs, _ys, 'ro', alpha=.5)
Example #17
def get_testdata_kpts(fname=None, with_vecs=False):
    if fname is None:
        kpts = get_dummy_kpts()
        vecs = (np.random.rand(len(kpts), 128) * 255).astype(np.uint8)
        # TODO: dummy vecs
    else:
        from vtool import features as feattool
        import utool as ut
        fpath = ut.grab_test_imgpath(fname)
        kpts, vecs = feattool.extract_features(fpath)
    if with_vecs:
        return kpts, vecs
    else:
        return kpts
Example #18
def testdata_matcher(fname1='easy1.png', fname2='easy2.png'):
    """"
    fname1 = 'easy1.png'
    fname2 = 'hard3.png'

    annot1 = Annot(fpath1)
    annot2 = Annot(fpath2)
    """
    import utool as ut
    from vtool import image as gtool
    from vtool import features as feattool
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    kpts1, vecs1 = feattool.extract_features(fpath1)
    kpts2, vecs2 = feattool.extract_features(fpath2)
    rchip1 = gtool.imread(fpath1)
    rchip2 = gtool.imread(fpath2)
    #chip1_shape = vt.gtool.open_image_size(fpath1)
    chip2_shape = gtool.open_image_size(fpath2)
    dlen_sqrd2 = chip2_shape[0]**2 + chip2_shape[1]**2
    testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)

    return testtup
Example #19
def get_testdata_kpts(fname=None, with_vecs=False):
    if fname is None:
        kpts = get_dummy_kpts()
        vecs = (np.random.rand(len(kpts), 128) * 255).astype(np.uint8)
        # TODO: dummy vecs
    else:
        from vtool import features as feattool
        import utool as ut
        fpath = ut.grab_test_imgpath(fname)
        kpts, vecs = feattool.extract_features(fpath)
    if with_vecs:
        return kpts, vecs
    else:
        return kpts
Example #20
def testdata_matcher(fname1="easy1.png", fname2="easy2.png"):
    """"
    fname1 = 'easy1.png'
    fname2 = 'hard3.png'

    annot1 = Annot(fpath1)
    annot2 = Annot(fpath2)
    """
    import utool as ut
    from vtool import image as gtool
    from vtool import features as feattool

    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    kpts1, vecs1 = feattool.extract_features(fpath1)
    kpts2, vecs2 = feattool.extract_features(fpath2)
    rchip1 = gtool.imread(fpath1)
    rchip2 = gtool.imread(fpath2)
    # chip1_shape = vt.gtool.open_image_size(fpath1)
    chip2_shape = gtool.open_image_size(fpath2)
    dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1] ** 2
    testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)

    return testtup
Example #21
def _test_buffered_generator_img():
    """
    Test for buffering image read calls

    CONCLUSIONS:
        Use buffer   when bgtime is bigger, but comparable to fgtime
        Use buffer   when fgtime < bgtime and (fgtime + bgtime) is large
        Use generate when fgtime > bgtime and (fgtime + bgtime) is large
        Use serial   when fgtime is bigger and all parts are comparatively small

        Buffer size should be roughly bgtime / fgtime
        (e.g. a 40ms background read with 10ms of foreground work suggests
        a buffer of about 4)

        Buffering also has a much more even and regular cpu demand.
        It also demands fewer cpus (I think).


    CommandLine:
        python -m utool.util_parallel --test-_test_buffered_generator_img

    Example:
        >>> import utool as ut
        >>> from utool.util_parallel import *  # NOQA
        >>> from utool.util_parallel import _test_buffered_generator_img  # NOQA
        >>> from utool.util_parallel import _test_buffered_generator_general2  # NOQA
        >>> _test_buffered_generator_img()
    """
    import utool as ut
    #import vtool as vt
    args = [ut.grab_test_imgpath(key) for key in ut.util_grabdata.get_valid_test_imgkeys()]
    import vtool as vt
    #import cv2
    #func = cv2.imread
    # bgfunc is the background task: read each image path from disk
    bgfunc = vt.imread
    def sleepfunc_bufwin(x, niters=10):
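        # stand-in for foreground work; its runtime determines fgtime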
        #import cv2
        for z in range(niters):
            # operate on image in some capacity
            x.cumsum()
        for z in range(2):
            x ** 1.1
        return x
    target_looptime = 60.0
    #target_looptime = 20.0
    #target_looptime = 10.0
    #target_looptime = 5.0
    serial_cheat = 1
    _test_buffered_generator_general2(bgfunc, args, sleepfunc_bufwin, target_looptime, serial_cheat, buffer_size=4, show_serial=False)
Example #22
def test_average_contrast():
    import vtool as vt
    img_fpath_list = [ut.grab_test_imgpath(key) for key in ut.get_valid_test_imgkeys()]
    img_list = [vt.imread(img, grayscale=True) for img in img_fpath_list]
    avecontrast_list = np.array([compute_average_contrast(img) for img in img_list])
    import plottool as pt
    nCols = len(img_list)
    fnum = None
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1))
    sortx = avecontrast_list.argsort()
    y_list = avecontrast_list[sortx]
    x_list = np.arange(0, nCols) + .5
    pt.plot(x_list, y_list, 'bo-')
    sorted_imgs = ut.take(img_list, sortx)
    for px, img in ut.ProgressIter(enumerate(sorted_imgs, start=1)):
        pt.imshow(img, fnum=fnum, pnum=(2, nCols, nCols + px))
Example #23
def test_average_contrast():
    import vtool as vt
    img_fpath_list = [
        ut.grab_test_imgpath(key) for key in ut.get_valid_test_imgkeys()
    ]
    img_list = [vt.imread(img, grayscale=True) for img in img_fpath_list]
    avecontrast_list = np.array(
        [compute_average_contrast(img) for img in img_list])
    import plottool as pt
    nCols = len(img_list)
    fnum = None
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1))
    sortx = avecontrast_list.argsort()
    y_list = avecontrast_list[sortx]
    x_list = np.arange(0, nCols) + .5
    pt.plot(x_list, y_list, 'bo-')
    sorted_imgs = ut.take(img_list, sortx)
    for px, img in ut.ProgressIter(enumerate(sorted_imgs, start=1)):
        pt.imshow(img, fnum=fnum, pnum=(2, nCols, nCols + px))
Example #24
 def thumb_getter(id_, thumbsize=128):
     """ Thumb getters must conform to thumbtup structure """
     #print(id_)
     if id_ == 'doesnotexist.jpg':
         return None
     else:
         img_path = ut.grab_test_imgpath(id_, verbose=False)
         img_size = vt.open_image_size(img_path)
     thumb_path = join(guitool_test_thumbdir, ut.hashstr(str(img_path)) + '.jpg')
     if id_ == 'carl.jpg':
         bbox_list = [(10, 10, 200, 200)]
         theta_list = [0]
     elif id_ == 'lena.png':
         #bbox_list = [(10, 10, 200, 200)]
         bbox_list = [None]
         theta_list = [None]
     else:
         bbox_list = []
         theta_list = []
     thumbtup = (thumb_path, img_path, img_size, bbox_list, theta_list)
     #print('thumbtup = %r' % (thumbtup,))
     return thumbtup
Example #25
def testdata_ratio_matches(fname1='easy1.png', fname2='easy2.png', **kwargs):
    r"""
    Runs simple ratio-test matching between two images.
    Technically this is not dummy data.

    Args:
        fname1 (str):
        fname2 (str):

    Returns:
        tuple : matches_testtup

    CommandLine:
        python -m vtool.tests.dummy --test-testdata_ratio_matches
        python -m vtool.tests.dummy --test-testdata_ratio_matches --help
        python -m vtool.tests.dummy --test-testdata_ratio_matches --show
        python -m vtool.tests.dummy --test-testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance

        python -m vtool.tests.dummy --test-testdata_ratio_matches --show --ratio_thresh=.625 --rotation_invariance --fname1 easy1.png --fname2 easy3.png
        python -m vtool.tests.dummy --test-testdata_ratio_matches --show --ratio_thresh=.625 --no-rotation_invariance --fname1 easy1.png --fname2 easy3.png

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.tests.dummy import *  # NOQA
        >>> import vtool as vt
        >>> # build test data
        >>> fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png')
        >>> fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png')
        >>> # execute function
        >>> default_dict = vt.get_extract_features_default_params()
        >>> default_dict['ratio_thresh'] = .625
        >>> kwargs = ut.argparse_dict(default_dict)
        >>> matches_testtup = testdata_ratio_matches(fname1, fname2, **kwargs)
        >>> (kpts1, kpts2, fm_RAT, fs_RAT, rchip1, rchip2) = matches_testtup
        >>> if ut.show_was_requested():
        >>>     import plottool as pt
        >>>     pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm_RAT, fs_RAT, ori=True)
        >>>     num_matches = len(fm_RAT)
        >>>     score_sum = sum(fs_RAT)
        >>>     title = 'Simple matches using Lowe\'s ratio test'
        >>>     title += '\n num_matches=%r, score_sum=%.2f' % (num_matches, score_sum)
        >>>     pt.set_figtitle(title)
        >>>     pt.show_if_requested()
    """
    import utool as ut
    import vtool as vt
    from vtool import image as gtool
    from vtool import features as feattool
    import pyflann
    # Get params
    ratio_thresh = kwargs.get('ratio_thresh', .625)
    print('ratio_thresh=%r' % (ratio_thresh,))
    featkw = vt.get_extract_features_default_params()
    ut.updateif_haskey(featkw, kwargs)
    # Read Images
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    # Extract Features
    kpts1, vecs1 = feattool.extract_features(fpath1, **featkw)
    kpts2, vecs2 = feattool.extract_features(fpath2, **featkw)
    rchip1 = gtool.imread(fpath1)
    rchip2 = gtool.imread(fpath2)
    # Run Algorithm
    def assign_nearest_neighbors(vecs1, vecs2, K=2):
        checks = 800
        flann_params = {
            'algorithm': 'kdtree',
            'trees': 8
        }
        #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2
        pseudo_max_dist_sqrd = 2 * (512 ** 2)
        flann = vt.flann_cache(vecs1, flann_params=flann_params)
        try:
            fx2_to_fx1, _fx2_to_dist = flann.nn_index(vecs2, num_neighbors=K, checks=checks)
        except pyflann.FLANNException:
            print('vecs1.shape = %r' % (vecs1.shape,))
            print('vecs2.shape = %r' % (vecs2.shape,))
            print('vecs1.dtype = %r' % (vecs1.dtype,))
            print('vecs2.dtype = %r' % (vecs2.dtype,))
            raise
        fx2_to_dist = np.divide(_fx2_to_dist, pseudo_max_dist_sqrd)
        return fx2_to_fx1, fx2_to_dist

    def ratio_test(fx2_to_fx1, fx2_to_dist, ratio_thresh):
        fx2_to_ratio = np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1])
        fx2_to_isvalid = fx2_to_ratio < ratio_thresh
        fx2_m = np.where(fx2_to_isvalid)[0]
        fx1_m = fx2_to_fx1.T[0].take(fx2_m)
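        # match score = 1 - ratio, so more distinctive matches score higher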
        fs_RAT = np.subtract(1.0, fx2_to_ratio.take(fx2_m))
        fm_RAT = np.vstack((fx1_m, fx2_m)).T
        # return normalizer info as well
        fx1_m_normalizer = fx2_to_fx1.T[1].take(fx2_m)
        fm_norm_RAT = np.vstack((fx1_m_normalizer, fx2_m)).T
        return fm_RAT, fs_RAT, fm_norm_RAT

    # GET NEAREST NEIGHBORS
    fx2_to_fx1, fx2_to_dist = assign_nearest_neighbors(vecs1, vecs2, K=2)
    #fx2_m = np.arange(len(fx2_to_fx1))
    #fx1_m = fx2_to_fx1.T[0]
    #fm_ORIG = np.vstack((fx1_m, fx2_m)).T
    #fs_ORIG = fx2_to_dist.T[0]
    #fs_ORIG = 1 - np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1])
    #np.ones(len(fm_ORIG))
    # APPLY RATIO TEST
    #ratio_thresh = .625
    fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_to_fx1, fx2_to_dist, ratio_thresh)
    kpts1 = kpts1.astype(np.float64)
    kpts2 = kpts2.astype(np.float64)
    matches_testtup = (kpts1, kpts2, fm_RAT, fs_RAT, rchip1, rchip2)
    return matches_testtup
Example #26
def segmentation_example():
    import vigra
    import opengm
    import sklearn
    import sklearn.mixture
    import sklearn.decomposition
    import numpy as np
    from vigra import graphs
    import matplotlib as mpl
    import plottool as pt

    pt.ensure_pylab_qt4()

    # load image and convert to LAB
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15  # super-pixel size
    slicWeight = 15.0  # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    # Node Features
    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    nCluster = 5
    g = sklearn.mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb
    clusterProbImg = rag.projectNodeFeaturesToGridGraph(
        clusterProb.astype(np.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    ndim_data = clusterProbImg.reshape((-1, nCluster))
    pca = sklearn.decomposition.PCA(n_components=3)
    print(ndim_data.shape)
    pca.fit(ndim_data)
    print(ut.repr2(pca.explained_variance_ratio_, precision=2))
    oldshape = (clusterProbImg.shape[0:2] + (-1, ))
    clusterProgImg3 = pca.transform(ndim_data).reshape(oldshape)
    print(clusterProgImg3.shape)

    # graphical model with as many variables
    # as superpixels, each has 3 states
    gm = opengm.gm(np.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilities to energies
    probs = np.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * np.log(probs)
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, np.arange(rag.nodeNum))
    # add a potts function
    beta = 40.0  # strength of potts regularizer
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node id's for edges
    uvIds = rag.uvIds()
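    # opengm expects each factor's variable indices in ascending order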
    uvIds = np.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # run inference with loopy belief propagation
    Inf = opengm.inference.BeliefPropagation
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):
        def __init__(self, ):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector), ))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)

    inf.infer(visitor)

    pt.imshow(clusterProgImg3.swapaxes(0, 1))
    # plot superpixels
    cmap = mpl.colors.ListedColormap(np.random.rand(nseg, 3))
    pt.imshow(labels.swapaxes(0, 1).squeeze(), cmap=cmap)
    pt.imshow(nodeFeaturesImgRgb)

    cmap = mpl.colors.ListedColormap(np.random.rand(nCluster, 3))
    for arg in callback.labels:
        arg = vigra.taggedView(arg, "n")
        argImg = rag.projectNodeFeaturesToGridGraph(arg.astype(np.uint32))
        argImg = vigra.taggedView(argImg, "xy")
        # plot superpixels
        pt.imshow(argImg.swapaxes(0, 1).squeeze(), cmap=cmap)
Example #27
def test_mser():
    import cv2
    import vtool as vt
    import plottool as pt
    import numpy as np
    pt.qt4ensure()
    class Keypoints(ut.NiceRepr):
        """
        Convenience class for dealing with keypoints
        """
        def __init__(self, kparr, info=None):
            self.kparr = kparr
            if info is None:
                info = {}
            self.info = info

        def add_info(self, key, val):
            self.info[key] = val

        def __nice__(self):
            return ' ' + str(len(self.kparr))

        @property
        def scale(self):
            return vt.get_scales(self.kparr)

        @property
        def eccentricity(self):
            return vt.get_kpts_eccentricity(self.kparr)

        def compress(self, flags, inplace=False):
            subarr = self.kparr.compress(flags, axis=0)
            info = {key: ut.compress(val, flags) for key, val in self.info.items()}
            return Keypoints(subarr, info)

    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    # http://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html#gsc.tab=0
    # http://stackoverflow.com/questions/17647500/exact-meaning-of-the-parameters-given-to-initialize-mser-in-opencv-2-4-x
    factory = cv2.MSER_create
    img_area = np.product(np.array(vt.get_size(imgGray)))
    _max_area = (img_area // 10)
    _delta = 8
    _min_diversity = .5

    extractor = factory(_delta=_delta, _max_area=_max_area, _min_diversity=_min_diversity)
    # bboxes are x,y,w,h
    regions, bboxes = extractor.detectRegions(imgGray)
    # ellipse definition from [Fitzgibbon95]
    # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
    # ell = [c_x, c_y, R_x, R_y, theta]
    # (cx, cy) = conic center
    # Rx and Ry = conic radii
    # theta is the counterclockwise angle
    fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

    # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
    invVR_mats = []
    for ell in fitz_ellipses:
        ((cx, cy), (dx, dy), degrees) = ell
        theta = np.radians(degrees)  # opencv lives in radians
        # Convert diameters to radii
        rx = dx / 2
        ry = dy / 2
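        # invVR maps the unit circle onto the ellipse:
        # scale, then rotate, then translate to the center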
        S = vt.scale_mat3x3(rx, ry)
        T = vt.translation_mat3x3(cx, cy)
        R = vt.rotation_mat3x3(theta)
        invVR = T.dot(R.dot(S))
        invVR_mats.append(invVR)
    invVR_mats = np.array(invVR_mats)
    #_oris = vt.get_invVR_mats_oris(invVR_mats)
    kpts2_ = vt.flatten_invV_mats_to_kpts(invVR_mats)

    self = Keypoints(kpts2_)
    self.add_info('regions', regions)
    flags = (self.eccentricity < .9)
    #flags = self.scale < np.mean(self.scale)
    #flags = self.scale < np.median(self.scale)
    self = self.compress(flags)
    import plottool as pt
    #pt.interact_keypoints.ishow_keypoints(imgBGR, self.kparr, None, ell_alpha=.4, color='distinct', fnum=2)
    #import plottool as pt
    vis = imgBGR.copy()

    for region in self.info['regions']:
        vis[region.T[1], region.T[0], :] = 0

    #regions, bbox = mser.detectRegions(gray)
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in self.info['regions']]
    #cv2.polylines(vis, hulls, 1, (0, 255, 0))
    #for region in self.info['regions']:
    #    ell = cv2.fitEllipse(region)
    #    cv2.ellipse(vis, ell, (255))
    pt.interact_keypoints.ishow_keypoints(vis, self.kparr, None, ell_alpha=.4, color='distinct', fnum=2)
    #pt.imshow(vis, fnum=2)
    pt.update()

    #extractor = extract_factory['DAISY']()

    #desc_type_to_dtype = {
    #    cv2.CV_8U: np.uint8,
    #    cv2.CV_8s: np.uint,
    #}
    #def alloc_desc(extractor):
    #    desc_type = extractor.descriptorType()
    #    desc_size = extractor.descriptorSize()
    #    dtype = desc_type_to_dtype[desc_type]
    #    shape = (len(cv2_kpts), desc_size)
    #    desc = np.empty(shape, dtype=dtype)
    #    return desc

    #ut.search_module(cv2, 'array', recursive=True)
    #ut.search_module(cv2, 'freak', recursive=True)
    #ut.search_module(cv2, 'out', recursive=True)

    #cv2_kpts = cv2_kpts[0:2]

    #for key, factory in just_desc_factory_.items():
    #    extractor = factory()
    #    desc = alloc_desc(extractor)
    #    desc = extractor.compute(imgGray, cv2_kpts)
    #    feats[key] = (desc,)
    #    #extractor.compute(imgGray, cv2_kpts, desc)
    #    pass
    #kpts = np.array(list(map(from_cv2_kpts, cv2_kpts)))

    #orb = cv2.ORB()
    #kp1, des1 = orb.detectAndCompute(imgGray, None)
    #blober = cv2.SimpleBlobDetector_create()
    #haris_kpts = cv2.cornerHarris(imgGray, 2, 3, 0.04)

    #[name for name in dir(cv2) if 'mat' in name.lower()]
    #[name for name in dir(cv2.xfeatures2d) if 'desc' in name.lower()]

    #[name for name in dir(cv2) if 'detect' in name.lower()]
    #[name for name in dir(cv2) if 'extract' in name.lower()]
    #[name for name in dir(cv2) if 'ellip' in name.lower()]

    #sift = cv2.xfeatures2d.SIFT_create()
    #cv2_kpts = sift.detect(imgGray)
    #desc = sift.compute(imgGray, cv2_kpts)[1]

    #freak = cv2.xfeatures2d.FREAK_create()
    #cv2_kpts = freak.detect(imgGray)
    #desc = freak.compute(imgGray, cv2_kpts)[1]
    pass
Example #28
 def thumb_getter(id_, thumbsize=128):
     """ Thumb getters must conform to thumbtup structure """
     #print(id_)
     return ut.grab_test_imgpath(id_)
Example #29
def detect_opencv_keypoints():
    import cv2
    import vtool as vt
    import numpy as np  # NOQA

    #img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png'))
    img_fpath = ut.grab_test_imgpath(
        ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)

    def from_cv2_kpts(cv2_kp):
        kp = (cv2_kp.pt[0], cv2_kp.pt[1], cv2_kp.size, 0, cv2_kp.size,
              cv2_kp.angle)
        return kp

    print('\n'.join(ut.search_module(cv2, 'create', recursive=True)))

    detect_factory = {
        #'BLOB': cv2.SimpleBlobDetector_create,
        #'HARRIS' : HarrisWrapper.create,
        #'SIFT': cv2.xfeatures2d.SIFT_create,  # really DoG
        'SURF': cv2.xfeatures2d.SURF_create,  # really fast-Hessian blobs
        'MSER': cv2.MSER_create,
        #'StarDetector_create',
    }

    extract_factory = {
        'SIFT': cv2.xfeatures2d.SIFT_create,
        'SURF': cv2.xfeatures2d.SURF_create,
        #'DAISY': cv2.xfeatures2d.DAISY_create,
        'FREAK': cv2.xfeatures2d.FREAK_create,
        #'LATCH': cv2.xfeatures2d.LATCH_create,
        #'LUCID': cv2.xfeatures2d.LUCID_create,
        #'ORB': cv2.ORB_create,
    }
    mask = None

    type_to_kpts = {}
    type_to_desc = {}

    key = 'BLOB'
    key = 'MSER'

    for key in detect_factory.keys():
        factory = detect_factory[key]
        extractor = factory()

        # For MSERS need to adapt shape and then convert into a keypoint repr
        if hasattr(extractor, 'detectRegions'):
            # bboxes are x,y,w,h
            regions, bboxes = extractor.detectRegions(imgGray)
            # ellipse definition from [Fitzgibbon95]
            # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
            # ell = [c_x, c_y, R_x, R_y, theta]
            # (cx, cy) = conic center
            # Rx and Ry = conic radii
            # theta is the counterclockwise angle
            fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

            # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
            #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
            #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
            kpts_ = []
            for ell in fitz_ellipses:
                ((cx, cy), (rx, ry), degrees) = ell
                theta = np.radians(degrees)  # opencv lives in radians
                S = vt.scale_mat3x3(rx, ry)
                T = vt.translation_mat3x3(cx, cy)
                R = vt.rotation_mat3x3(theta)
                #R = np.eye(3)
                invVR = T.dot(R.dot(S))
                kpt = vt.flatten_invV_mats_to_kpts(np.array([invVR]))[0]
                kpts_.append(kpt)
            kpts_ = np.array(kpts_)

        tt = ut.tic('Computing %r keypoints' % (key, ))
        try:
            cv2_kpts = extractor.detect(imgGray, mask)
        except Exception as ex:
            ut.printex(ex,
                       'Failed to computed %r keypoints' % (key, ),
                       iswarning=True)
            pass
        else:
            ut.toc(tt)
            type_to_kpts[key] = cv2_kpts

    print(list(type_to_kpts.keys()))
    print(ut.depth_profile(list(type_to_kpts.values())))
    print('type_to_kpts = ' + ut.repr3(type_to_kpts, truncate=True))

    cv2_kpts = type_to_kpts['MSER']
    kp = cv2_kpts[0]  # NOQA
    #cv2.fitEllipse(cv2_kpts[0])
    cv2_kpts = type_to_kpts['SURF']

    for key in extract_factory.keys():
        factory = extract_factory[key]
        extractor = factory()
        tt = ut.tic('Computing %r descriptors' % (key, ))
        try:
            filtered_cv2_kpts, desc = extractor.compute(imgGray, cv2_kpts)
        except Exception as ex:
            ut.printex(ex,
                       'Failed to computed %r descriptors' % (key, ),
                       iswarning=True)
            pass
        else:
            ut.toc(tt)
            type_to_desc[key] = desc

    print(list(type_to_desc.keys()))
    print(ut.depth_profile(list(type_to_desc.values())))
    print('type_to_desc = ' + ut.repr3(type_to_desc, truncate=True))
Example #30
def test_ori_extract_main():
    """
    CommandLine:
        python -m pyhesaff.tests.test_exhaustive_ori_extract --test-test_ori_extract_main
        python -m pyhesaff.tests.test_exhaustive_ori_extract --test-test_ori_extract_main --show

    Example:
        >>> # GUI_DOCTEST
        >>> from pyhesaff.tests.test_exhaustive_ori_extract import *  # NOQA
        >>> test_ori_extract_main()
        >>> ut.show_if_requested()
    """
    import pyhesaff
    from plottool import draw_func2 as df2
    from plottool.viz_keypoints import show_keypoints
    import vtool  # NOQA
    import vtool.image as gtool
    import vtool.keypoint as ktool
    np.set_printoptions(threshold=5000, linewidth=5000, precision=3)
    # Read data
    print('[rotinvar] loading test data')

    img_fpath = ut.grab_test_imgpath('jeff.png')
    imgL = gtool.cvt_BGR2L(gtool.imread(img_fpath))
    detect_kw0 = {}
    detect_kw1 = {'scale_min': 20, 'scale_max': 100}
    detect_kw2 = {'scale_min': 40, 'scale_max': 60}
    detect_kw3 = {'scale_min': 45, 'scale_max': 49}

    # Remove skew and anisotropic scaling
    def force_isotropic(kpts):
        kpts_ = kpts.copy()
        kpts_[:, ktool.SKEW_DIM] = 0
        kpts_[:, ktool.SCAX_DIM] = kpts_[:, ktool.SCAY_DIM]
        vecs_ = pyhesaff.extract_vecs(img_fpath, kpts_)
        return kpts_, vecs_

    def force_ori(kpts, ori):
        kpts_ = kpts.copy()
        kpts_[:, ktool.ORI_DIM] = ori
        vecs_ = pyhesaff.extract_vecs(img_fpath, kpts_)
        return kpts_, vecs_

    def shift_kpts(kpts, x, y):
        kpts_ = kpts.copy()
        kpts_[:, ktool.XDIM] += x
        kpts_[:, ktool.YDIM] += y
        vecs_ = pyhesaff.extract_vecs(img_fpath, kpts_)
        return kpts_, vecs_

    # --- Experiment ---
    kpts0, vecs0 = double_detect(img_fpath, **detect_kw0)
    kpts1, vecs1 = double_detect(img_fpath, **detect_kw1)
    kpts2, vecs2 = double_detect(img_fpath, **detect_kw2)
    #
    kpts3, vecs3 = double_detect(img_fpath, **detect_kw3)
    kpts4, vecs4 = force_isotropic(kpts3)
    kpts5, vecs5 = force_ori(kpts3, 1.45)
    kpts6, vecs6 = shift_kpts(kpts5, -60, -50)
    kpts7, vecs7 = force_ori(kpts6, 0)
    kpts8, vecs8 = force_ori(kpts7, 2.40)
    kpts9, vecs9 = force_ori(kpts8, 5.40)
    kpts10, vecs10 = force_ori(kpts9, 10.40)

    # --- Print ---

    # ---- Draw ----
    nRow, nCol = 1, 2
    df2.figure(fnum=2, doclf=True, docla=True)
    df2.figure(fnum=1, doclf=True, docla=True)

    def show_kpts_(fnum, pnum, kpts, vecs, title):
        print('--------')
        print('show_kpts: %r.%r' % (fnum, pnum))
        print('kpts  = %r' % (kpts, ))
        print('scales = %r' % ktool.get_scales(kpts))
        # FIXME: this exists in ibeis. move to vtool
        #dev_consistency.check_vecs(vecs3)

        show_keypoints(imgL,
                       kpts,
                       sifts=vecs,
                       pnum=pnum,
                       rect=True,
                       ori=True,
                       fnum=fnum,
                       title=title,
                       ell_alpha=1)

    show_kpts_(1, (nRow, nCol, 1), kpts3, vecs3, 'kpts3: original')
    show_kpts_(1, (nRow, nCol, 2), kpts4, vecs4, 'kpts4: isotropic + redetect')
    show_kpts_(2, (2, 3, 1), kpts5, vecs5, 'kpts5: force_ori + redetect')
    show_kpts_(2, (2, 3, 2), kpts6, vecs6, 'kpts6: shift')
    show_kpts_(2, (2, 3, 3), kpts7, vecs7, 'kpts7: shift + reorient')
    show_kpts_(2, (2, 3, 4), kpts8, vecs8, 'kpts8: shift + reorient')
    show_kpts_(2, (2, 3, 5), kpts9, vecs9, 'kpts9: reorient')
    show_kpts_(2, (2, 3, 6), kpts10, vecs10, 'kpts10: reorient')
Example #31
def test_cv2_flann():
    """
    Ignore:
        [name for name in dir(cv2) if 'create' in name.lower()]
        [name for name in dir(cv2) if 'stereo' in name.lower()]

        ut.grab_zipped_url('https://priithon.googlecode.com/archive/a6117f5e81ec00abcfb037f0f9da2937bb2ea47f.tar.gz', download_dir='.')
    """
    import cv2
    from vtool.tests import dummy
    import plottool as pt
    import vtool as vt
    img1 = vt.imread(ut.grab_test_imgpath('easy1.png'))
    img2 = vt.imread(ut.grab_test_imgpath('easy2.png'))
    # StereoBM and DualTVL1 both expect single-channel uint8 images of the
    # same size, so convert to grayscale and resize first
    img1, img2 = vt.convert_image_list_colorspace([img1, img2],
                                                  'gray',
                                                  src_colorspace='bgr')
    img2 = vt.resize(img2, img1.shape[0:2][::-1])

    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1, img2)
    pt.imshow(disparity)
    pt.show()

    #cv2.estima

    flow = cv2.createOptFlow_DualTVL1()
    # calc returns the computed flow field; pass None to let OpenCV allocate it
    flow_field = flow.calc(img1, img2, None)  # NOQA

    orb = cv2.ORB_create()
    kp1, vecs1 = orb.detectAndCompute(img1, None)
    kp2, vecs2 = orb.detectAndCompute(img2, None)

    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
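    # NOTE: these generic factory functions are OpenCV 2.x APIs; OpenCV 3+
    # removed them in favor of cv2.xfeatures2d.SIFT_create()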

    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)

    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)

    out = img1.copy()
    cv2.drawKeypoints(img1, kp1, outImage=out)
    pt.imshow(out)

    vecs1 = dummy.testdata_dummy_sift(10)
    vecs2 = dummy.testdata_dummy_sift(10)  # NOQA

    FLANN_INDEX_KDTREE = 0  # bug: flann enums are missing
    #flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)  # NOQA

    cv2.flann.Index(vecs1, index_params)

    #cv2.FlannBasedMatcher(flann_params)

    cv2.flann.Index(vecs1, index_params)  # NOQA
Example #32
def test_mser():
    import cv2
    import vtool as vt
    import plottool as pt
    import numpy as np
    pt.qt4ensure()

    class Keypoints(ut.NiceRepr):
        """
        Convenience class for dealing with keypoints
        """
        def __init__(self, kparr, info=None):
            self.kparr = kparr
            if info is None:
                info = {}
            self.info = info

        def add_info(self, key, val):
            self.info[key] = val

        def __nice__(self):
            return ' ' + str(len(self.kparr))

        @property
        def scale(self):
            return vt.get_scales(self.kparr)

        @property
        def eccentricity(self):
            return vt.get_kpts_eccentricity(self.kparr)

        def compress(self, flags, inplace=False):
            subarr = self.kparr.compress(flags, axis=0)
            info = {
                key: ut.compress(val, flags)
                for key, val in self.info.items()
            }
            return Keypoints(subarr, info)

    img_fpath = ut.grab_test_imgpath(
        ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    # http://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html#gsc.tab=0
    # http://stackoverflow.com/questions/17647500/exact-meaning-of-the-parameters-given-to-initialize-mser-in-opencv-2-4-x
    factory = cv2.MSER_create
    img_area = np.product(np.array(vt.get_size(imgGray)))
    _max_area = (img_area // 10)
    _delta = 8
    _min_diversity = .5

    extractor = factory(_delta=_delta,
                        _max_area=_max_area,
                        _min_diversity=_min_diversity)
    # bboxes are x,y,w,h
    regions, bboxes = extractor.detectRegions(imgGray)
    # ellipse definition from [Fitzgibbon95]
    # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
    # ell = [c_x, c_y, R_x, R_y, theta]
    # (cx, cy) = conic center
    # Rx and Ry = conic radii
    # theta is the counterclockwise angle
    fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

    # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
    invVR_mats = []
    for ell in fitz_ellipses:
        ((cx, cy), (dx, dy), degrees) = ell
        theta = np.radians(degrees)  # opencv lives in radians
        # Convert diameters to radii
        rx = dx / 2
        ry = dy / 2
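        # invVR maps the unit circle onto the ellipse:
        # scale, then rotate, then translate to the center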
        S = vt.scale_mat3x3(rx, ry)
        T = vt.translation_mat3x3(cx, cy)
        R = vt.rotation_mat3x3(theta)
        invVR = T.dot(R.dot(S))
        invVR_mats.append(invVR)
    invVR_mats = np.array(invVR_mats)
    #_oris = vt.get_invVR_mats_oris(invVR_mats)
    kpts2_ = vt.flatten_invV_mats_to_kpts(invVR_mats)

    self = Keypoints(kpts2_)
    self.add_info('regions', regions)
    flags = (self.eccentricity < .9)
    #flags = self.scale < np.mean(self.scale)
    #flags = self.scale < np.median(self.scale)
    self = self.compress(flags)
    import plottool as pt
    #pt.interact_keypoints.ishow_keypoints(imgBGR, self.kparr, None, ell_alpha=.4, color='distinct', fnum=2)
    #import plottool as pt
    vis = imgBGR.copy()

    for region in self.info['regions']:
        vis[region.T[1], region.T[0], :] = 0

    #regions, bbox = mser.detectRegions(gray)
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in self.info['regions']]
    #cv2.polylines(vis, hulls, 1, (0, 255, 0))
    #for region in self.info['regions']:
    #    ell = cv2.fitEllipse(region)
    #    cv2.ellipse(vis, ell, (255))
    pt.interact_keypoints.ishow_keypoints(vis,
                                          self.kparr,
                                          None,
                                          ell_alpha=.4,
                                          color='distinct',
                                          fnum=2)
    #pt.imshow(vis, fnum=2)
    pt.update()

    #extractor = extract_factory['DAISY']()

    #desc_type_to_dtype = {
    #    cv2.CV_8U: np.uint8,
    #    cv2.CV_8s: np.uint,
    #}
    #def alloc_desc(extractor):
    #    desc_type = extractor.descriptorType()
    #    desc_size = extractor.descriptorSize()
    #    dtype = desc_type_to_dtype[desc_type]
    #    shape = (len(cv2_kpts), desc_size)
    #    desc = np.empty(shape, dtype=dtype)
    #    return desc

    #ut.search_module(cv2, 'array', recursive=True)
    #ut.search_module(cv2, 'freak', recursive=True)
    #ut.search_module(cv2, 'out', recursive=True)

    #cv2_kpts = cv2_kpts[0:2]

    #for key, factory in just_desc_factory_.items():
    #    extractor = factory()
    #    desc = alloc_desc(extractor)
    #    desc = extractor.compute(imgGray, cv2_kpts)
    #    feats[key] = (desc,)
    #    #extractor.compute(imgGray, cv2_kpts, desc)
    #    pass
    #kpts = np.array(list(map(from_cv2_kpts, cv2_kpts)))

    #orb = cv2.ORB()
    #kp1, des1 = orb.detectAndCompute(imgGray, None)
    #blober = cv2.SimpleBlobDetector_create()
    #haris_kpts = cv2.cornerHarris(imgGray, 2, 3, 0.04)

    #[name for name in dir(cv2) if 'mat' in name.lower()]
    #[name for name in dir(cv2.xfeatures2d) if 'desc' in name.lower()]

    #[name for name in dir(cv2) if 'detect' in name.lower()]
    #[name for name in dir(cv2) if 'extract' in name.lower()]
    #[name for name in dir(cv2) if 'ellip' in name.lower()]

    #sift = cv2.xfeatures2d.SIFT_create()
    #cv2_kpts = sift.detect(imgGray)
    #desc = sift.compute(imgGray, cv2_kpts)[1]

    #freak = cv2.xfeatures2d.FREAK_create()
    #cv2_kpts = freak.detect(imgGray)
    #desc = freak.compute(imgGray, cv2_kpts)[1]
    pass
Example #33
def rhombicuboctahedron():
    import vtk

    # First, you need to store the vertex locations.

    import numpy as np

    fu = 1  # full unit
    hu = 0.5  # half unit
    d = np.sqrt((fu**2) / 2)  # diag
    hh = hu + d  # half height

    # left view faces us

    import utool as ut
    import six
    import itertools

    counter = ut.partial(six.next, itertools.count(0))
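    # each call to counter() returns the next vertex id (0, 1, 2, ...)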

    vertex_locations = vtk.vtkPoints()
    vertex_locations.SetNumberOfPoints(24)

    p1, p2, p3 = np.array([(-hu, -hu, hh), (hu, -hu, hh), (hu, hu, hh),
                           (-hu, hu, hh)]).T
    plist = [p1, p2, p3]

    # three of the six main faces
    # perms = list(itertools.permutations((0, 1, 2), 3))
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]

    vertex_array = []

    # VERTICES
    # left, up, back
    vplist = ['L', 'U', 'B', 'R', 'D', 'F']
    vpdict = {}
    print('perms = %r' % (perms, ))
    for x in range(3):
        vp = vplist[x]
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    # three more of the six main faces
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]
    plist[-1] = -plist[-1]
    # right, down, front
    print('perms = %r' % (perms, ))
    for x in range(3):
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vp = vplist[x + 3]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)

    polygon_faces = vtk.vtkCellArray()

    face_dict = {
        'L': [vpdict['L'][0], vpdict['L'][1], vpdict['L'][2], vpdict['L'][3]],
        'D': [vpdict['D'][0], vpdict['D'][1], vpdict['D'][2], vpdict['D'][3]],
        'U': [vpdict['U'][0], vpdict['U'][1], vpdict['U'][2], vpdict['U'][3]],
        'F': [vpdict['F'][0], vpdict['F'][1], vpdict['F'][2], vpdict['F'][3]],
        'R': [vpdict['R'][0], vpdict['R'][1], vpdict['R'][2], vpdict['R'][3]],
        'B': [vpdict['B'][0], vpdict['B'][1], vpdict['B'][2], vpdict['B'][3]],
        'FL': [vpdict['L'][0], vpdict['L'][3], vpdict['F'][2], vpdict['F'][3]],
        'BL': [vpdict['L'][1], vpdict['L'][2], vpdict['B'][2], vpdict['B'][3]],
        'UL': [vpdict['L'][2], vpdict['L'][3], vpdict['U'][3], vpdict['U'][2]],
        'DL': [vpdict['L'][0], vpdict['L'][1], vpdict['D'][2], vpdict['D'][3]],
        'UFL': [vpdict['L'][3], vpdict['F'][2], vpdict['U'][3]],
        'DFL': [vpdict['L'][0], vpdict['F'][3], vpdict['D'][3]],
        'UBL': [vpdict['L'][2], vpdict['B'][2], vpdict['U'][2]],
        'DBL': [vpdict['L'][1], vpdict['B'][3], vpdict['D'][2]],
        'UFR': [vpdict['R'][3], vpdict['F'][1], vpdict['U'][0]],
        'DFR': [vpdict['R'][0], vpdict['F'][0], vpdict['D'][0]],
        'UBR': [vpdict['R'][2], vpdict['B'][1], vpdict['U'][1]],
        'DBR': [vpdict['R'][1], vpdict['B'][0], vpdict['D'][1]],
        'FR': [vpdict['R'][3], vpdict['R'][0], vpdict['F'][0], vpdict['F'][1]],
        'BR': [vpdict['R'][2], vpdict['R'][1], vpdict['B'][0], vpdict['B'][1]],
        'UR': [vpdict['R'][3], vpdict['R'][2], vpdict['U'][1], vpdict['U'][0]],
        'DR': [vpdict['R'][1], vpdict['R'][0], vpdict['D'][0], vpdict['D'][1]],
        'DF': [vpdict['F'][0], vpdict['F'][3], vpdict['D'][3], vpdict['D'][0]],
        'DB': [vpdict['B'][3], vpdict['B'][0], vpdict['D'][1], vpdict['D'][2]],
        'UF': [vpdict['F'][1], vpdict['F'][2], vpdict['U'][3], vpdict['U'][0]],
        'UB': [vpdict['B'][2], vpdict['B'][1], vpdict['U'][1], vpdict['U'][2]],
    }

    for key, vert_ids in face_dict.items():
        # if key != 'L':
        #    continue
        if len(vert_ids) == 4:
            q = vtk.vtkQuad()
        else:
            q = vtk.vtkTriangle()
        for count, idx in enumerate(vert_ids):
            q.GetPointIds().SetId(count, idx)
        polygon_faces.InsertNextCell(q)

    # Next you create a vtkPolyData to store your face and vertex information
    # that represents your polyhedron.
    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)
    pd.SetPolys(polygon_faces)

    face_stream = vtk.vtkIdList()
    face_stream.InsertNextId(polygon_faces.GetNumberOfCells())
    vertex_list = vtk.vtkIdList()

    polygon_faces.InitTraversal()
    while polygon_faces.GetNextCell(vertex_list) == 1:
        face_stream.InsertNextId(vertex_list.GetNumberOfIds())

        for j in range(vertex_list.GetNumberOfIds()):
            face_stream.InsertNextId(vertex_list.GetId(j))
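    # face_stream is now the flat VTK polyhedron face stream:
    #   [nFaces, n0, id0_0, ..., id0_{n0-1}, n1, id1_0, ...]
    # i.e. the face count first, then each face as its vertex count followed
    # by its point ids; this is the layout VTK_POLYHEDRON cells expect.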

    ug = vtk.vtkUnstructuredGrid()
    ug.SetPoints(vertex_locations)
    ug.InsertNextCell(vtk.VTK_POLYHEDRON, face_stream)

    # writer = vtk.vtkUnstructuredGridWriter()
    # writer.SetFileName("rhombicuboctahedron.vtk")
    # # writer.SetInputData(ug)
    # writer.SetInput(ug)
    # writer.Write()

    mapper = vtk.vtkDataSetMapper()
    mapper.SetInput(ug)
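    # Note: SetInput is the VTK 5 pipeline API; on VTK 6+ this call becomes
    # mapper.SetInputData(ug) (and texture.SetInput below becomes
    # texture.SetInputConnection(reader.GetOutputPort())).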

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    if 1:
        # Read the image data from a file
        import utool as ut

        textureCoords = vtk.vtkFloatArray()
        textureCoords.SetNumberOfComponents(3)
        # coords = ut.take(vertex_array, face_dict['L'])
        # for coord in coords:
        #    textureCoords.InsertNextTuple(tuple(coord))
        textureCoords.InsertNextTuple((0, 0, 0))
        textureCoords.InsertNextTuple((1, 0, 0))
        textureCoords.InsertNextTuple((1, 1, 0))
        textureCoords.InsertNextTuple((0, 1, 0))
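        # Note: only four texture tuples are supplied for a 24-point mesh, so
        # only points 0-3 (the first 'L' face) get well-defined texture
        # coordinates; the mapping for the remaining points is undefined.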

        # Create texture object
        fpath = ut.grab_test_imgpath('zebra.png')
        reader = vtk.vtkPNGReader()
        reader.SetFileName(fpath)

        texture = vtk.vtkTexture()
        texture.SetInput(reader.GetOutput())
        texture.RepeatOff()
        texture.InterpolateOff()

        ptdat = pd.GetPointData()
        ptdat.SetTCoords(textureCoords)

        actor.SetTexture(texture)

    ren = vtk.vtkRenderer()
    ren.AddActor(actor)

    renw = vtk.vtkRenderWindow()
    renw.AddRenderer(ren)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renw)

    ren.ResetCamera()
    renw.Render()
    iren.Start()
Ejemplo n.º 34
def detect_opencv_keypoints():
    import cv2
    import vtool as vt
    import numpy as np  # NOQA

    #img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png'))
    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)

    def from_cv2_kpts(cv2_kp):
        kp = (cv2_kp.pt[0], cv2_kp.pt[1], cv2_kp.size, 0, cv2_kp.size, cv2_kp.angle)
        return kp
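    # The tuple follows vtool's (x, y, iv11, iv21, iv22, ori) keypoint layout;
    # using size for both diagonal terms describes a circular region.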

    print('\n'.join(ut.search_module(cv2, 'create', recursive=True)))

    detect_factory = {
        #'BLOB': cv2.SimpleBlobDetector_create,
        #'HARRIS' : HarrisWrapper.create,
        #'SIFT': cv2.xfeatures2d.SIFT_create,  # really DoG
        'SURF': cv2.xfeatures2d.SURF_create,  # really a fast-Hessian detector
        'MSER': cv2.MSER_create,
        #'StarDetector_create',

    }

    extract_factory = {
        'SIFT': cv2.xfeatures2d.SIFT_create,
        'SURF': cv2.xfeatures2d.SURF_create,
        #'DAISY': cv2.xfeatures2d.DAISY_create,
        'FREAK': cv2.xfeatures2d.FREAK_create,
        #'LATCH': cv2.xfeatures2d.LATCH_create,
        #'LUCID': cv2.xfeatures2d.LUCID_create,
        #'ORB': cv2.ORB_create,
    }
    mask = None

    type_to_kpts = {}
    type_to_desc = {}

    key = 'BLOB'
    key = 'MSER'

    for key in detect_factory.keys():
        factory = detect_factory[key]
        extractor = factory()

        # For MSERs we need to adapt the shape and then convert it into a keypoint repr
        if hasattr(extractor, 'detectRegions'):
            # bboxes are x,y,w,h
            regions, bboxes = extractor.detectRegions(imgGray)
            # ellipse definition from [Fitzgibbon95]
            # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
            # ell = [c_x, c_y, R_x, R_y, theta]
            # (cx, cy) = conic center
            # Rx and Ry = conic radii
            # theta is the counterclockwise angle
            fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

            # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
            #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
            #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
            kpts_ = []
            for ell in fitz_ellipses:
                ((cx, cy), (rx, ry), degrees) = ell
                theta = np.radians(degrees)  # cv2.fitEllipse returns degrees; vtool expects radians
                S = vt.scale_mat3x3(rx, ry)
                T = vt.translation_mat3x3(cx, cy)
                R = vt.rotation_mat3x3(theta)
                #R = np.eye(3)
                invVR = T.dot(R.dot(S))
                kpt = vt.flatten_invV_mats_to_kpts(np.array([invVR]))[0]
                kpts_.append(kpt)
            kpts_ = np.array(kpts_)

        tt = ut.tic('Computing %r keypoints' % (key,))
        try:
            cv2_kpts = extractor.detect(imgGray, mask)
        except Exception as ex:
            ut.printex(ex, 'Failed to compute %r keypoints' % (key,), iswarning=True)
        else:
            ut.toc(tt)
            type_to_kpts[key] = cv2_kpts

    print(list(type_to_kpts.keys()))
    print(ut.depth_profile(list(type_to_kpts.values())))
    print('type_to_kpts = ' + ut.repr3(type_to_kpts, truncate=True))

    cv2_kpts = type_to_kpts['MSER']
    kp = cv2_kpts[0]  # NOQA
    #cv2.fitEllipse(cv2_kpts[0])
    cv2_kpts = type_to_kpts['SURF']

    for key in extract_factory.keys():
        factory = extract_factory[key]
        extractor = factory()
        tt = ut.tic('Computing %r descriptors' % (key,))
        try:
            filtered_cv2_kpts, desc = extractor.compute(imgGray, cv2_kpts)
        except Exception as ex:
            ut.printex(ex, 'Failed to compute %r descriptors' % (key,), iswarning=True)
        else:
            ut.toc(tt)
            type_to_desc[key] = desc

    print(list(type_to_desc.keys()))
    print(ut.depth_profile(list(type_to_desc.values())))
    print('type_to_desc = ' + ut.repr3(type_to_desc, truncate=True))
Ejemplo n.º 35
def gridsearch_chipextract():
    r"""
    CommandLine:
        python -m vtool.chip --test-gridsearch_chipextract --show

    Example:
        >>> # GRIDSEARCH
        >>> from vtool.chip import *  # NOQA
        >>> gridsearch_chipextract()
        >>> ut.show_if_requested()
    """
    import cv2
    test_func = extract_chip_from_img
    if False:
        gpath = ut.grab_test_imgpath('carl.jpg')
        bbox = (100, 3, 100, 100)
        theta = 0.0
        new_size = (58, 34)
    else:
        gpath = '/media/raid/work/GZ_Master1/_ibsdb/images/1524525d-2131-8770-d27c-3a5f9922e9e9.jpg'
        bbox = (450, 373, 2062, 1124)
        theta = 0.0
        old_size = bbox[2:4]
        #target_area = 700 ** 2
        target_area = 1200**2
        new_size = get_scaled_sizes_with_area(target_area, [old_size])[0]
        print('old_size = %r' % (old_size, ))
        print('new_size = %r' % (new_size, ))
        #new_size = (677, 369)
    imgBGR = gtool.imread(gpath)
    args = (imgBGR, bbox, theta, new_size)
    param_info = ut.ParamInfoList(
        'extract_params',
        [
            ut.ParamInfo(
                'interpolation',
                cv2.INTER_LANCZOS4,
                varyvals=[
                    cv2.INTER_LANCZOS4,
                    cv2.INTER_CUBIC,
                    cv2.INTER_LINEAR,
                    cv2.INTER_NEAREST,
                    #cv2.INTER_AREA
                ],
            )
        ])
    show_func = None
    # Generalize
    import plottool as pt
    pt.imshow(imgBGR)  # HACK
    cfgdict_list, cfglbl_list = param_info.get_gridsearch_input(
        defaultslice=slice(0, 10))
    fnum = pt.ensure_fnum(None)
    if show_func is None:
        show_func = pt.imshow
    lbl = ut.get_funcname(test_func)
    cfgresult_list = [
        test_func(*args, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl=lbl)
    ]
    onclick_func = None
    ut.interact_gridsearch_result_images(show_func,
                                         cfgdict_list,
                                         cfglbl_list,
                                         cfgresult_list,
                                         fnum=fnum,
                                         figtitle=lbl,
                                         unpack=False,
                                         max_plots=25,
                                         onclick_func=onclick_func)
    pt.iup()
Ejemplo n.º 36
def test_ori_extract_main():
    """
    CommandLine:
        python -m pyhesaff.tests.test_exhaustive_ori_extract --test-test_ori_extract_main
        python -m pyhesaff.tests.test_exhaustive_ori_extract --test-test_ori_extract_main --show

    Example:
        >>> # GUI_DOCTEST
        >>> from pyhesaff.tests.test_exhaustive_ori_extract import *  # NOQA
        >>> test_ori_extract_main()
        >>> ut.show_if_requested()
    """
    import pyhesaff
    from plottool import draw_func2 as df2
    from plottool.viz_keypoints import show_keypoints
    import vtool  # NOQA
    import vtool.image as gtool
    import vtool.keypoint as ktool
    np.set_printoptions(threshold=5000, linewidth=5000, precision=3)
    # Read data
    print('[rotinvar] loading test data')

    img_fpath = ut.grab_test_imgpath('jeff.png')
    imgL = gtool.cvt_BGR2L(gtool.imread(img_fpath))
    detect_kw0 = {
    }
    detect_kw1 = {
        'scale_min': 20,
        'scale_max': 100
    }
    detect_kw2 = {
        'scale_min': 40,
        'scale_max': 60
    }
    detect_kw3 = {
        'scale_min': 45,
        'scale_max': 49
    }
    # Remove skew and anisotropic scaling
    def force_isotropic(kpts):
        kpts_ = kpts.copy()
        kpts_[:, ktool.SKEW_DIM] = 0
        kpts_[:, ktool.SCAX_DIM] = kpts_[:, ktool.SCAY_DIM]
        vecs_ = pyhesaff.extract_vecs(img_fpath, kpts_)
        return kpts_, vecs_

    def force_ori(kpts, ori):
        kpts_ = kpts.copy()
        kpts_[:, ktool.ORI_DIM] = ori
        vecs_ = pyhesaff.extract_vecs(img_fpath, kpts_)
        return kpts_, vecs_

    def shift_kpts(kpts, x, y):
        kpts_ = kpts.copy()
        kpts_[:, ktool.XDIM] += x
        kpts_[:, ktool.YDIM] += y
        vecs_ = pyhesaff.extract_vecs(img_fpath, kpts_)
        return kpts_, vecs_

    # --- Experiment ---
    kpts0, vecs0 = double_detect(img_fpath, **detect_kw0)
    kpts1, vecs1 = double_detect(img_fpath, **detect_kw1)
    kpts2, vecs2 = double_detect(img_fpath, **detect_kw2)
    #
    kpts3, vecs3 = double_detect(img_fpath, **detect_kw3)
    kpts4, vecs4 = force_isotropic(kpts3)
    kpts5, vecs5 = force_ori(kpts3, 1.45)
    kpts6, vecs6 = shift_kpts(kpts5, -60, -50)
    kpts7, vecs7 = force_ori(kpts6, 0)
    kpts8, vecs8 = force_ori(kpts7, 2.40)
    kpts9, vecs9 = force_ori(kpts8, 5.40)
    kpts10, vecs10 = force_ori(kpts9, 10.40)

    # --- Print ---

    # ---- Draw ----
    nRow, nCol = 1, 2
    df2.figure(fnum=2, doclf=True, docla=True)
    df2.figure(fnum=1, doclf=True, docla=True)

    def show_kpts_(fnum, pnum, kpts, vecs, title):
        print('--------')
        print('show_kpts: %r.%r' % (fnum, pnum))
        print('kpts  = %r' % (kpts,))
        print('scales = %r' % ktool.get_scales(kpts))
        # FIXME: this exists in ibeis. move to vtool
        #dev_consistency.check_vecs(vecs3)

        show_keypoints(imgL, kpts, sifts=vecs, pnum=pnum, rect=True,
                       ori=True, fnum=fnum, title=title, ell_alpha=1)

    show_kpts_(1, (nRow, nCol, 1), kpts3, vecs3, 'kpts3: original')
    show_kpts_(1, (nRow, nCol, 2), kpts4, vecs4, 'kpts4: isotropic + redetect')
    show_kpts_(2, (2, 3, 1), kpts5, vecs5, 'kpts5: force_ori + redetect')
    show_kpts_(2, (2, 3, 2), kpts6, vecs6, 'kpts6: shift')
    show_kpts_(2, (2, 3, 3), kpts7, vecs7, 'kpts7: shift + reorient')
    show_kpts_(2, (2, 3, 4), kpts8, vecs8, 'kpts8: shift + reorient')
    show_kpts_(2, (2, 3, 5), kpts9, vecs9, 'kpts9: reorient')
    show_kpts_(2, (2, 3, 6), kpts10, vecs10, 'kpts10: reorient')
Ejemplo n.º 37
def test_rot_invar():
    r"""
    CommandLine:
        python -m pyhesaff test_rot_invar --show --rebuild-hesaff --no-rmbuild
        python -m pyhesaff test_rot_invar --show --nocpp

        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.0 --rotation_invariance --rebuild-hesaff
        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance --rebuild-hesaff

    Example:
        >>> # DISABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> test_rot_invar()
    """
    import cv2
    import utool as ut
    import vtool as vt
    import plottool as pt
    TAU = 2 * np.pi
    fnum = pt.next_fnum()
    NUM_PTS = 5  # 9
    theta_list = np.linspace(0, TAU, NUM_PTS, endpoint=False)
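    # Sample NUM_PTS rotations evenly over [0, TAU); endpoint=False avoids
    # testing the identity rotation twice.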
    nRows, nCols = pt.get_square_row_cols(len(theta_list), fix=True)
    next_pnum = pt.make_pnum_nextgen(nRows, nCols)
    # Expand the border a bit around star.png
    pad_ = 100
    img_fpath = ut.grab_test_imgpath('star.png')
    img_fpath2 = vt.pad_image_ondisk(img_fpath, pad_, value=26)
    for theta in theta_list:
        print('-----------------')
        print('theta = %r' % (theta, ))
        #theta = ut.get_argval('--theta', type_=float, default=TAU * 3 / 8)
        img_fpath = vt.rotate_image_ondisk(img_fpath2,
                                           theta,
                                           borderMode=cv2.BORDER_REPLICATE)
        if not ut.get_argflag('--nocpp'):
            (kpts_list_ri, vecs_list2) = detect_feats(img_fpath,
                                                      rotation_invariance=True)
            kpts_ri = ut.strided_sample(kpts_list_ri, 2)
        (kpts_list_gv, vecs_list1) = detect_feats(img_fpath,
                                                  rotation_invariance=False)
        kpts_gv = ut.strided_sample(kpts_list_gv, 2)
        # find_kpts_direction
        imgBGR = vt.imread(img_fpath)
        kpts_ripy = vt.find_kpts_direction(imgBGR,
                                           kpts_gv,
                                           DEBUG_ROTINVAR=False)
        # Verify results stdout
        #print('nkpts = %r' % (len(kpts_gv)))
        #print(vt.kpts_repr(kpts_gv))
        #print(vt.kpts_repr(kpts_ri))
        #print(vt.kpts_repr(kpts_ripy))
        # Verify results plot
        pt.figure(fnum=fnum, pnum=next_pnum())
        pt.imshow(imgBGR)
        #if len(kpts_gv) > 0:
        #    pt.draw_kpts2(kpts_gv, ori=True, ell_color=pt.BLUE, ell_linewidth=10.5)
        ell = False
        rect = True
        if not ut.get_argflag('--nocpp'):
            if len(kpts_ri) > 0:
                pt.draw_kpts2(kpts_ri,
                              rect=rect,
                              ell=ell,
                              ori=True,
                              ell_color=pt.RED,
                              ell_linewidth=5.5)
        if len(kpts_ripy) > 0:
            pt.draw_kpts2(kpts_ripy,
                          rect=rect,
                          ell=ell,
                          ori=True,
                          ell_color=pt.GREEN,
                          ell_linewidth=3.5)
        #print('\n'.join(vt.get_ori_strs(np.vstack([kpts_gv, kpts_ri, kpts_ripy]))))
        #ut.embed(exec_lines=['pt.update()'])
    pt.set_figtitle('green=python, red=C++')
    pt.show_if_requested()
Ejemplo n.º 38
def rhombicuboctahedron():
    import vtk
    # First, you need to store the vertex locations.

    import numpy as np
    fu = 1  # full unit
    hu = .5  # half unit
    d = np.sqrt((fu ** 2) / 2)  # diag
    hh = hu + d  # half height

    # left view faces us

    import utool as ut
    import six
    import itertools
    counter = ut.partial(six.next, itertools.count(0))

    vertex_locations = vtk.vtkPoints()
    vertex_locations.SetNumberOfPoints(24)

    p1, p2, p3 = np.array([
        (-hu, -hu, hh),
        ( hu, -hu, hh),
        ( hu,  hu, hh),
        (-hu,  hu, hh),
    ]).T
    plist = [p1, p2, p3]

    # three of the six main faces
    #perms = list(itertools.permutations((0, 1, 2), 3))
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]

    vertex_array = []

    # VERTICES
    # left, up, back
    vplist = ['L', 'U', 'B', 'R', 'D', 'F']
    vpdict = {}
    print('perms = %r' % (perms,))
    for x in range(3):
        vp = vplist[x]
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    # three more of the six main faces
    perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]
    plist[-1] = -plist[-1]
    # right, down, front
    print('perms = %r' % (perms,))
    for x in range(3):
        p = np.vstack(ut.take(plist, perms[x])).T
        counts = [counter() for z in range(4)]
        vp = vplist[x + 3]
        vpdict[vp] = counts
        vertex_array.extend(p.tolist())
        vertex_locations.SetPoint(counts[0], p[0])
        vertex_locations.SetPoint(counts[1], p[1])
        vertex_locations.SetPoint(counts[2], p[2])
        vertex_locations.SetPoint(counts[3], p[3])

    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)

    polygon_faces = vtk.vtkCellArray()

    face_dict = {
        'L': [vpdict['L'][0], vpdict['L'][1], vpdict['L'][2], vpdict['L'][3]],
        'D': [vpdict['D'][0], vpdict['D'][1], vpdict['D'][2], vpdict['D'][3]],
        'U': [vpdict['U'][0], vpdict['U'][1], vpdict['U'][2], vpdict['U'][3]],
        'F': [vpdict['F'][0], vpdict['F'][1], vpdict['F'][2], vpdict['F'][3]],
        'R': [vpdict['R'][0], vpdict['R'][1], vpdict['R'][2], vpdict['R'][3]],
        'B': [vpdict['B'][0], vpdict['B'][1], vpdict['B'][2], vpdict['B'][3]],
        'FL': [ vpdict['L'][0], vpdict['L'][3], vpdict['F'][2], vpdict['F'][3], ],
        'BL': [ vpdict['L'][1], vpdict['L'][2], vpdict['B'][2], vpdict['B'][3], ],
        'UL': [ vpdict['L'][2], vpdict['L'][3], vpdict['U'][3], vpdict['U'][2], ],
        'DL': [ vpdict['L'][0], vpdict['L'][1], vpdict['D'][2], vpdict['D'][3], ],
        'UFL': [ vpdict['L'][3], vpdict['F'][2], vpdict['U'][3], ],
        'DFL': [ vpdict['L'][0], vpdict['F'][3], vpdict['D'][3], ],
        'UBL': [ vpdict['L'][2], vpdict['B'][2], vpdict['U'][2], ],
        'DBL': [ vpdict['L'][1], vpdict['B'][3], vpdict['D'][2], ],
        'UFR': [ vpdict['R'][3], vpdict['F'][1], vpdict['U'][0], ],
        'DFR': [ vpdict['R'][0], vpdict['F'][0], vpdict['D'][0], ],
        'UBR': [ vpdict['R'][2], vpdict['B'][1], vpdict['U'][1], ],
        'DBR': [ vpdict['R'][1], vpdict['B'][0], vpdict['D'][1], ],
        'FR': [ vpdict['R'][3], vpdict['R'][0], vpdict['F'][0], vpdict['F'][1], ],
        'BR': [ vpdict['R'][2], vpdict['R'][1], vpdict['B'][0], vpdict['B'][1], ],
        'UR': [ vpdict['R'][3], vpdict['R'][2], vpdict['U'][1], vpdict['U'][0], ],
        'DR': [ vpdict['R'][1], vpdict['R'][0], vpdict['D'][0], vpdict['D'][1], ],
        'DF': [ vpdict['F'][0], vpdict['F'][3], vpdict['D'][3], vpdict['D'][0], ],
        'DB': [ vpdict['B'][3], vpdict['B'][0], vpdict['D'][1], vpdict['D'][2], ],
        'UF': [ vpdict['F'][1], vpdict['F'][2], vpdict['U'][3], vpdict['U'][0], ],
        'UB': [ vpdict['B'][2], vpdict['B'][1], vpdict['U'][1], vpdict['U'][2], ],
    }

    for key, vert_ids in face_dict.items():
        #if key != 'L':
        #    continue
        if len(vert_ids) == 4:
            q = vtk.vtkQuad()
        else:
            q = vtk.vtkTriangle()
        for count, idx in enumerate(vert_ids):
            q.GetPointIds().SetId(count, idx)
        polygon_faces.InsertNextCell(q)

    # Next you create a vtkPolyData to store your face and vertex information
    # that represents your polyhedron.
    pd = vtk.vtkPolyData()
    pd.SetPoints(vertex_locations)
    pd.SetPolys(polygon_faces)

    face_stream = vtk.vtkIdList()
    face_stream.InsertNextId(polygon_faces.GetNumberOfCells())
    vertex_list = vtk.vtkIdList()

    polygon_faces.InitTraversal()
    while polygon_faces.GetNextCell(vertex_list) == 1:
        face_stream.InsertNextId(vertex_list.GetNumberOfIds())

        for j in range(vertex_list.GetNumberOfIds()):
            face_stream.InsertNextId(vertex_list.GetId(j))

    ug = vtk.vtkUnstructuredGrid()
    ug.SetPoints(vertex_locations)
    ug.InsertNextCell(vtk.VTK_POLYHEDRON, face_stream)

    #writer = vtk.vtkUnstructuredGridWriter()
    #writer.SetFileName("rhombicuboctahedron.vtk")
    ##writer.SetInputData(ug)
    #writer.SetInput(ug)
    #writer.Write()

    mapper = vtk.vtkDataSetMapper()
    mapper.SetInput(ug)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    if 1:
        # Read the image data from a file
        import utool as ut

        textureCoords = vtk.vtkFloatArray()
        textureCoords.SetNumberOfComponents(3)
        #coords = ut.take(vertex_array, face_dict['L'])
        #for coord in coords:
        #    textureCoords.InsertNextTuple(tuple(coord))
        textureCoords.InsertNextTuple((0, 0, 0))
        textureCoords.InsertNextTuple((1, 0, 0))
        textureCoords.InsertNextTuple((1, 1, 0))
        textureCoords.InsertNextTuple((0, 1, 0))

        # Create texture object
        fpath = ut.grab_test_imgpath('zebra.png')
        reader = vtk.vtkPNGReader()
        reader.SetFileName(fpath)

        texture = vtk.vtkTexture()
        texture.SetInput(reader.GetOutput())
        texture.RepeatOff()
        texture.InterpolateOff()

        ptdat = pd.GetPointData()
        ptdat.SetTCoords(textureCoords)

        actor.SetTexture(texture)

    ren = vtk.vtkRenderer()
    ren.AddActor(actor)

    renw = vtk.vtkRenderWindow()
    renw.AddRenderer(ren)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renw)

    ren.ResetCamera()
    renw.Render()
    iren.Start()
Ejemplo n.º 39
def segmentation_example():
    import vigra
    import opengm
    import sklearn
    import sklearn.mixture
    import sklearn.decomposition
    import numpy as np
    from vigra import graphs
    import matplotlib as mpl
    import plottool as pt

    pt.ensure_pylab_qt4()

    # load image and convert to LAB
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15   # super-pixel size
    slicWeight = 15.0        # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    # Node Features
    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    nCluster = 5
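    # Note: sklearn.mixture.GMM was deprecated in scikit-learn 0.18 and
    # removed in 0.20; sklearn.mixture.GaussianMixture(n_components=nCluster)
    # is the closest modern replacement (it also provides fit/predict_proba).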
    g = sklearn.mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb
    clusterProbImg = rag.projectNodeFeaturesToGridGraph(
        clusterProb.astype(np.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    ndim_data = clusterProbImg.reshape((-1, nCluster))
    pca = sklearn.decomposition.PCA(n_components=3)
    print(ndim_data.shape)
    pca.fit(ndim_data)
    print(ut.repr2(pca.explained_variance_ratio_, precision=2))
    oldshape = (clusterProbImg.shape[0:2] + (-1,))
    clusterProgImg3 = pca.transform(ndim_data).reshape(oldshape)
    print(clusterProgImg3.shape)
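    # PCA compresses the nCluster-dimensional per-pixel probability image
    # down to 3 components so it can be shown as an RGB-like image below.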

    # graphical model with as many variables
    # as superpixels, each has 3 states
    gm = opengm.gm(np.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilities to energies
    probs = np.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * np.log(probs)
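    # Negative log turns probabilities into additive energies: minimizing the
    # summed unary energies is equivalent to maximizing the product of the
    # (clipped) cluster probabilities.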
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, np.arange(rag.nodeNum))
    # add a potts function
    beta = 40.0  # strength of potts regularizer
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
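    # The Potts pairwise costs 0.0 when two adjacent superpixels take the
    # same label and beta otherwise, penalizing label boundaries.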
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node id's for edges
    uvIds = rag.uvIds()
    uvIds = np.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # run inference with loopy belief propagation
    Inf = opengm.inference.BeliefPropagation
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):

        def __init__(self,):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)

    inf.infer(visitor)

    pt.imshow(clusterProgImg3.swapaxes(0, 1))
    # plot superpixels
    cmap = mpl.colors.ListedColormap(np.random.rand(nseg, 3))
    pt.imshow(labels.swapaxes(0, 1).squeeze(), cmap=cmap)
    pt.imshow(nodeFeaturesImgRgb)

    cmap = mpl.colors.ListedColormap(np.random.rand(nCluster, 3))
    for arg in callback.labels:
        arg = vigra.taggedView(arg, "n")
        argImg = rag.projectNodeFeaturesToGridGraph(arg.astype(np.uint32))
        argImg = vigra.taggedView(argImg, "xy")
        # plot superpixels
        pt.imshow(argImg.swapaxes(0, 1).squeeze(), cmap=cmap)
Ejemplo n.º 40
def intra_encounter_matching():
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    # imports needed by the exploratory segmentation code further below
    import vigra
    from vigra import graphs
    import opengm
    aid_pairs = np.array([(cm.qaid, daid) for cm in cm_list for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])

    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
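    # The top-5 scores define a sparse weighted graph over annotation ids;
    # connected_components gives tentative identity groups and the spanning
    # tree keeps a minimal set of edges linking each group.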
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()

    # load image and convert to LAB
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15   # super-pixel size
    slicWeight = 15.0        # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    #from sklearn.cluster import MiniBatchKMeans, KMeans
    from sklearn import mixture
    nCluster = 3
    g = mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)

    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb

    clusterProbImg = rag.projectNodeFeaturesToGridGraph(clusterProb.astype(np.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    # strength of potts regularizer
    beta = 40.0
    # graphical model with as many variables as superpixels; each has nCluster states
    gm = opengm.gm(np.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilities to energies
    probs = np.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * np.log(probs)
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, np.arange(rag.nodeNum))
    # add a potts function
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node ids for edges
    uvIds = rag.uvIds()
    uvIds = np.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4, 0.1],
        [0.0, 0.5, 0.2, 0.9, 0.2],
        [0.0, 0.0, 0.5, 0.1, 0.1],
        [0.0, 0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 5
    num_annots = 5
    cost_matrix = (cost_matrix * 2) - 1
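    # (cost_matrix * 2) - 1 rescales the [0, 1] scores to [-1, 1], centering
    # the ambiguous 0.5 value at 0.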
    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2) for a1, a2 in ut.iprod(aids, aids) if a1 != a2], dtype=np.uint32)
    aid_pairs.sort(axis=1)
    # 2nd order function
    fid = gm.addFunction(cost_matrix)
    gm.addFactors(fid, aid_pairs)
    Inf = opengm.inference.BeliefPropagation
    #Inf = opengm.inference.Multicut
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    parameter = opengm.InfParam()
    inf = Inf(gm, parameter=parameter)
    class PyCallback(object):
        def __init__(self):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)
    # baseline jobid
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)
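    # grid2d2Order builds a second-order 10x10 grid model from the unaries
    # with a shared Potts pairwise; for a 2-label Potts model (submodular)
    # GraphCut computes an exact MAP labeling.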

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
    """
Ejemplo n.º 41
def test_rot_invar():
    r"""
    CommandLine:
        python -m pyhesaff test_rot_invar --show --rebuild-hesaff --no-rmbuild
        python -m pyhesaff test_rot_invar --show --nocpp

        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.0 --rotation_invariance --rebuild-hesaff
        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance --rebuild-hesaff

    Example:
        >>> # DISABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> test_rot_invar()
    """
    import cv2
    import utool as ut
    import vtool as vt
    import plottool as pt
    TAU = 2 * np.pi
    fnum = pt.next_fnum()
    NUM_PTS = 5  # 9
    theta_list = np.linspace(0, TAU, NUM_PTS, endpoint=False)
    nRows, nCols = pt.get_square_row_cols(len(theta_list), fix=True)
    next_pnum = pt.make_pnum_nextgen(nRows, nCols)
    # Expand the border a bit around star.png
    pad_ = 100
    img_fpath = ut.grab_test_imgpath('star.png')
    img_fpath2 = vt.pad_image_ondisk(img_fpath, pad_, value=26)
    for theta in theta_list:
        print('-----------------')
        print('theta = %r' % (theta,))
        #theta = ut.get_argval('--theta', type_=float, default=TAU * 3 / 8)
        img_fpath = vt.rotate_image_ondisk(img_fpath2, theta, borderMode=cv2.BORDER_REPLICATE)
        if not ut.get_argflag('--nocpp'):
            (kpts_list_ri, vecs_list2) = detect_feats(img_fpath, rotation_invariance=True)
            kpts_ri = ut.strided_sample(kpts_list_ri, 2)
        (kpts_list_gv, vecs_list1) = detect_feats(img_fpath, rotation_invariance=False)
        kpts_gv = ut.strided_sample(kpts_list_gv, 2)
        # find_kpts_direction
        imgBGR = vt.imread(img_fpath)
        kpts_ripy = vt.find_kpts_direction(imgBGR, kpts_gv, DEBUG_ROTINVAR=False)
        # Verify results stdout
        #print('nkpts = %r' % (len(kpts_gv)))
        #print(vt.kpts_repr(kpts_gv))
        #print(vt.kpts_repr(kpts_ri))
        #print(vt.kpts_repr(kpts_ripy))
        # Verify results plot
        pt.figure(fnum=fnum, pnum=next_pnum())
        pt.imshow(imgBGR)
        #if len(kpts_gv) > 0:
        #    pt.draw_kpts2(kpts_gv, ori=True, ell_color=pt.BLUE, ell_linewidth=10.5)
        ell = False
        rect = True
        if not ut.get_argflag('--nocpp'):
            if len(kpts_ri) > 0:
                pt.draw_kpts2(kpts_ri, rect=rect, ell=ell, ori=True,
                              ell_color=pt.RED, ell_linewidth=5.5)
        if len(kpts_ripy) > 0:
            pt.draw_kpts2(kpts_ripy, rect=rect, ell=ell,  ori=True,
                          ell_color=pt.GREEN, ell_linewidth=3.5)
        #print('\n'.join(vt.get_ori_strs(np.vstack([kpts_gv, kpts_ri, kpts_ripy]))))
        #ut.embed(exec_lines=['pt.update()'])
    pt.set_figtitle('green=python, red=C++')
    pt.show_if_requested()
Ejemplo n.º 42
def gridsearch_chipextract():
    r"""
    CommandLine:
        python -m vtool.chip --test-gridsearch_chipextract --show

    Example:
        >>> # GRIDSEARCH
        >>> from vtool.chip import *  # NOQA
        >>> gridsearch_chipextract()
        >>> ut.show_if_requested()
    """
    import cv2
    test_func = extract_chip_from_img
    if False:
        gpath = ut.grab_test_imgpath('carl.jpg')
        bbox = (100, 3, 100, 100)
        theta = 0.0
        new_size = (58, 34)
    else:
        gpath = '/media/raid/work/GZ_Master1/_ibsdb/images/1524525d-2131-8770-d27c-3a5f9922e9e9.jpg'
        bbox = (450, 373, 2062, 1124)
        theta = 0.0
        old_size = bbox[2:4]
        #target_area = 700 ** 2
        target_area = 1200 ** 2
        new_size = get_scaled_sizes_with_area(target_area, [old_size])[0]
        print('old_size = %r' % (old_size,))
        print('new_size = %r' % (new_size,))
        #new_size = (677, 369)
    imgBGR = gtool.imread(gpath)
    args = (imgBGR, bbox, theta, new_size)
    param_info = ut.ParamInfoList('extract_params', [
        ut.ParamInfo('interpolation', cv2.INTER_LANCZOS4,
                     varyvals=[
                         cv2.INTER_LANCZOS4,
                         cv2.INTER_CUBIC,
                         cv2.INTER_LINEAR,
                         cv2.INTER_NEAREST,
                         #cv2.INTER_AREA
                     ],)
    ])
    show_func = None
    # Generalize
    import plottool as pt
    pt.imshow(imgBGR)  # HACK
    cfgdict_list, cfglbl_list = param_info.get_gridsearch_input(defaultslice=slice(0, 10))
    fnum = pt.ensure_fnum(None)
    if show_func is None:
        show_func = pt.imshow
    lbl = ut.get_funcname(test_func)
    cfgresult_list = [
        test_func(*args, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl=lbl)
    ]
    onclick_func = None
    ut.interact_gridsearch_result_images(
        show_func, cfgdict_list, cfglbl_list,
        cfgresult_list, fnum=fnum,
        figtitle=lbl, unpack=False,
        max_plots=25, onclick_func=onclick_func)
    pt.iup()
Ejemplo n.º 43
def scalespace():
    r"""
    THIS DOES NOT SHOW A REAL SCALE SPACE PYRAMID YET. FIXME.

    Returns:
        ?: imgBGRA_warped

    CommandLine:
        python -m ibeis.scripts.specialdraw scalespace --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> imgBGRA_warped = scalespace()
        >>> result = ('imgBGRA_warped = %s' % (ut.repr2(imgBGRA_warped),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import numpy as np
    # import matplotlib.pyplot as plt
    import cv2
    import vtool as vt
    import plottool as pt
    pt.qt4ensure()

    #imgBGR = vt.imread(ut.grab_test_imgpath('lena.png'))
    imgBGR = vt.imread(ut.grab_test_imgpath('zebra.png'))
    # imgBGR = vt.imread(ut.grab_test_imgpath('carl.jpg'))

    # Convert to colored intensity image
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    imgBGR = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
    imgRaw = imgBGR

    # TODO: stack images in a pyramid; border?
    initial_sigma = 1.6
    num_intervals = 4

    def makepyramid_octave(imgRaw, level, num_intervals):
        # Downsample image to take sigma to a power of level
        step = (2**(level))
        img_level = imgRaw[::step, ::step]
        # Compute interval relative scales
        interval = np.array(list(range(num_intervals)))
        relative_scales = (2**((interval / num_intervals)))
        sigma_intervals = initial_sigma * relative_scales
        octave_intervals = []
        for sigma in sigma_intervals:
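            # Use the smallest odd kernel size >= 6 * sigma + 1 so the window
            # spans roughly +/- 3 sigma (cv2.GaussianBlur needs odd sizes).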
            sizex = int(6. * sigma + 1.) + int(1 - (int(6. * sigma + 1.) % 2))
            ksize = (sizex, sizex)
            img_blur = cv2.GaussianBlur(img_level,
                                        ksize,
                                        sigmaX=sigma,
                                        sigmaY=sigma,
                                        borderType=cv2.BORDER_REPLICATE)
            octave_intervals.append(img_blur)
        return octave_intervals

    pyramid = []
    num_octaves = 4
    for level in range(num_octaves):
        octave = makepyramid_octave(imgRaw, level, num_intervals)
        pyramid.append(octave)

    def makewarp(imgBGR):
        # hack a projection matrix using a dummy homography
        imgBGRA = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2BGRA)
        imgBGRA[:, :, 3] = .87 * 255  # hack alpha
        imgBGRA = vt.pad_image(imgBGRA, 2, value=[0, 0, 255, 255])
        size = np.array(vt.get_size(imgBGRA))
        pts1 = np.array([(0, 0), (0, 1), (1, 1), (1, 0)]) * size
        x_adjust = .15
        y_adjust = .5
        pts2 = np.array([(x_adjust, 0), (0, 1 - y_adjust), (1, 1 - y_adjust),
                         (1 - x_adjust, 0)]) * size
        H = cv2.findHomography(pts1, pts2)[0]

        dsize = np.array(vt.bbox_from_verts(pts2)[2:4]).astype(int)  # np.int was removed in modern numpy
        warpkw = dict(flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
        imgBGRA_warped = cv2.warpPerspective(imgBGRA, H, tuple(dsize),
                                             **warpkw)
        return imgBGRA_warped

    framesize = (700, 500)
    steps = np.array([.04, .03, .02, .01]) * 1.3

    numintervals = 4
    octave_ty_starts = [1.0]
    for i in range(1, 4):
        prev_ty = octave_ty_starts[-1]
        prev_base = pyramid[i - 1][0]
        next_ty = prev_ty - ((prev_base.shape[0] / framesize[1]) / 2 +
                             (numintervals - 1) * (steps[i - 1]))
        octave_ty_starts.append(next_ty)

    def temprange(stop, step, num):
        return [stop - (x * step) for x in range(num)]

    layers = []
    for i in range(0, 4):
        ty_start = octave_ty_starts[i]
        step = steps[i]
        intervals = pyramid[i]
        ty_range = temprange(ty_start, step, numintervals)
        nextpart = [
            vt.embed_in_square_image(makewarp(interval),
                                     framesize,
                                     img_origin=(.5, .5),
                                     target_origin=(.5, ty / 2))
            for ty, interval in zip(ty_range, intervals)
        ]
        layers += nextpart

    for layer in layers:
        pt.imshow(layer)

    pt.plt.grid(False)
Ejemplo n.º 44
def testdata_ratio_matches(fname1='easy1.png', fname2='easy2.png', **kwargs):
    r"""
    Runs simple ratio-test matching between two images.
    Technically this is not dummy data.

    Args:
        fname1 (str):
        fname2 (str):

    Returns:
        tuple : matches_testtup

    CommandLine:
        python -m vtool.tests.dummy --test-testdata_ratio_matches
        python -m vtool.tests.dummy --test-testdata_ratio_matches --help
        python -m vtool.tests.dummy --test-testdata_ratio_matches --show
        python -m vtool.tests.dummy --test-testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance

        python -m vtool.tests.dummy --test-testdata_ratio_matches --show --ratio_thresh=.625 --rotation_invariance --fname1 easy1.png --fname2 easy3.png
        python -m vtool.tests.dummy --test-testdata_ratio_matches --show --ratio_thresh=.625 --no-rotation_invariance --fname1 easy1.png --fname2 easy3.png

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.tests.dummy import *  # NOQA
        >>> import vtool as vt
        >>> # build test data
        >>> fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png')
        >>> fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png')
        >>> # execute function
        >>> default_dict = vt.get_extract_features_default_params()
        >>> default_dict['ratio_thresh'] = .625
        >>> kwargs = ut.argparse_dict(default_dict)
        >>> matches_testtup = testdata_ratio_matches(fname1, fname2, **kwargs)
        >>> (kpts1, kpts2, fm_RAT, fs_RAT, rchip1, rchip2) = matches_testtup
        >>> if ut.show_was_requested():
        >>>     import plottool as pt
        >>>     pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm_RAT, fs_RAT, ori=True)
        >>>     num_matches = len(fm_RAT)
        >>>     score_sum = sum(fs_RAT)
        >>>     title = 'Simple matches using Lowe\'s ratio test'
        >>>     title += '\n num_matches=%r, score_sum=%.2f' % (num_matches, score_sum)
        >>>     pt.set_figtitle(title)
        >>>     pt.show_if_requested()
    """
    import utool as ut
    import vtool as vt
    from vtool import image as gtool
    from vtool import features as feattool
    import pyflann
    # Get params
    ratio_thresh = kwargs.get('ratio_thresh', .625)
    print('ratio_thresh=%r' % (ratio_thresh, ))
    featkw = vt.get_extract_features_default_params()
    ut.updateif_haskey(featkw, kwargs)
    # Read Images
    fpath1 = ut.grab_test_imgpath(fname1)
    fpath2 = ut.grab_test_imgpath(fname2)
    # Extract Features
    kpts1, vecs1 = feattool.extract_features(fpath1, **featkw)
    kpts2, vecs2 = feattool.extract_features(fpath2, **featkw)
    rchip1 = gtool.imread(fpath1)
    rchip2 = gtool.imread(fpath2)

    # Run Algorithm
    def assign_nearest_neighbors(vecs1, vecs2, K=2):
        checks = 800
        flann_params = {'algorithm': 'kdtree', 'trees': 8}
        #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2
        pseudo_max_dist_sqrd = 2 * (512**2)
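        # (sqrt(2) * 512) ** 2 is the largest squared L2 distance between two
        # non-negative SIFT vectors scaled to norm 512, so dividing by it
        # normalizes the squared neighbor distances into [0, 1].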
        flann = vt.flann_cache(vecs1, flann_params=flann_params)
        try:
            fx2_to_fx1, _fx2_to_dist = flann.nn_index(vecs2,
                                                      num_neighbors=K,
                                                      checks=checks)
        except pyflann.FLANNException:
            print('vecs1.shape = %r' % (vecs1.shape, ))
            print('vecs2.shape = %r' % (vecs2.shape, ))
            print('vecs1.dtype = %r' % (vecs1.dtype, ))
            print('vecs2.dtype = %r' % (vecs2.dtype, ))
            raise
        fx2_to_dist = np.divide(_fx2_to_dist, pseudo_max_dist_sqrd)
        return fx2_to_fx1, fx2_to_dist

    def ratio_test(fx2_to_fx1, fx2_to_dist, ratio_thresh):
        fx2_to_ratio = np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1])
        fx2_to_isvalid = fx2_to_ratio < ratio_thresh
        fx2_m = np.where(fx2_to_isvalid)[0]
        fx1_m = fx2_to_fx1.T[0].take(fx2_m)
        fs_RAT = np.subtract(1.0, fx2_to_ratio.take(fx2_m))
        fm_RAT = np.vstack((fx1_m, fx2_m)).T
        # return normalizer info as well
        fx1_m_normalizer = fx2_to_fx1.T[1].take(fx2_m)
        fm_norm_RAT = np.vstack((fx1_m_normalizer, fx2_m)).T
        return fm_RAT, fs_RAT, fm_norm_RAT
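    # Worked example: if the nearest descriptor lies at distance 0.2 and the
    # second nearest at 0.5, the ratio 0.2 / 0.5 = 0.4 passes a 0.625
    # threshold with score 1 - 0.4 = 0.6; ambiguous matches with nearly equal
    # neighbor distances have ratios near 1 and are rejected.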

    # GET NEAREST NEIGHBORS
    fx2_to_fx1, fx2_to_dist = assign_nearest_neighbors(vecs1, vecs2, K=2)
    #fx2_m = np.arange(len(fx2_to_fx1))
    #fx1_m = fx2_to_fx1.T[0]
    #fm_ORIG = np.vstack((fx1_m, fx2_m)).T
    #fs_ORIG = fx2_to_dist.T[0]
    #fs_ORIG = 1 - np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1])
    #np.ones(len(fm_ORIG))
    # APPLY RATIO TEST
    #ratio_thresh = .625
    fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_to_fx1, fx2_to_dist,
                                             ratio_thresh)
    kpts1 = kpts1.astype(np.float64)
    kpts2 = kpts2.astype(np.float64)
    matches_testtup = (kpts1, kpts2, fm_RAT, fs_RAT, rchip1, rchip2)
    return matches_testtup
Ejemplo n.º 45
def scalespace():
    r"""
    THIS DOES NOT SHOW A REAL SCALE SPACE PYRAMID YET. FIXME.

    Returns:
        ?: imgBGRA_warped

    CommandLine:
        python -m ibeis.scripts.specialdraw scalespace --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> imgBGRA_warped = scalespace()
        >>> result = ('imgBGRA_warped = %s' % (ut.repr2(imgBGRA_warped),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import numpy as np
    # import matplotlib.pyplot as plt
    import cv2
    import vtool as vt
    import plottool as pt
    pt.qt4ensure()

    #imgBGR = vt.imread(ut.grab_test_imgpath('lena.png'))
    imgBGR = vt.imread(ut.grab_test_imgpath('zebra.png'))
    # imgBGR = vt.imread(ut.grab_test_imgpath('carl.jpg'))

    # Convert to colored intensity image
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    imgBGR = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
    imgRaw = imgBGR

    # TODO: stack images in a pyramid; border?
    initial_sigma = 1.6
    num_intervals = 4

    def makepyramid_octave(imgRaw, level, num_intervals):
        # Downsample image to take sigma to a power of level
        step = (2 ** (level))
        img_level = imgRaw[::step, ::step]
        # Compute interval relative scales
        interval = np.array(list(range(num_intervals)))
        relative_scales = (2 ** ((interval / num_intervals)))
        sigma_intervals = initial_sigma * relative_scales
        octave_intervals = []
        for sigma in sigma_intervals:
            sizex = int(6. * sigma + 1.) + int(1 - (int(6. * sigma + 1.) % 2))
            ksize = (sizex, sizex)
            img_blur = cv2.GaussianBlur(img_level, ksize, sigmaX=sigma,
                                        sigmaY=sigma,
                                        borderType=cv2.BORDER_REPLICATE)
            octave_intervals.append(img_blur)
        return octave_intervals

    pyramid = []
    num_octaves = 4
    for level in range(num_octaves):
        octave = makepyramid_octave(imgRaw, level, num_intervals)
        pyramid.append(octave)

    def makewarp(imgBGR):
        # hack a projection matrix using a dummy homography
        imgBGRA = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2BGRA)
        imgBGRA[:, :, 3] = .87 * 255  # hack alpha
        imgBGRA = vt.pad_image(imgBGRA, 2, value=[0, 0, 255, 255])
        size = np.array(vt.get_size(imgBGRA))
        pts1 = np.array([(0, 0), (0, 1), (1, 1), (1, 0)]) * size
        x_adjust = .15
        y_adjust = .5
        pts2 = np.array([(x_adjust, 0), (0, 1 - y_adjust), (1, 1 - y_adjust), (1 - x_adjust, 0)]) * size
        H = cv2.findHomography(pts1, pts2)[0]

        dsize = np.array(vt.bbox_from_verts(pts2)[2:4]).astype(int)  # np.int was removed in modern numpy
        warpkw = dict(flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
        imgBGRA_warped = cv2.warpPerspective(imgBGRA, H, tuple(dsize), **warpkw)
        return imgBGRA_warped

    framesize = (700, 500)
    steps = np.array([.04, .03, .02, .01]) * 1.3

    numintervals = 4
    octave_ty_starts = [1.0]
    for i in range(1, 4):
        prev_ty = octave_ty_starts[-1]
        prev_base = pyramid[i - 1][0]
        next_ty = prev_ty - ((prev_base.shape[0] / framesize[1]) / 2 + (numintervals - 1) * (steps[i - 1]))
        octave_ty_starts.append(next_ty)

    def temprange(stop, step, num):
        return [stop - (x * step) for x in range(num)]

    layers = []
    for i in range(0, 4):
        ty_start = octave_ty_starts[i]
        step = steps[i]
        intervals = pyramid[i]
        ty_range = temprange(ty_start, step, numintervals)
        nextpart = [
            vt.embed_in_square_image(makewarp(interval), framesize, img_origin=(.5, .5),
                                     target_origin=(.5, ty / 2))
            for ty, interval in zip(ty_range, intervals)
        ]
        layers += nextpart

    for layer in layers:
        pt.imshow(layer)

    pt.plt.grid(False)