Example #1
def __testwarp(tup):
    # THIS DOES NOT CAUSE A PROBLEM, FOR REASONS THAT ARE UNCLEAR
    import cv2
    import numpy as np
    import vtool as vt
    img = tup[0]
    M = vt.rotation_mat3x3(.1)[0:2].dot(vt.translation_mat3x3(-10, 10))
    #new = cv2.warpAffine(img, M[0:2], (500, 500), flags=cv2.INTER_LANCZOS4,
    #                     borderMode=cv2.BORDER_CONSTANT)
    # ONLY FAILS WHEN OUTPUT SIZE IS LARGE
    #dsize = (314, 314)  # (313, 313) does not cause the error
    dsize = (500, 500)  # (313, 313) does not cause the error
    dst = np.empty(dsize[::-1], dtype=img.dtype)
    #new = cv2.warpAffine(img, M[0:2], dsize)
    print('Warping?')
    new = cv2.warpAffine(img, M[0:2], dsize, dst)
    print(dst is new)
    return new
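
The check above hinges on whether cv2.warpAffine reuses a preallocated destination buffer. A minimal, self-contained sketch of the same probe, using only cv2 and numpy on a synthetic image (the image contents, rotation parameters, and sizes here are arbitrary stand-ins, not taken from vtool):

import cv2
import numpy as np

img = np.zeros((400, 400), dtype=np.uint8)
img[100:300, 100:300] = 255                        # synthetic test pattern
M = cv2.getRotationMatrix2D((200, 200), 5.7, 1.0)  # 2x3 affine (~0.1 rad)
dsize = (500, 500)                                 # (width, height)
dst = np.empty(dsize[::-1], dtype=img.dtype)       # preallocated (rows, cols)
new = cv2.warpAffine(img, M, dsize, dst)
print(new is dst)  # True only if OpenCV wrote into the provided buffer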
Example #2
def warp_patch_onto_kpts(
        kpts, patch, chipshape,
        weights=None,
        out=None,
        cov_scale_factor=.2,
        cov_agg_mode='max',
        cov_remove_shape=False,
        cov_remove_scale=False,
        cov_size_penalty_on=True,
        cov_size_penalty_power=.5,
        cov_size_penalty_frac=.1):
    r"""
    Overlays the source image onto a destination image in each keypoint location

    Args:
        kpts (ndarray[float32_t, ndim=2]):  keypoints
        patch (ndarray): patch to warp (like gaussian)
        chipshape (tuple): (h, w[, c]) shape of the destination chip
        weights (ndarray): score for every keypoint

    Kwargs:
        cov_scale_factor (float): fraction of the chip size used for the coverage mask

    Returns:
        ndarray: mask

    CommandLine:
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show --hole
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show --square
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show --square --hole

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.coverage_kpts import *  # NOQA
        >>> import vtool as vt
        >>> import pyhesaff
        >>> img_fpath    = ut.grab_test_imgpath('carl.jpg')
        >>> (kpts, vecs) = pyhesaff.detect_feats(img_fpath)
        >>> kpts = kpts[::15]
        >>> chip = vt.imread(img_fpath)
        >>> chipshape = chip.shape
        >>> weights = np.ones(len(kpts))
        >>> cov_scale_factor = 1.0
        >>> srcshape = (19, 19)
        >>> radius = srcshape[0] / 2.0
        >>> sigma = 0.4 * radius
        >>> SQUARE = ut.get_argflag('--square')
        >>> HOLE = ut.get_argflag('--hole')
        >>> if SQUARE:
        >>>     patch = np.ones(srcshape)
        >>> else:
        >>>     patch = ptool.gaussian_patch(shape=srcshape, sigma=sigma) #, norm_01=False)
        >>>     patch = patch / patch.max()
        >>> if HOLE:
        >>>     patch[int(patch.shape[0] / 2), int(patch.shape[1] / 2)] = 0
        >>> # execute function
        >>> dstimg = warp_patch_onto_kpts(kpts, patch, chipshape, weights, cov_scale_factor=cov_scale_factor)
        >>> # verify results
        >>> print('dstimg stats %r' % (ut.get_stats_str(dstimg, axis=None)),)
        >>> print('patch stats %r' % (ut.get_stats_str(patch, axis=None)),)
        >>> #print(patch.sum())
        >>> assert np.all(ut.inbounds(dstimg, 0, 1, eq=True))
        >>> # show results
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> mask = dstimg
        >>> show_coverage_map(chip, mask, patch, kpts)
        >>> pt.show_if_requested()
    """
    import vtool as vt
    #if len(kpts) == 0:
    #    return None
    chip_scale_h = int(np.ceil(chipshape[0] * cov_scale_factor))
    chip_scale_w = int(np.ceil(chipshape[1] * cov_scale_factor))
    if len(kpts) == 0:
        dstimg = np.zeros((chip_scale_h, chip_scale_w))
        return dstimg
    if weights is None:
        weights = np.ones(len(kpts))
    dsize = (chip_scale_w, chip_scale_h)
    # Allocate destination image
    patch_shape = patch.shape
    # Scale keypoints into destination image
    # <HACK>
    if cov_remove_shape:
        # disregard the affine (shape) information in the keypoints;
        # the motivation for this variant is unclear
        (patch_h, patch_w) = patch_shape
        half_width  = (patch_w / 2.0)  # - .5
        half_height = (patch_h / 2.0)  # - .5
        # Center src image
        T1 = vt.translation_mat3x3(-half_width + .5, -half_height + .5)
        # Scale src to the unit circle
        if not cov_remove_scale:
            S1 = vt.scale_mat3x3(1.0 / half_width, 1.0 / half_height)
        # Transform the source image to the keypoint ellipse
        kpts_T = np.array([vt.translation_mat3x3(x, y) for (x, y) in vt.get_xys(kpts).T])
        if not cov_remove_scale:
            kpts_S = np.array([vt.scale_mat3x3(np.sqrt(scale))
                               for scale in vt.get_scales(kpts).T])
        # Adjust for the requested scale factor
        S2 = vt.scale_mat3x3(cov_scale_factor, cov_scale_factor)
        #perspective_list = [S2.dot(A).dot(S1).dot(T1) for A in invVR_aff2Ds]
        if not cov_remove_scale:
            M_list = reduce(vt.matrix_multiply, (S2, kpts_T, kpts_S, S1, T1))
        else:
            M_list = reduce(vt.matrix_multiply, (S2, kpts_T, T1))
    # </HACK>
    else:
        M_list = ktool.get_transforms_from_patch_image_kpts(kpts, patch_shape,
                                                            cov_scale_factor)
    affmat_list = M_list[:, 0:2, :]
    weight_list = weights
    # For each keypoint warp a gaussian scaled by the feature score into the image
    warped_patch_iter = warped_patch_generator(
        patch, dsize, affmat_list, weight_list,
        cov_size_penalty_on=cov_size_penalty_on,
        cov_size_penalty_power=cov_size_penalty_power,
        cov_size_penalty_frac=cov_size_penalty_frac)
    # Either max or sum
    if cov_agg_mode == 'max':
        dstimg = vt.iter_reduce_ufunc(np.maximum, warped_patch_iter, out=out)
    elif cov_agg_mode == 'sum':
        dstimg = vt.iter_reduce_ufunc(np.add, warped_patch_iter, out=out)
        # HACK FOR SUM: DO NOT DO THIS FOR MAX
        dstimg[dstimg > 1.0] = 1.0
    else:
        raise AssertionError('Unknown cov_agg_mode=%r' % (cov_agg_mode,))
    return dstimg
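
For reference, a minimal sketch of what the 'max'/'sum' aggregation at the end of warp_patch_onto_kpts does, assuming warped_patch_iter yields float masks of identical shape (vt.iter_reduce_ufunc behaves similarly but can additionally reuse an out buffer):

import numpy as np

def aggregate_warped_patches(warped_patch_iter, cov_agg_mode='max'):
    if cov_agg_mode not in ('max', 'sum'):
        raise AssertionError('Unknown cov_agg_mode=%r' % (cov_agg_mode,))
    patches = iter(warped_patch_iter)
    dstimg = next(patches).copy()  # the first warped patch seeds the accumulator
    ufunc = np.maximum if cov_agg_mode == 'max' else np.add
    for patch in patches:
        ufunc(dstimg, patch, out=dstimg)
    if cov_agg_mode == 'sum':
        np.clip(dstimg, None, 1.0, out=dstimg)  # same clamp hack as above
    return dstimg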
Example #3
def detect_opencv_keypoints():
    import cv2
    import vtool as vt
    import numpy as np  # NOQA

    #img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png'))
    img_fpath = ut.grab_test_imgpath(
        ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)

    def from_cv2_kpts(cv2_kp):
        kp = (cv2_kp.pt[0], cv2_kp.pt[1], cv2_kp.size, 0, cv2_kp.size,
              cv2_kp.angle)
        return kp

    print('\n'.join(ut.search_module(cv2, 'create', recursive=True)))

    detect_factory = {
        #'BLOB': cv2.SimpleBlobDetector_create,
        #'HARRIS' : HarrisWrapper.create,
        #'SIFT': cv2.xfeatures2d.SIFT_create,  # really DoG
        'SURF': cv2.xfeatures2d.SURF_create,  # really a fast-Hessian detector
        'MSER': cv2.MSER_create,
        #'StarDetector_create',
    }

    extract_factory = {
        'SIFT': cv2.xfeatures2d.SIFT_create,
        'SURF': cv2.xfeatures2d.SURF_create,
        #'DAISY': cv2.xfeatures2d.DAISY_create,
        'FREAK': cv2.xfeatures2d.FREAK_create,
        #'LATCH': cv2.xfeatures2d.LATCH_create,
        #'LUCID': cv2.xfeatures2d.LUCID_create,
        #'ORB': cv2.ORB_create,
    }
    mask = None

    type_to_kpts = {}
    type_to_desc = {}

    key = 'BLOB'
    key = 'MSER'

    for key in detect_factory.keys():
        factory = detect_factory[key]
        extractor = factory()

        # For MSER we need to adapt the region shape and then convert it into a keypoint repr
        if hasattr(extractor, 'detectRegions'):
            # bboxes are x,y,w,h
            regions, bboxes = extractor.detectRegions(imgGray)
            # ellipse definition from [Fitzgibbon95]
            # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
            # ell = [c_x, c_y, R_x, R_y, theta]
            # (cx, cy) = conic center
            # Rx and Ry = conic radii
            # theta is the counterclockwise angle
            fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

            # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
            #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
            #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
            kpts_ = []
            for ell in fitz_ellipses:
                ((cx, cy), (dx, dy), degrees) = ell
                theta = np.radians(degrees)  # OpenCV reports the angle in degrees
                # fitEllipse returns full axis lengths; halve them to get radii
                rx = dx / 2.0
                ry = dy / 2.0
                S = vt.scale_mat3x3(rx, ry)
                T = vt.translation_mat3x3(cx, cy)
                R = vt.rotation_mat3x3(theta)
                #R = np.eye(3)
                invVR = T.dot(R.dot(S))
                kpt = vt.flatten_invV_mats_to_kpts(np.array([invVR]))[0]
                kpts_.append(kpt)
            kpts_ = np.array(kpts_)

        tt = ut.tic('Computing %r keypoints' % (key, ))
        try:
            cv2_kpts = extractor.detect(imgGray, mask)
        except Exception as ex:
            ut.printex(ex,
                       'Failed to compute %r keypoints' % (key, ),
                       iswarning=True)
        else:
            ut.toc(tt)
            type_to_kpts[key] = cv2_kpts

    print(list(type_to_kpts.keys()))
    print(ut.depth_profile(list(type_to_kpts.values())))
    print('type_to_kpts = ' + ut.repr3(type_to_kpts, truncate=True))

    cv2_kpts = type_to_kpts['MSER']
    kp = cv2_kpts[0]  # NOQA
    #cv2.fitEllipse(cv2_kpts[0])
    cv2_kpts = type_to_kpts['SURF']

    for key in extract_factory.keys():
        factory = extract_factory[key]
        extractor = factory()
        tt = ut.tic('Computing %r descriptors' % (key, ))
        try:
            filtered_cv2_kpts, desc = extractor.compute(imgGray, cv2_kpts)
        except Exception as ex:
            ut.printex(ex,
                       'Failed to compute %r descriptors' % (key, ),
                       iswarning=True)
        else:
            ut.toc(tt)
            type_to_desc[key] = desc

    print(list(type_to_desc.keys()))
    print(ut.depth_profile(list(type_to_desc.values())))
    print('type_to_desc = ' + ut.repr3(type_to_desc, truncate=True))
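
The MSER branch above boils down to mapping each fitted ellipse onto a 3x3 affine matrix that carries the unit circle onto that ellipse. A vtool-free sketch of that conversion (assuming the rotation sign convention matches vt.rotation_mat3x3; cv2.fitEllipse reports full axis lengths and an angle in degrees):

import numpy as np

def ellipse_to_invVR(ell):
    (cx, cy), (dx, dy), degrees = ell   # cv2.fitEllipse output
    rx, ry = dx / 2.0, dy / 2.0         # full axis lengths -> radii
    theta = np.radians(degrees)
    S = np.diag([rx, ry, 1.0])                              # scale
    R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]])    # rotation
    T = np.array([[1.0, 0.0, cx],
                  [0.0, 1.0, cy],
                  [0.0, 0.0, 1.0]])                         # translation
    return T.dot(R).dot(S)  # maps the unit circle onto the fitted ellipse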
Example #4
def test_mser():
    import cv2
    import vtool as vt
    import plottool as pt
    import numpy as np
    pt.qt4ensure()

    class Keypoints(ut.NiceRepr):
        """
        Convenience class for dealing with keypoints
        """
        def __init__(self, kparr, info=None):
            self.kparr = kparr
            if info is None:
                info = {}
            self.info = info

        def add_info(self, key, val):
            self.info[key] = val

        def __nice__(self):
            return ' ' + str(len(self.kparr))

        @property
        def scale(self):
            return vt.get_scales(self.kparr)

        @property
        def eccentricity(self):
            return vt.get_kpts_eccentricity(self.kparr)

        def compress(self, flags, inplace=False):
            subarr = self.kparr.compress(flags, axis=0)
            info = {
                key: ut.compress(val, flags)
                for key, val in self.info.items()
            }
            return Keypoints(subarr, info)

    img_fpath = ut.grab_test_imgpath(
        ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
    # http://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html#gsc.tab=0
    # http://stackoverflow.com/questions/17647500/exact-meaning-of-the-parameters-given-to-initialize-mser-in-opencv-2-4-x
    factory = cv2.MSER_create
    img_area = np.prod(np.array(vt.get_size(imgGray)))
    _max_area = (img_area // 10)
    _delta = 8
    _min_diversity = .5

    extractor = factory(_delta=_delta,
                        _max_area=_max_area,
                        _min_diversity=_min_diversity)
    # bboxes are x,y,w,h
    regions, bboxes = extractor.detectRegions(imgGray)
    # ellipse definition from [Fitzgibbon95]
    # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
    # ell = [c_x, c_y, R_x, R_y, theta]
    # (cx, cy) = conic center
    # Rx and Ry = conic radii
    # theta is the counterclockwise angle
    fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

    # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
    invVR_mats = []
    for ell in fitz_ellipses:
        ((cx, cy), (dx, dy), degrees) = ell
        theta = np.radians(degrees)  # OpenCV reports the angle in degrees
        # fitEllipse returns full axis lengths (diameters); halve to get radii
        rx = dx / 2
        ry = dy / 2
        S = vt.scale_mat3x3(rx, ry)
        T = vt.translation_mat3x3(cx, cy)
        R = vt.rotation_mat3x3(theta)
        invVR = T.dot(R.dot(S))
        invVR_mats.append(invVR)
    invVR_mats = np.array(invVR_mats)
    #_oris = vt.get_invVR_mats_oris(invVR_mats)
    kpts2_ = vt.flatten_invV_mats_to_kpts(invVR_mats)

    self = Keypoints(kpts2_)
    self.add_info('regions', regions)
    flags = (self.eccentricity < .9)
    #flags = self.scale < np.mean(self.scale)
    #flags = self.scale < np.median(self.scale)
    self = self.compress(flags)
    import plottool as pt
    #pt.interact_keypoints.ishow_keypoints(imgBGR, self.kparr, None, ell_alpha=.4, color='distinct', fnum=2)
    #import plottool as pt
    vis = imgBGR.copy()

    for region in self.info['regions']:
        vis[region.T[1], region.T[0], :] = 0

    #regions, bbox = mser.detectRegions(gray)
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in self.info['regions']]
    #cv2.polylines(vis, hulls, 1, (0, 255, 0))
    #for region in self.info['regions']:
    #    ell = cv2.fitEllipse(region)
    #    cv2.ellipse(vis, ell, (255))
    pt.interact_keypoints.ishow_keypoints(vis,
                                          self.kparr,
                                          None,
                                          ell_alpha=.4,
                                          color='distinct',
                                          fnum=2)
    #pt.imshow(vis, fnum=2)
    pt.update()

    #extractor = extract_factory['DAISY']()

    #desc_type_to_dtype = {
    #    cv2.CV_8U: np.uint8,
    #    cv2.CV_8s: np.uint,
    #}
    #def alloc_desc(extractor):
    #    desc_type = extractor.descriptorType()
    #    desc_size = extractor.descriptorSize()
    #    dtype = desc_type_to_dtype[desc_type]
    #    shape = (len(cv2_kpts), desc_size)
    #    desc = np.empty(shape, dtype=dtype)
    #    return desc

    #ut.search_module(cv2, 'array', recursive=True)
    #ut.search_module(cv2, 'freak', recursive=True)
    #ut.search_module(cv2, 'out', recursive=True)

    #cv2_kpts = cv2_kpts[0:2]

    #for key, factory in just_desc_factory_.items():
    #    extractor = factory()
    #    desc = alloc_desc(extractor)
    #    desc = extractor.compute(imgGray, cv2_kpts)
    #    feats[key] = (desc,)
    #    #extractor.compute(imgGray, cv2_kpts, desc)
    #    pass
    #kpts = np.array(list(map(from_cv2_kpts, cv2_kpts)))

    #orb = cv2.ORB()
    #kp1, des1 = orb.detectAndCompute(imgGray, None)
    #blober = cv2.SimpleBlobDetector_create()
    #haris_kpts = cv2.cornerHarris(imgGray, 2, 3, 0.04)

    #[name for name in dir(cv2) if 'mat' in name.lower()]
    #[name for name in dir(cv2.xfeatures2d) if 'desc' in name.lower()]

    #[name for name in dir(cv2) if 'detect' in name.lower()]
    #[name for name in dir(cv2) if 'extract' in name.lower()]
    #[name for name in dir(cv2) if 'ellip' in name.lower()]

    #sift = cv2.xfeatures2d.SIFT_create()
    #cv2_kpts = sift.detect(imgGray)
    #desc = sift.compute(imgGray, cv2_kpts)[1]

    #freak = cv2.xfeatures2d.FREAK_create()
    #cv2_kpts = freak.detect(imgGray)
    #desc = freak.compute(imgGray, cv2_kpts)[1]
    pass
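
The eccentricity filter above keeps only fairly round regions. For reference, eccentricity can be computed directly from the fitted ellipse axes; vt.get_kpts_eccentricity presumably derives the same quantity from the keypoint shape matrices (an assumption, not verified here):

import numpy as np

def ellipse_eccentricity(dx, dy):
    # dx, dy are the full axis lengths reported by cv2.fitEllipse
    a = max(dx, dy) / 2.0  # semi-major axis
    b = min(dx, dy) / 2.0  # semi-minor axis
    return np.sqrt(1.0 - (b / a) ** 2)

# e.g. a mirror of flags = (self.eccentricity < .9) using the raw ellipses:
# flags = np.array([ellipse_eccentricity(dx, dy) < .9
#                   for (_center, (dx, dy), _angle) in fitz_ellipses])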