Example 1
    def feature_detector_create(self, detector_name, adaptation):

        if int(self.OPENCV_MAJOR) < 3:
            name = adaptation + detector_name
            detector = cv2.FeatureDetector_create(name)
        else:
            if detector_name == DetectorType.ORB:
                detector = cv2.ORB(adaptation)
            elif detector_name == DetectorType.FAST:
                # noinspection PyUnresolvedReferences
                detector = cv2.FastFeatureDetector_create()
            elif detector_name == DetectorType.STAR:
                # noinspection PyUnresolvedReferences
                detector = cv2.xfeatures2d.StarDetector_create()
            elif detector_name == DetectorType.MSER:
                # noinspection PyUnresolvedReferences
                detector = cv2.MSER_create()
            elif detector_name == DetectorType.GFTT:
                # noinspection PyUnresolvedReferences
                detector = cv2.GFTTDetector_create()
            elif detector_name == DetectorType.HARRIS:
                # noinspection PyUnresolvedReferences
                detector = cv2.xfeatures2d.HarrisLaplaceFeatureDetector_create()
            elif detector_name == DetectorType.BLOB:
                # noinspection PyUnresolvedReferences
                detector = cv2.SimpleBlobDetector_create()
            else:  # detector_name == DetectorType.BRISK
                detector = cv2.BRISK(adaptation)

        return detector
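Note: the method above switches between the OpenCV 2.x string factory and the 3.x *_create() constructors. Below is a minimal, self-contained sketch of the same version check; cv2.__version__, cv2.FeatureDetector_create and cv2.BRISK_create are standard OpenCV names, everything else (including the dummy image) is illustrative.

import cv2
import numpy as np

opencv_major = int(cv2.__version__.split('.')[0])  # illustrative stand-in for self.OPENCV_MAJOR

if opencv_major < 3:
    detector = cv2.FeatureDetector_create('BRISK')  # OpenCV 2.x string-based factory
else:
    detector = cv2.BRISK_create()                   # OpenCV 3+ *_create() factory

img = np.zeros((64, 64), np.uint8)                  # dummy image just to exercise detect()
keypoints = detector.detect(img, None)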
Example 2
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  #2
        matcher = cv2.FlannBasedMatcher(
            flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher
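A possible way to call init_feature() above, as a hedged sketch: the FLANN_INDEX_* constants are plain integers as in the OpenCV samples, and the image paths are placeholders.

import cv2

FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6

detector, matcher = init_feature('brisk-flann')
img1 = cv2.imread('img1.png', 0)   # placeholder paths, loaded as grayscale
img2 = cv2.imread('img2.png', 0)
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)

raw_matches = matcher.knnMatch(des1, des2, k=2)
good = []
for pair in raw_matches:
    # Lowe's ratio test; pairs with fewer than two neighbours are skipped
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])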
Example 3
def local_feature_detection(img, detetype, kmax=500):
    """ Sparsely detects local features in an image.

    OpenCV implementation of various detectors.

    :param img: input image;
    :param detetype: type of detector {SURF, SIFT, ORB, BRISK, Dense}.
    :param kmax: maximum number of keypoints to return. The kmax keypoints with the largest response are returned;

    :return: detected keypoints; detection time;
    """

    try:
        if detetype == "SURF":
            surf = cv2.SURF()
            st_t = time.time()
            keypoints = surf.detect(img)
            ed_t = time.time()

            if kmax != -1:
                keypoints = keypoints[0:kmax]

        elif detetype == "SIFT":
            sift = cv2.SIFT(nfeatures=kmax)
            st_t = time.time()
            keypoints = sift.detect(img)
            ed_t = time.time()

        elif detetype == "ORB":
            orb = cv2.ORB(nfeatures=kmax)
            st_t = time.time()
            keypoints = orb.detect(img)
            ed_t = time.time()

        elif detetype == "BRISK":
            brisk = cv2.BRISK()
            st_t = time.time()
            keypoints = brisk.detect(img)
            ed_t = time.time()

            keypoints = keypoints[0:kmax]

        elif detetype == "Dense":
            keypoints = detect_dense_keypoints(img)

        else:
            surf = cv2.SURF()
            st_t = time.time()
            keypoints = surf.detect(img)
            ed_t = time.time()

        det_t = ed_t - st_t
        return keypoints, det_t

    except Exception as e:
        sys.stderr.write("Failure in detecting features\n")
        e_type, e_val, e_tb = sys.exc_info()
        traceback.print_exception(e_type, e_val, e_tb)
        return [], -1
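A hedged usage sketch for local_feature_detection(); the file name is a placeholder and the surrounding module is assumed to have the cv2, sys, time and traceback imports the function requires.

img = cv2.imread('frame.png', 0)  # placeholder path, grayscale
keypoints, det_time = local_feature_detection(img, "BRISK", kmax=500)
print("detected %d keypoints in %.3f s" % (len(keypoints), det_time))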
Example 4
 def features_detector(self, feat_type = SIFT, params = None):
     
     assert feat_type == self.SIFT or feat_type == self.SURF or \
         feat_type == self.ORB or feat_type == self.BRISK
     
     if feat_type == self.SIFT:
         
         if params is None:
             nfeatures = 0
             nOctaveLayers = 3
             contrastThreshold = 0.04
             edgeThreshold=10
             sigma=1.6
         else:
             nfeatures = params["nfeatures"]
             nOctaveLayers = params["nOctaveLayers"]
             contrastThreshold = params["contrastThreshold"]
             edgeThreshold = params["edgeThreshold"]
             sigma = params["sigma"]
         
         detector = cv2.SIFT(nfeatures=nfeatures,
                             nOctaveLayers=nOctaveLayers, contrastThreshold=contrastThreshold,
                             edgeThreshold=edgeThreshold, sigma=sigma)
         norm = cv2.NORM_L2
     elif feat_type == self.SURF:
         
         if params is None:
             hessianThreshold = 3000
             nOctaves = 1
             nOctaveLayers = 1
             upright = True
             extended = False
         else:
             hessianThreshold = params["hessianThreshold"]
             nOctaves = params["nOctaves"]
             nOctaveLayers = params["nOctaveLayers"]
             upright = params["upright"]
             extended = params["extended"]
             
         detector = cv2.SURF(hessianThreshold = hessianThreshold, 
                             nOctaves = nOctaves, 
                             nOctaveLayers = nOctaveLayers, 
                             upright = upright, 
                             extended = extended)
         norm = cv2.NORM_L2
         
     elif feat_type == self.ORB:
         detector = cv2.ORB(nfeatures=8000, scaleFactor=1.1, nlevels=8, edgeThreshold=10, firstLevel=0, WTA_K=2, patchSize=10)
         norm = cv2.NORM_HAMMING
     elif feat_type == self.BRISK:
         detector = cv2.BRISK()
         norm = cv2.NORM_HAMMING
    
     return detector, norm
Example 5
def detectAndCompute(im, mask=None, featureType="SIFT"):

    sift = cv2.SIFT()
    surf = cv2.SURF()
    brisk = cv2.BRISK()
    orb = cv2.ORB()

    agent = {"SIFT": sift, "SURF": surf, "BRISK": brisk, "ORB": orb}

    computer = agent[featureType]
    return computer.detectAndCompute(im, mask)
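Illustrative call of the wrapper above, assuming an OpenCV 2.x build where cv2.SIFT()/cv2.SURF() exist; note that it rebuilds all four extractors on every call, so repeated use in a loop is relatively expensive. The image path is a placeholder.

im = cv2.imread('scene.png', 0)  # placeholder path
kp, des = detectAndCompute(im, mask=None, featureType="BRISK")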
Example 6
def _features(cv_image):
    """ Takes a cvMat grayscale image and return the corresponding
        brisk descriptors. Do not change the brisk parameters """

    brisk = cv2.BRISK(BRISK_THRESHOLD, BRISK_OCTAVES, BRISK_SCALE)

    try:
        _, descriptors = brisk.detectAndCompute(cv_image, None)
    except:
        return []

    return descriptors
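Hypothetical usage of _features(); BRISK_THRESHOLD, BRISK_OCTAVES and BRISK_SCALE are module-level constants the snippet assumes, so example values are supplied here.

import cv2

BRISK_THRESHOLD, BRISK_OCTAVES, BRISK_SCALE = 30, 3, 1.0  # illustrative values only

gray = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
descriptors = _features(gray)
print(len(descriptors))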
Example 7
def flann(A_path, B_path, limit=0):
    """Find most similar image.
    For every image in A_path find the the most similar image in B_path."""
    FLANN_INDEX_LSH = 6

    image_pairing = {}

    i = 0
    for imgA_name in sorted(os.listdir(A_path)):
        imgA_path = os.path.join(A_path, imgA_name)
        print "Searching for {0}".format(imgA_name)
        imgA = cv2.imread(imgA_path, 0)
        brisk = cv2.BRISK()
        kpA, desA = brisk.detectAndCompute(imgA, None)
        index_params = dict(
            algorithm=FLANN_INDEX_LSH,
            table_number=6,  # 12
            key_size=12,  # 20
            multi_probe_level=1)  #2
        search_params = dict(checks=50)  # or pass empty dictionary
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        imgB_names = []
        for imgB_name in sorted(os.listdir(B_path)):
            imgB_names.append(imgB_name)
            imgB_path = os.path.join(B_path, imgB_name)
            imgB = cv2.imread(imgB_path, 0)
            # print "Detecting and computing {0}".format(imgB_name)
            kpB, desB = brisk.detectAndCompute(imgB, None)
            # print "Adding..."
            flann.add([desB])

        print len(flann.getTrainDescriptors()
                  )  #verify that it actually took the descriptors in

        # print "Training..."
        flann.train()

        # print "Matching..."
        matchidxs = []
        matches = flann.knnMatch(desA, k=2)
        for match in matches:
            for matchpart in match:
                matchidxs.append(matchpart.imgIdx)
        topimgidx = max(matchidxs, key=matchidxs.count)
        # print topimgidx
        # print imgB_names[topimgidx]
        image_pairing[imgA_name] = imgB_names[topimgidx]

        i += 1
        if limit > 0 and i > limit:
            break

    return image_pairing
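A possible invocation of flann() above; the directory names are placeholders and, like the function itself, the sketch assumes a Python 2 / OpenCV 2.x environment.

pairs = flann('query_images/', 'reference_images/', limit=10)
for query_name, best_match in sorted(pairs.items()):
    print "{0} -> {1}".format(query_name, best_match)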
Example 8
def register_frames(row):
    import tempfile
    import viderator
    import cv2
    import distpy
    import time
    hamming = distpy.Hamming()
    thrift = hadoopy_hbase.connect()
    fp = tempfile.NamedTemporaryFile()

    def get_column(column):
        return thrift.getRowWithColumns('videos', row,
                                        [column])[0].columns[column].value

    video_chunks = int(get_column('meta:video_chunks'))
    for x in range(video_chunks):
        fp.write(get_column('data:video-%d' % x))
    fp.flush()
    brisk = cv2.BRISK(40, 4, 1.)  # TODO: Get from model
    mask = None
    prev_descs = None
    prev_points = None
    match_thresh = 75
    min_inliers = 10
    frames_matched = []
    for frame_num, frame_time, frame in viderator.frame_iter(fp.name,
                                                             frame_skip=3):
        matched = False
        st = time.time()
        if mask is None:
            mask = np.ones((frame.shape[0], frame.shape[1]))
        points, descs = brisk.detectAndCompute(frame, mask)
        points = np.array([x.pt for x in points])
        #print((frame_num, points, descs))
        if prev_descs is not None:
            matches = (hamming.cdist(prev_descs, descs) <
                       match_thresh).nonzero()
            print(matches)
            a = np.array(prev_points[matches[0], :], dtype=np.float32)
            b = np.array(points[matches[1], :], dtype=np.float32)
            #print(a)
            #print(b)
            if a.shape[0] >= min_inliers and b.shape[0] >= min_inliers:
                h, i = cv2.findHomography(a, b, cv2.RANSAC)
                if np.sum(i) >= min_inliers:
                    matched = True
                print((h, i))
        frames_matched.append(matched)
        prev_descs = descs
        prev_points = points
        print(time.time() - st)
    print(matched)
Example 9
    def __init__(self):
        self.map_img_ = 0
        self.laser_img_ = 0

        # self.detector_ = cv2.ORB()
        self.detector_ = cv2.BRISK()
        # self.detector_ = cv2.Canny()

        self.bridge_ = CvBridge()

        self.scan_sub_ = rospy.Subscriber('scan_image', Image, self.laser_cb)

        self.read_map()
Example 10
    def __init__(self):
        # Descriptor
        orb = cv2.ORB()
        brisk = cv2.BRISK()

        # Matcher
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # choose descriptor and matcher

        self.descriptor = orb
        self.matcher = flann
Example 11
def get_BRISK_descriptions(image, keyPoints, **kwargs):
    """
    Computes BRISK descriptions for given keypoints.
    input:  image (that was returned with the keypoints!)
            keyPoints - detected keypoints
            **kwargs = detection arguments
    output: list of descriptions for given keypoints
    """
    brisk = cv2.BRISK()
    # gets keypoints and descriptions
    kp, descriptions = brisk.compute(image, keyPoints)

    if descriptions is None:
        descriptions = np.array([])

    return descriptions
Example 12
def get_BRISK_keypoints(image, n=500):
    """
    Detects keypoints using BRISK feature detector.
    input:  image
            n - max no of returned keypoints (default 500)
    output: list of BRISK keypoints
    """
    # Initiate BRISK detector
    brisk = cv2.BRISK()

    # find keypoints
    keyPoints = brisk.detect(image, None)

    # sort by response and return best n keypoints
    keyPoints = sort_keypoints_by_response_and_get_n_best(keyPoints, n)

    return image, keyPoints, {}
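The helper sort_keypoints_by_response_and_get_n_best() is not shown in this example; a plausible implementation (an assumption, not the original code) would simply sort on cv2.KeyPoint.response.

def sort_keypoints_by_response_and_get_n_best(keyPoints, n):
    # keep the n keypoints with the strongest detector response
    return sorted(keyPoints, key=lambda kp: kp.response, reverse=True)[:n]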
Example 13
    def calculate(self, resource):
        """ Append descriptors to BRISK h5 table """

        (image_url, mask_url, gobject_url) = resource
        if image_url == '':
            raise FeatureExtractionError(resource, 400,
                                         'Image resource is required')
        if mask_url != '':
            raise FeatureExtractionError(resource, 400,
                                         'Mask resource is not accepted')

        #image_url = BQServer().prepare_url(image_url, remap='gray')
        im = image2numpy(image_url, remap='gray')
        im = np.uint8(im)

        if gobject_url is '':
            fs = cv2.BRISK().detect(im)  # keypoints

        if gobject_url:
            (x, y, size) = gobject2keypoint(gobject_url)
            fs = [cv2.KeyPoint(x, y, size)]  # keypoints

        # extract the feature keypoints and descriptor
        descriptor_extractor = cv2.DescriptorExtractor_create("BRISK")
        (kpts, descriptors) = descriptor_extractor.compute(im, fs)

        if descriptors is None:  # taking NoneType into account
            raise FeatureExtractionError(resource, 500,
                                         'No feature was calculated')

        x = []
        y = []
        response = []
        size = []
        angle = []
        octave = []

        for k in kpts[:500]:
            x.append(k.pt[0])
            y.append(k.pt[1])
            response.append(k.response)
            size.append(k.size)
            angle.append(k.angle)
            octave.append(k.octave)

        return (descriptors, x, y, response, size, angle, octave)
Example 14
def _features(cv_image, image_info=None):
    """ Takes a cvMat grayscale image and returns the corresponding
        brisk descriptors. Do not change the brisk parameters """
    import cv2  # local import to prevent global libdc1394 errors
    brisk = cv2.BRISK(brisk_constants.BRISK_THRESHOLD,
                      brisk_constants.BRISK_OCTAVES,
                      brisk_constants.BRISK_SCALE)

    try:
        keypoints, descriptors = brisk.detectAndCompute(cv_image, None)
    except:
        return []
    """ Wrap this in a try/except to catch None and empty numpy arrays. It's very hard
        to check for both conditions when the array comes from C, this is the safest way """
    try:
        descriptors_n = len(descriptors)
    except:
        return []

    return descriptors
Example 15
def init_feature(feature_type):
    feature_type = feature_type.lower()
    if feature_type == 'sift':
        feature = cv2.SIFT()
        norm = cv2.NORM_L2
    elif feature_type == 'surf':
        ##        feature = cv2.SURF(hessianThreshold = 800)
        feature = cv2.SURF()
        norm = cv2.NORM_L2
    elif feature_type == 'orb':
        feature = cv2.ORB(nfeatures=600)  # ~= #brisk
        ##        feature = cv2.ORB()
        norm = cv2.NORM_HAMMING
    elif feature_type == 'brisk':
        feature = cv2.BRISK()
        norm = cv2.NORM_HAMMING
    else:
        feature = None
        norm = None
    return feature, norm
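Hedged usage sketch for the OpenCV 2.x-style init_feature() above; the image paths are placeholders.

import cv2

feature, norm = init_feature('brisk')
matcher = cv2.BFMatcher(norm, crossCheck=True)
img1 = cv2.imread('a.png', 0)   # placeholder paths
img2 = cv2.imread('b.png', 0)
kp1, des1 = feature.detectAndCompute(img1, None)
kp2, des2 = feature.detectAndCompute(img2, None)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)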
Example 16
def get_features(file_name):

    # alternative detectors, descriptors, matchers, parameters ==> different results

    # Object Features
    # obj_original = cv.imread(path.join(file_name),cv.CV2_LOAD_IMAGE_COLOR)
    obj_original = cv.imread(path.join(file_name), cv.IMREAD_COLOR)

    #error checking
    if obj_original is None:
        print 'Couldn\'t find the object image with the provided path.'
        sys.exit()

    # basic feature detection works in grayscale
    obj = cv.cvtColor(obj_original, cv.COLOR_BGR2GRAY)

    #for cv version 3.1.0
    if cv.__version__ == '3.1.0':
        brisk = cv.BRISK_create()
        (obj_keypoints, obj_descriptors) = brisk.detectAndCompute(obj, None)

        #dump numpy array to file
        obj_descriptors.dump(resultname)

        return obj_descriptors

    #for cv versions 2.x.x
    detector = cv.BRISK(thresh=10, octaves=1)
    extractor = cv.DescriptorExtractor_create(
        'BRISK')  # non-patented. Thank you!

    matcher = cv.BFMatcher(cv.NORM_L2SQR)

    # keypoints are "interesting" points in an image:
    obj_keypoints = detector.detect(obj, None)

    # this lines up each keypoint with a mathematical description
    obj_keypoints, obj_descriptors = extractor.compute(obj, obj_keypoints)

    return obj_descriptors
Example 17
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                           0.03))
# Create some random colors
color = np.random.randint(0, 255, (3000, 3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
obj_mask = old_gray

detector = cv2.BRISK(thresh=15, octaves=0, patternScale=15.0)
kp = detector.detect(old_gray, obj_mask)
extractor = cv2.DescriptorExtractor_create('BRISK')
kp, des = extractor.compute(old_gray, kp)
print(len(kp))
p0 = np.zeros((3000, 1, 2))
for k, i in zip(kp, range(3000)):
    p0[i][0][0] = k.pt[0]
    p0[i][0][1] = k.pt[1]
print(np.shape(p0))
print(p0)
p0 = p0.astype(np.float32)
mask = np.zeros_like(old_frame)
var = 1
print(type(p0[0][0][0]))
while (1):
Example 18
import numpy as np
import cv2
from matplotlib import pyplot as plt
import sys
import time

# =====
# Detector, Descriptor and Matcher
# =====
#detector = cv2.ORB_create()
detector = cv2.BRISK(thresh=10, octaves=1)
descriptor = cv2.BRISK_create()
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

MIN_MATCH_COUNT = 3

# =====
# Reading and Preparing the images
# =====
# Reference Object to be Searched
img1 = cv2.imread('Y.png')
# Image to be Searched
img2 = cv2.imread('test2_roi.png')

# Find the Edges with Canny Edges
canny1 = cv2.Canny(img1, 100, 500)
canny2 = cv2.Canny(img2, 100, 500)
cv2.imshow("Frame", canny1)
print(canny1.dtype)
time.sleep(5)
Example 19
    def _create_detector(self):
        detector = cv2.BRISK(thresh=self._thresh,
                             octaves=self._octaves,
                             patternScale=self._pattern_scale)

        return detector
Example 20
    lap_hough_img, major_lines, minor_lines = find_main_lines(img, lap_img, angle_rad_major, angle_rad_minor)

    jp_img_2 = find_jp(major_lines, minor_lines, corners, img.copy())
    # Probabilistic Hough
    minLineLength = 20
    maxLineGap = 1
    hough_p_pixel_acc = 1
    hough_p_angle_acc = np.pi/360

    lines = cv2.HoughLinesP(canny_img, hough_p_pixel_acc, hough_p_angle_acc, 10, minLineLength=minLineLength, maxLineGap=maxLineGap)
    hough_p_img = img.copy()
    if(lines is not None):
        for x1,y1,x2,y2 in lines[0]:
            cv2.line(hough_p_img,(x1,y1),(x2,y2),(0,0,0),1)
            
    brisk = cv2.BRISK()
    kp_brisk = brisk.detect(gray)
    brisk_img = img.copy()

    brisk_img = cv2.drawKeypoints(img, kp_brisk, brisk_img)


  
    plt.subplot(3, 4, 1)
    plt.imshow(hsv_img,cmap='gray')
    plt.title("HSV Masked")
    plt.subplot(3, 4, 2)
    plt.imshow(rect_img,cmap='gray')
    plt.title("Bounding Box")
    plt.subplot(3, 4, 3)
    plt.imshow(canny_img,cmap='gray')
Example 21
def local_feature_description(img, keypoints, desctype):
    """ Describes the given keypoints of an image.

    OpenCV implementation of various descriptors.

    :param img: input image;
    :param keypoints: computed keypoints;
    :param desctype: type of descriptor {SURF, SIFT, ORB, BRISK, RootSIFT}.

    :return: computed features, description time.
    """

    try:
        if desctype == "SURF":
            surf = cv2.SURF()
            st_t = time.time()
            __, features = surf.compute(img, keypoints)
            ed_t = time.time()

        elif desctype == "SIFT":
            sift = cv2.SIFT()
            st_t = time.time()
            __, features = sift.compute(img, keypoints)
            ed_t = time.time()

        elif desctype == "ORB":
            orb = cv2.ORB()
            st_t = time.time()
            __, features = orb.compute(img, keypoints)
            ed_t = time.time()

        elif desctype == "BRISK":
            brisk = cv2.BRISK()
            st_t = time.time()
            __, features = brisk.compute(img, keypoints)
            ed_t = time.time()

        elif desctype == "RootSIFT":
            eps = 0.00000001
            sift = cv2.SIFT()
            st_t = time.time()
            __, features = sift.compute(img, keypoints)

            features /= (np.sum(features, axis=1, keepdims=True) + eps)
            features = np.sqrt(features)

            ed_t = time.time()

        else:
            surf = cv2.SURF()
            st_t = time.time()
            __, features = surf.compute(img, keypoints)
            ed_t = time.time()

        dsc_t = ed_t - st_t
        return features, dsc_t

    except:
        sys.stderr.write("Failure in detecting features\n")
        e_type, e_val, e_tb = sys.exc_info()
        traceback.print_exception(e_type, e_val, e_tb)
        return [], -1
Example 22
	def initialise(self, im_gray0, tl, br):

		# Initialise detector, descriptor, matcher
		self.detector = cv2.BRISK()
		self.descriptor = cv2.BRISK()
		self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

		# Get initial keypoints in whole image
		keypoints_cv = self.detector.detect(im_gray0)

		# Remember keypoints that are in the rectangle as selected keypoints
		ind = util.in_rect(keypoints_cv, tl, br)
		selected_keypoints_cv = list(itertools.compress(keypoints_cv, ind))
		selected_keypoints_cv, self.selected_features = self.descriptor.compute(im_gray0, selected_keypoints_cv)
		selected_keypoints = util.keypoints_cv_to_np(selected_keypoints_cv)
		num_selected_keypoints = len(selected_keypoints_cv)

		if num_selected_keypoints == 0:
			raise Exception('No keypoints found in selection')

		# Remember keypoints that are not in the rectangle as background keypoints
		background_keypoints_cv = list(itertools.compress(keypoints_cv, ~ind))
		background_keypoints_cv, background_features = self.descriptor.compute(im_gray0, background_keypoints_cv)
		_ = util.keypoints_cv_to_np(background_keypoints_cv)

		# Assign each keypoint a class starting from 1, background is 0
		self.selected_classes = array(range(num_selected_keypoints)) + 1
		background_classes = zeros(len(background_keypoints_cv))

		# Stack background features and selected features into database
		self.features_database = vstack((background_features, self.selected_features))

		# Same for classes
		self.database_classes = hstack((background_classes, self.selected_classes))

		# Get all distances between selected keypoints in squareform
		pdist = scipy.spatial.distance.pdist(selected_keypoints)
		self.squareform = scipy.spatial.distance.squareform(pdist)

		# Get all angles between selected keypoints
		angles = np.empty((num_selected_keypoints, num_selected_keypoints))
		for k1, i1 in zip(selected_keypoints, range(num_selected_keypoints)):
			for k2, i2 in zip(selected_keypoints, range(num_selected_keypoints)):

				# Compute vector from k1 to k2
				v = k2 - k1

				# Compute angle of this vector with respect to x axis
				angle = math.atan2(v[1], v[0])

				# Store angle
				angles[i1, i2] = angle

		self.angles = angles

		# Find the center of selected keypoints
		center = np.mean(selected_keypoints, axis=0)

		# Remember the rectangle coordinates relative to the center
		self.center_to_tl = np.array(tl) - center
		self.center_to_tr = np.array([br[0], tl[1]]) - center
		self.center_to_br = np.array(br) - center
		self.center_to_bl = np.array([tl[0], br[1]]) - center

		# Calculate springs of each keypoint
		self.springs = selected_keypoints - center

		# Set start image for tracking
		self.im_prev = im_gray0

		# Make keypoints 'active' keypoints
		self.active_keypoints = np.copy(selected_keypoints)

		# Attach class information to active keypoints
		self.active_keypoints = hstack((selected_keypoints, self.selected_classes[:, None]))

		# Remember number of initial keypoints
		self.num_initial_keypoints = len(selected_keypoints_cv)
Example 23
def get_feature_detector_descriptor_extractor(
        feature_detector_name=str(),
        descriptor_extractor_name=None,
        feature_detector_params=None,
        descriptor_extractor_params=None):
    """
    :param feature_detector_name:
    :param descriptor_extractor_name:
    :param feature_detector_params: dict(nfeatures=1000) for ORB
    :param descriptor_extractor_params:
    :return:
    """
    assert len(feature_detector_name) != 0
    if feature_detector_params is None:
        feature_detector_params = dict()
    if descriptor_extractor_params is None:
        descriptor_extractor_params = dict()

    feature_detector_name = feature_detector_name.upper()

    normType = cv2.NORM_L2

    if feature_detector_name == "ORB" or feature_detector_name == "BRIEF" or feature_detector_name == "BRISK":
        normType = cv2.NORM_HAMMING

    feature_detector = descriptor_extractor = None
    if feature_detector_name == "ORB":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.ORB(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.ORB_create(
                **feature_detector_params)

    elif feature_detector_name == "BRIEF":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = cv2.StarDetector(**feature_detector_params)
            #descriptor_extractor = cv2.BriefDescriptorExtractor(**descriptor_extractor_params) # seems not working
            descriptor_extractor = cv2.DescriptorExtractor_create("BRIEF")
        else:
            feature_detector = cv2.xfeatures2d.StarDetector_create(
                **feature_detector_params)
            descriptor_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
                **descriptor_extractor_params)

    elif feature_detector_name == "BRISK":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.BRISK(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.BRISK_create(
                **feature_detector_params)

    elif feature_detector_name == "SURF":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.SURF(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.xfeatures2d.SURF_create(
                **feature_detector_params)

    elif feature_detector_name == "SIFT":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.SIFT(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.xfeatures2d.SIFT_create(
                **feature_detector_params)

    else:
        print(
            "Seems we have not predefined the target feature_detector and descriptor_extractor"
        )

    return feature_detector, descriptor_extractor, normType
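Illustrative call of get_feature_detector_descriptor_extractor(); it relies on imutils.is_cv2() to pick the constructor style, the thresh value is only an example parameter, and the image path is a placeholder.

import cv2

detector, extractor, norm_type = get_feature_detector_descriptor_extractor(
    "BRISK", feature_detector_params=dict(thresh=30))

img = cv2.imread('sample.png', 0)       # placeholder path
kp = detector.detect(img, None)
kp, des = extractor.compute(img, kp)
matcher = cv2.BFMatcher(norm_type, crossCheck=True)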
Example 24
def processInput():
    print ""
    if inputArgs.left == "" or inputArgs.right == "":
        print "Missing images!"
        quit()

    # here we go ...

    # load image pair
    img_l = cv2.imread(inputArgs.left)
    img_r = cv2.imread(inputArgs.right)

    if img_l is None or img_r is None:
        print "Missing images!"
        quit()

    # we like them gray
    gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
    gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

    # which detector are we using
    if inputArgs.feature == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif inputArgs.feature == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif inputArgs.feature == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    elif inputArgs.feature == 'brisk':
        detector = cv2.BRISK()
        norm = cv2.NORM_HAMMING
    else:
        print "Wrong feature detector!"
        quit()

    # how are we matching detected features
    if inputArgs.match == 'bf':
        matcher = cv2.BFMatcher(norm)

    elif inputArgs.match == 'flann':
        # borrowed from: https://github.com/Itseez
        FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
        FLANN_INDEX_LSH = 6

        flann_params = []
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  #2
        matcher = cv2.FlannBasedMatcher(
            flann_params, {})  # bug : need to pass empty dict (#1329)

    print "Using: " + inputArgs.feature + " with " + inputArgs.match
    print ""

    print "detecting ..."
    # find the keypoints and descriptors
    kp_l, des_l = detector.detectAndCompute(gray_l, None)
    kp_r, des_r = detector.detectAndCompute(gray_r, None)

    print "Left image features: " + str(len(kp_l))
    print "Right image features: " + str(len(kp_l))
    print ""
    # visualization
    if inputArgs.debug == 1:
        # left
        img_l_tmp = img_l.copy()
        #for kp in kp_l:
        #	x = int(kp.pt[0])
        #	y = int(kp.pt[1])
        #	cv2.circle(img_l_tmp, (x, y), 2, (0, 0, 255))
        img_l_tmp = cv2.drawKeypoints(img_l_tmp, kp_l, img_l_tmp, (0, 0, 255),
                                      cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        head, tail = os.path.split(inputArgs.left)
        cv2.imwrite(head + "/" + "feat_" + tail, img_l_tmp)
        # right
        img_r_tmp = img_r.copy()
        #for kp in kp_r:
        #	x = int(kp.pt[0])
        #	y = int(kp.pt[1])
        #	cv2.circle(img_r_tmp, (x, y), 2, (0, 0, 255))
        img_r_tmp = cv2.drawKeypoints(img_r_tmp, kp_r, img_r_tmp, (0, 0, 255),
                                      cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        head, tail = os.path.split(inputArgs.right)
        cv2.imwrite(head + "/" + "feat_" + tail, img_r_tmp)

    print "matching ..."

    # match
    raw_matches = matcher.knnMatch(des_l, trainDescriptors=des_r, k=2)
    print "Raw matches: " + str(len(raw_matches))

    # filter matches: per Lowe's ratio test
    filtered_matches = []
    mkp_l = []
    mkp_r = []

    for m in raw_matches:
        if len(m
               ) == 2 and m[0].distance < m[1].distance * inputArgs.proportion:
            filtered_matches.append(m)
            mkp_l.append(kp_l[m[0].queryIdx])
            mkp_r.append(kp_r[m[0].trainIdx])
    print "Filtered matches: " + str(len(filtered_matches))

    # visualization
    if inputArgs.debug == 1:
        # draw points
        img_l_tmp = cv2.drawKeypoints(
            img_l_tmp, mkp_l, img_l_tmp, (255, 0, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        head, tail = os.path.split(inputArgs.left)
        #cv2.imwrite(head+"/"+"feat_"+tail, img_l_tmp)
        img_r_tmp = cv2.drawKeypoints(
            img_r_tmp, mkp_r, img_r_tmp, (255, 0, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        head, tail = os.path.split(inputArgs.right)
        #cv2.imwrite(head+"/"+"feat_"+tail, img_r_tmp)

        # merge image side by side
        h_l, w_l = img_l_tmp.shape[:2]
        h_r, w_r = img_r_tmp.shape[:2]
        img_tmp = np.zeros((max(h_l, h_r), w_l + w_r, 3), np.uint8)
        img_tmp[:h_l, :w_l] = img_l_tmp
        img_tmp[:h_r, w_l:w_l + w_r] = img_r_tmp

        # draw lines
        for m in filtered_matches:
            cv2.line(img_tmp, (int(round(kp_l[m[0].queryIdx].pt[0])),
                               int(round(kp_l[m[0].queryIdx].pt[1]))),
                     (int(w_l + round(kp_r[m[0].trainIdx].pt[0])),
                      int(round(kp_r[m[0].trainIdx].pt[1]))), (255, 0, 0), 1)

        cv2.imwrite(inputArgs.name + "_features.jpg", img_tmp)

    # filter matches: per direction (since it's a stereo pair, most of the points should have the same angle between them)
    if inputArgs.stddev != 0.0:
        ang_stddev = 360.0
        stddev = 180.0
        while abs(stddev) > inputArgs.stddev:
            ang_stddev = stddev
            raw_matches = []  # silly !!!
            for m in filtered_matches:  # silly !!!
                raw_matches.append(m)  # silly !!!

            filtered_matches = []
            mkp_l = []
            mkp_r = []

            ang = []
            for m in raw_matches:
                xDiff = kp_r[m[0].trainIdx].pt[0] - kp_l[m[0].queryIdx].pt[
                    0]  #p2.x - p1.x
                yDiff = kp_r[m[0].trainIdx].pt[1] - kp_l[m[0].queryIdx].pt[
                    1]  #p2.y - p1.y
                #print math.degrees(math.atan2(yDiff,xDiff))
                ang.append(math.degrees(math.atan2(yDiff, xDiff)))

            mean = np.mean(ang)
            differences = [(value - mean)**2 for value in ang]
            stddev = np.mean(differences)**0.5
            #print mean
            #print stddev

            ang = []
            for m in raw_matches:
                xDiff = kp_r[m[0].trainIdx].pt[0] - kp_l[m[0].queryIdx].pt[
                    0]  #p2.x - p1.x
                yDiff = kp_r[m[0].trainIdx].pt[1] - kp_l[m[0].queryIdx].pt[
                    1]  #p2.y - p1.y
                ang_tmp = math.degrees(math.atan2(yDiff, xDiff))
                if (mean + stddev) > (mean - stddev):
                    if (mean + stddev) >= ang_tmp and (mean -
                                                       stddev) <= ang_tmp:
                        filtered_matches.append(m)
                        mkp_l.append(kp_l[m[0].queryIdx])
                        mkp_r.append(kp_r[m[0].trainIdx])
                        ang.append(math.degrees(math.atan2(yDiff, xDiff)))
                else:
                    if (mean + stddev) <= ang_tmp and (mean -
                                                       stddev) >= ang_tmp:
                        filtered_matches.append(m)
                        mkp_l.append(kp_l[m[0].queryIdx])
                        mkp_r.append(kp_r[m[0].trainIdx])
                        ang.append(math.degrees(math.atan2(yDiff, xDiff)))

            ##print np.median(ang)
            mean = np.mean(ang)
            differences = [(value - mean)**2 for value in ang]
            stddev = np.mean(differences)**0.5
            #print mean
            #print stddev
            if (abs(ang_stddev) - abs(stddev)) < 0.001:
                break

        print "Filtered matches cheat: " + str(len(filtered_matches))

        mkp_pairs = zip(mkp_l, mkp_r)
        file = open(inputArgs.name + "_kp.txt", "w")
        for p in mkp_pairs:
            # left x , left y ; right x , right y
            file.write(
                str(p[0].pt[0]) + "," + str(p[0].pt[1]) + ";" +
                str(p[1].pt[0]) + "," + str(p[1].pt[1]) + "\n")
        file.close()

        # visualization
        if inputArgs.debug == 1:
            # draw lines
            for m in filtered_matches:
                cv2.line(img_tmp, (int(round(kp_l[m[0].queryIdx].pt[0])),
                                   int(round(kp_l[m[0].queryIdx].pt[1]))),
                         (int(w_l + round(kp_r[m[0].trainIdx].pt[0])),
                          int(round(kp_r[m[0].trainIdx].pt[1]))), (0, 255, 0),
                         1)

            cv2.imwrite(inputArgs.name + "_features.jpg", img_tmp)
Example 25
def algo2(inp1, inp2):
    detector = cv2.BRISK(thresh=10, octaves=1)
    extractor = cv2.DescriptorExtractor_create(
        'BRISK')  # non-patented. Thank you!
    matcher = cv2.BFMatcher(cv2.NORM_L2SQR)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Object Features
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    obj_original = cv2.imread(
        path.join('Dataset', inp1),  #input1 variable
        cv2.CV_LOAD_IMAGE_COLOR)
    if obj_original is None:
        print 'Couldn\'t find the object image with the provided path.'
        sys.exit()

    # basic feature detection works in grayscale
    obj = cv2.cvtColor(obj_original, cv2.COLOR_BGR2GRAY)
    mask_name = "mask_" + inp1
    algo1(inp1, mask_name)
    obj_mask = cv2.imread(path.join('masks', mask_name),
                          cv2.CV_LOAD_IMAGE_GRAYSCALE)  #inp1 mask creation
    if obj_mask is None:
        print 'Couldn\'t find the object mask image with the provided path.' \
              ' Continuing without it.'

    # keypoints are "interesting" points in an image:
    obj_keypoints = detector.detect(obj, obj_mask)
    # this lines up each keypoint with a mathematical description
    obj_keypoints, obj_descriptors = extractor.compute(obj, obj_keypoints)
    print 'Object Summary  *************************************************'
    print '    {} keypoints'.format(len(obj_keypoints))

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Scene Features
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    scene_original = cv2.imread(path.join('Dataset', inp2),
                                cv2.CV_LOAD_IMAGE_COLOR)  #inp2 reading
    if scene_original is None:
        print 'Couldn\'t find the scene image with the provided path.'
        sys.exit()

    scene = cv2.cvtColor(scene_original, cv2.COLOR_BGR2GRAY)
    mask_name2 = "mask_" + inp2
    algo1(inp2, mask_name2)
    scene_mask = cv2.imread(path.join('masks', mask_name2),
                            cv2.CV_LOAD_IMAGE_GRAYSCALE)
    if scene_mask is None:
        print 'Couldn\'t find the scene mask image with the provided path.' \
              ' Continuing without it.'

    scene_keypoints = detector.detect(scene, scene_mask)
    scene_keypoints, scene_descriptors = extractor.compute(
        scene, scene_keypoints)
    print 'Scene Summary  **************************************************'
    print '    {} keypoints'.format(len(scene_keypoints))

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Match features between the object and scene
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    min_matches = 3
    matches = matcher.match(obj_descriptors, scene_descriptors)
    if len(matches) < min_matches:
        print 'Not enough matches found between the image and scene keypoints.'
        sys.exit()

    #do some filtering of the matches to find the best ones
    distances = [match.distance for match in matches]
    min_dist = min(distances)
    avg_dist = sum(distances) / len(distances)
    # basically allow everything except awful outliers
    # a lower number like 2 will exclude a lot of matches if that's what you need
    min_multiplier_tolerance = 10
    min_dist = min_dist or avg_dist * 1.0 / min_multiplier_tolerance
    good_matches = [
        match for match in matches
        if match.distance <= min_multiplier_tolerance * min_dist
    ]
    print 'Match Summary  **************************************************'
    print '    {} / {}      good / total matches'.format(
        len(good_matches), len(matches))
    if len(good_matches) < min_matches:
        print 'not enough good matches to continue'
        sys.exit()

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Calculate the shape of the object discovered in the scene.
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    #extract the positions of the good matches within the object and scene
    obj_matched_points = np.array(
        [obj_keypoints[match.queryIdx].pt for match in good_matches])
    scene_matched_points = np.array(
        [scene_keypoints[match.trainIdx].pt for match in good_matches])
    # find the homography which describes how the object is oriented in the scene
    # also gets a mask which identifies each match as an inlier or outlier
    homography, homography_mask = cv2.findHomography(obj_matched_points,
                                                     scene_matched_points,
                                                     cv2.RANSAC, 2.0)
    print 'Homography Summary  **************************************************'
    print '    {} / {}      inliers / good matches'.format(
        np.sum(homography_mask), len(homography_mask))

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Extract sizes and coordinates
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    obj_h, obj_w = obj.shape[0:2]
    scene_h, scene_w = scene.shape[0:2]
    #corners: opencv uses (top left, top right, bottom right, bottom left)
    obj_top_left = (0, 0)
    obj_top_right = (obj_w, 0)
    obj_bottom_right = (obj_w, obj_h)
    obj_bottom_left = (0, obj_h)
    object_corners_float = np.array(
        [obj_top_left, obj_top_right, obj_bottom_right, obj_bottom_left],
        dtype=np.float32)
    #corners of the object in the scene (I don't know about the reshaping)
    obj_in_scene_corners_float =\
        cv2.perspectiveTransform(object_corners_float.reshape(1, -1, 2),
                                 homography).reshape(-1, 2)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Visualize the matching results
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # create a combined image of the original object and the scene
    blue = (255, 0, 0)
    green = (0, 255, 0)
    red = (0, 0, 255)
    combo_image = np.zeros((max(scene_h, obj_h), scene_w + obj_w), np.uint8)
    combo_image[0:obj_h, 0:obj_w] = obj  # copy the obj into the combo image
    combo_image[0:scene_h, obj_w:obj_w + scene_w] = scene  # same for the scene
    combo_image = cv2.cvtColor(combo_image,
                               cv2.COLOR_GRAY2BGR)  # color for output
    # draw a polygon around the object in the scene
    obj_in_scene_offset_corners_float = obj_in_scene_corners_float + (obj_w, 0)
    cv2.polylines(combo_image, [np.int32(obj_in_scene_offset_corners_float)],
                  True, blue, 2)
    # mark inlier and outlier matches
    for (x1, y1), (x2, y2), inlier in zip(np.int32(obj_matched_points),
                                          np.int32(scene_matched_points),
                                          homography_mask):
        if inlier:
            #draw a line with circle ends for each inlier
            cv2.line(combo_image, (x1, y1), (x2 + obj_w, y2), green)
            cv2.circle(combo_image, (x1, y1), 4, green, 2)
            cv2.circle(combo_image, (x2 + obj_w, y2), 4, green, 2)
        else:
            #draw a red x for outliers
            r = 2
            weight = 2
            cv2.line(combo_image, (x1 - r, y1 - r), (x1 + r, y1 + r), red,
                     weight)
            cv2.line(combo_image, (x1 - r, y1 + r), (x1 + r, y1 - r), red,
                     weight)
            cv2.line(combo_image, (x2 + obj_w - r, y2 - r),
                     (x2 + obj_w + r, y2 + r), red, weight)
            cv2.line(combo_image, (x2 + obj_w - r, y2 + r),
                     (x2 + obj_w + r, y2 - r), red, weight)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Do a sanity check on the discovered object
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    use_extracted = True  # keep track of whether the extraction works or not
    #check for size of the discovered result versus the original
    scale_tolerance = 0.7
    obj_area = polygon_area(object_corners_float)
    obj_in_scene_area = polygon_area(obj_in_scene_corners_float)
    area_min_allowed = obj_area * (1 - scale_tolerance)**2
    area_max_allowed = obj_area * (1 + scale_tolerance)**2
    if not (area_min_allowed < obj_in_scene_area < area_max_allowed):
        print 'A homography was found but it seems too large or' \
              ' too small for a real match.'
        use_extracted = False
    #else:
    #we have to do something here

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Extract the object from the original scene
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    #max/min the corners to disentangle any flipped, etc. projections
    '''tops_bottoms = list(corner[1] for corner in obj_in_scene_corners_float)
Example 26
import numpy as np
import cv2
from matplotlib import pyplot as plt

img1 = cv2.imread('im1.jpg', 0)
img2 = cv2.imread('im2.jpg', 0)

h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]

MIN_MATCH_COUNT = 10

# Initiate BRISK detector
detector = cv2.BRISK()

# Find the keypoints and descriptors with BRISK
kp1, des1 = detector.detectAndCompute(img2, None)
kp2, des2 = detector.detectAndCompute(img1, None)

# initialize Brute-Force matcher
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html#brute-force-matching-with-sift-descriptors-and-ratio-test
bf = cv2.BFMatcher()

# use KNN match of Brute-Force matcher for descriptors
matches = bf.knnMatch(des1, des2, k=2)

good = []

#exclude outliers
for m, n in matches:
    if m.distance < 0.7 * n.distance:
Example 27
import sys  # argv
import cv2  # OpenCV detectors / descriptors
import numpy as np  # array handling
import matplotlib.pyplot as plt  # plot image
from itertools import permutations  # generate all combinations of clusters
import time  # To measure time performance

# Parameter for SURF
HESSIAN_THRESHOLD = 400

# List of Keypoint Detectors and Keypoint Descriptors
KP_DET_DESC = { \
    'SIFT'  : cv2.SIFT(), \
    'SURF'  : cv2.SURF(HESSIAN_THRESHOLD), \
    'STAR'  : cv2.FeatureDetector_create("STAR"), \
    'ORB'   : cv2.ORB(),\
    'BRIEF' : cv2.DescriptorExtractor_create("BRIEF"), \
    'BRISK' : cv2.BRISK(), \
    'FREAK' : cv2.DescriptorExtractor_create("FREAK")
    }


def unique_rows(arr):
    ''' Return unique rows of array a '''
    return  np.unique(arr.view(np.dtype((np.void, \
                arr.dtype.itemsize*arr.shape[1])))) \
                .view(arr.dtype).reshape(-1, arr.shape[1])


def show_color_image(img):
    ''' OpenCV load image in BGR, matplotlib uses RGB '''
    #b,g,r = cv2.split(img)
    #img2 = cv2.merge([r,g,b])