Code Example #1
    def detectKp(self, img, mask):

        if self.equalnum:
            # Initiate FAST object with default values
            fast = cv2.FastFeatureDetector(threshold=15)
            best_kp = fast.detect(img, mask)
            print len(best_kp)
            if len(best_kp) >= self.numpatch:
                best_kp = random.sample(best_kp, self.numpatch)
        else:
            fast = cv2.FastFeatureDetector(threshold=self.threshold)
            best_kp = fast.detect(img, mask)

        #remove keypoints from the border of the image to be able to use the binary descriptors
        #border = 40  # size of the border
        #height, width = img.shape[:2]

        #best_kp_no_border = []
        #for i in range(0, n):
        #    kp = best_kp[i]
        #    if kp.pt[0] < border or kp.pt[0] > (width - border) or kp.pt[1] < border or kp.pt[1] > (height - border):
        #        True
        #    else:
        #        best_kp_no_border.append(kp)

        # draw the keypoints
        #img3 = cv2.drawKeypoints(img,best_kp_no_border, color=(255,0,0))
        #cv2.imshow('image2',img3)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()

        return best_kp
Code Example #2
 def __init__(self):
     # FLANN parameters
     FLANN_INDEX_KDTREE = 0
     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
     search_params = dict(checks=50)
     self.flann = cv2.FlannBasedMatcher(index_params, search_params)
     self.detector = cv2.FastFeatureDetector()  #.SIFT()
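A note on this pairing: the KD-tree index configured above assumes float descriptors such as SIFT or SURF. If the FAST keypoints are described with a binary descriptor (ORB, BRIEF, FREAK), FLANN should be given an LSH index instead. A minimal sketch, assuming a binary-descriptor pipeline:

import cv2

# FLANN with an LSH index, suitable for binary descriptors (e.g. ORB)
FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,       # number of hash tables
                    key_size=12,          # bits per hash key
                    multi_probe_level=1)  # probes into neighboring buckets
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)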
Code Example #3
File: Test.py Project: BPPJH/Self-Driving-Buggy
def fastFeatureTest():
    img = cv2.imread('Robot.jpg', 0)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector()

    # find and draw the keypoints
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

    # Print all default params
    print "Threshold: ", fast.getInt('threshold')
    print "nonmaxSuppression: ", fast.getBool('nonmaxSuppression')
    print "neighborhood: ", fast.getInt('type')
    print "Total Keypoints with nonmaxSuppression: ", len(kp)

    cv2.imwrite('fast_true.png', img2)

    # Disable nonmaxSuppression
    fast.setBool('nonmaxSuppression', 0)
    kp = fast.detect(img, None)

    print "Total Keypoints without nonmaxSuppression: ", len(kp)

    img3 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

    cv2.imwrite('fast_false.png', img3)
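This snippet, like most on this page, targets the OpenCV 2.4 Python bindings. In OpenCV 3.x/4.x the constructor became a factory function, the getInt/getBool/setBool accessors were replaced by typed getters and setters, and drawKeypoints takes an explicit output-image argument. A rough equivalent, assuming opencv-python 3.x or later:

import cv2

img = cv2.imread('Robot.jpg', 0)
fast = cv2.FastFeatureDetector_create()  # replaces cv2.FastFeatureDetector()

kp = fast.detect(img, None)
img2 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))  # outImage argument is required

print("Threshold: ", fast.getThreshold())                  # replaces getInt('threshold')
print("nonmaxSuppression: ", fast.getNonmaxSuppression())  # replaces getBool(...)
print("neighborhood: ", fast.getType())                    # replaces getInt('type')
print("Total Keypoints with nonmaxSuppression: ", len(kp))

fast.setNonmaxSuppression(False)  # replaces setBool('nonmaxSuppression', 0)
kp = fast.detect(img, None)
print("Total Keypoints without nonmaxSuppression: ", len(kp))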
Code Example #4
File: main.py Project: novokrest/CVision
def fast_feature_detect(gray_img):
    fast = cv2.FastFeatureDetector()
    f_features = list(
        sorted(fast.detect(gray_img, None),
               key=lambda l: -l.response))[:MAX_TRACK_POINTS]

    return np.array([[f.pt] for f in f_features], np.float32)
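The (N, 1, 2) float32 array built here matches the point layout cv2.calcOpticalFlowPyrLK expects, so a caller would look roughly like the following sketch (MAX_TRACK_POINTS and the two frames are assumptions, not part of the original file):

import cv2
import numpy as np

MAX_TRACK_POINTS = 100  # assumed module-level constant

p0 = fast_feature_detect(prev_gray)
p1, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None)
good_new = p1[status.flatten() == 1]  # keep only successfully tracked points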
Code Example #5
def fastCornerDetector(self):
    currFilter = 'FASTCornerDetection'
    print("Applying Filter : ", currFilter)
    self.processedImage = self.originalImage.copy()
    self.lastFilter = currFilter
    self.initializeSlider(s=0, e=50)
    gray = self.processedImage
    img = self.processedImage

    if len(self.processedImage.shape) > 2:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector()

    # find and draw the keypoints
    kp = fast.detect(gray, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

    # Print all default params
    print("Threshold: ", fast.getInt('threshold'))
    print("nonmaxSuppression: ", fast.getBool('nonmaxSuppression'))
    print("neighborhood: ", fast.getInt('type'))
    print("Total Keypoints with nonmaxSuppression: ", len(kp))
    self.processedImage = img2.copy()
    self.displayImage()
Code Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', type=str, default="../Images/test.png")
    parser.add_argument('--num_ret_points', type=int, default=10)
    parser.add_argument('--tolerance', type=float, default=0.1)
    args = parser.parse_args()

    img = cv2.imread(args.image_path)
    cv2.imshow('Input Image', img)
    cv2.waitKey(0)

    fast = cv2.FastFeatureDetector()
    keypoints = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, keypoints, color=(255, 0, 0))
    cv2.imshow('Detected FAST keypoints', img2)
    cv2.waitKey(0)

    # keypoints should be sorted by strength in descending order before feeding to SSC to work correctly
    shuffle(keypoints)  # simulating sorting by score with random shuffle

    selected_keypoints = ssc(keypoints, args.num_ret_points, args.tolerance,
                             img.shape[1], img.shape[0])

    img3 = cv2.drawKeypoints(img, selected_keypoints, color=(255, 0, 0))
    cv2.imshow('Selected keypoints', img3)
    cv2.waitKey(0)
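The shuffle above only simulates the ordering SSC requires; with real data the keypoints would be sorted by corner response first, e.g.:

keypoints = sorted(keypoints, key=lambda kp: kp.response, reverse=True)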
Code Example #7
def fast(filename):
    img = cv2.imread(filename, 0)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector()

    # find and draw the keypoints
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

    # Print all default params
    print("Threshold: ", fast.getInt('threshold'))
    print("nonmaxSuppression: ", fast.getBool('nonmaxSuppression'))
    print("neighborhood: ", fast.getInt('type'))
    print("Total Keypoints with nonmaxSuppression: ", len(kp))

    cv2.imshow('fast_true', img2)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()

    # Disable nonmaxSuppression
    fast.setBool('nonmaxSuppression', 0)
    kp = fast.detect(img, None)

    print("Total Keypoints without nonmaxSuppression: ", len(kp))

    img3 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

    cv2.imshow('fast_false', img3)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()
Code Example #8
def fast0(img):
    fast = cv2.FastFeatureDetector()
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))
    cv2.imshow("Sift", img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code Example #9
def run_FAST_v4(dp_color, perspective, segmentation_set):
    """
    with max suppression is better than without max suppression

    v4 : if keypoint is inside of given segment, accept it.

    iterates for kp ( for contour)
    :param img:
    :return:
    """

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector(threshold=60, nonmaxSuppression=True)

    # find and draw the keypoints
    kp_set = fast.detect(dp_color, None)

    score = [0 for x in range(len(segmentation_set))]
    counts = [0 for x in range(len(segmentation_set))]

    for kp in kp_set:
        x = int(kp.pt[0])
        y = int(kp.pt[1])

        for i in range(len(segmentation_set)):
            # find the nearest contour from selected keypoint.
            s = segmentation_set[i]  # x1 x2 y1 y2

            if (s[0] < x < s[1]) and (s[2] < y < s[3]):
                score[i] += np.sqrt(perspective[x])
                counts[i] += 1

    return np.array(score)
Code Example #10
def featureDetectDesCorner(roiImageFiltered):
    fast = cv2.FastFeatureDetector()
    kp = fast.detect(roiImageFiltered, None)
    roiKeyPointImage = cv2.drawKeypoints(roiImageFiltered,
                                         kp,
                                         color=(255, 0, 0))
    return kp, roiKeyPointImage
Code Example #11
File: main.py Project: falmusha/hackberry
def old_test():

    img1a_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_1_a.jpg'
    img1b_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_1_b.jpg'
    img1c_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_1_c.jpg'
    img1d_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_1_d.jpg'

    img2a_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_2_a.jpg'
    img2b_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_2_b.jpg'
    img2c_path = '/Users/ifahad7/Dropbox/School/FYDP/hackberry/test_images/images/img_2_c.jpg'

    img1a = cv2.imread(img1a_path)  # queryImage
    img1b = cv2.imread(img1b_path)  # trainImage
    img1c = cv2.imread(img1c_path)  # trainImage
    img1d = cv2.imread(img1d_path)  # trainImage

    img2a = cv2.imread(img2a_path)  # queryImage
    img2b = cv2.imread(img2b_path)  # trainImage
    img2c = cv2.imread(img2c_path)  # trainImage

    hcv = hackberry_cv.ComputerVision()

    kp_alg = cv2.FastFeatureDetector()
    #kp_alg = cv2.SURF()
    des_alg = cv2.SURF()

    out = hcv.stitch(img2a, img2b, kp_alg, des_alg)
    out = hcv.stitch(out, img2c, kp_alg, des_alg)
    cv2.imwrite('out_1.jpg', out)

    out = hcv.stitch(img1a, img1b, kp_alg, des_alg)
    out = hcv.stitch(out, img1c, kp_alg, des_alg)
    out = hcv.stitch(out, img1d, kp_alg, des_alg)
    cv2.imwrite('out_2.jpg', out)
Code Example #12
def build_cluster(image, featureValue, K):
    img = cv2.imread(image)
    fast = cv2.FastFeatureDetector(featureValue)
    orb = cv2.ORB(180)
    kp = fast.detect(img,None)
    kp, des = orb.compute(img, kp)

    # build keypoints location array for cluster
    locations = np.empty((len(kp),2))
    for i in range(len(kp)):
        loc = array((int(kp[i].pt[0]), int(kp[i].pt[1])))
        locations[i]=loc

    kcenters, distortion  = kmeans(locations, K)
    kcenters = kcenters[kcenters[:,0].argsort()]

    # cluster index: 0: left eye, 1 mouth and nose, 2: right eye
    kpCluster = {i: [] for i in range(K)}
    clusterLoc = {i: [] for i in range(K)}
    for i in range(len(kp)):
        set = 0
        minDis = sys.maxint
        for j in range(K):
            dis = euclidean(locations[i], kcenters[j])
            if dis<minDis:
                set = j
                minDis = dis
        kpCluster[set].append(kp[i])
        clusterLoc[set].append(locations[i])

    imageFeature = [len(kp)]
    for i in range(K):
        clusterFeature = cluster_feature(clusterLoc[i], kcenters[i])
        imageFeature = imageFeature + clusterFeature
    return imageFeature
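For reference, this snippet (like the related Kmeans example further down) relies on SciPy's clustering helpers and a NumPy star-import; the implied imports would be roughly:

import sys
import cv2
import numpy as np
from numpy import array
from scipy.cluster.vq import kmeans
from scipy.spatial.distance import euclidean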
Code Example #13
# Object texture discrimination
img2 = cv2.imread('phone_pic/matchIphone.png')
img = cv2.imread("phone_pic/iphone5bai.png")

fast = cv2.FastFeatureDetector()
kp = fast.detect(img, None)
# cv2.drawKeypoints(img, kp, color=(255, 0, 0))



# x = cv2.Sobel(img,cv2.CV_16SC1,1,0)
# y = cv2.Sobel(img,cv2.CV_16S,0,1)

# absX = cv2.convertScaleAbs(x)
# absY = cv2.convertScaleAbs(y)
# cv2.imshow("absX", absX)
# cv2.imshow("absY", absY)
# dst = cv2.addWeighted(absX,0.2,absY,0.2,0)
# print img.shape
#regRect = []
#im = img[0:100, 100:200]

cv2.imshow("histImgB", img2)
cv2.waitKey(0)
Code Example #14
    def detect(self, img, detector, method=PREDEFINED):
        # set det_ver == initialized det_ver
        det_ver = self.det_ver

        # update det_ver with trackbar
        FeatureMapper.set_det_ver(self, detector)

        # convert image to greyscale
        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # check if input method has different value from PREDEFINED (default)
        if self.det_ver != self.PREDEFINED:
            # Harris corner detector
            if self.det_ver == 0:
                mod_grey = np.float32(grey)  # optional cast; cornerHarris accepts 8-bit or float32
                # note: cornerHarris returns a corner-response map, not KeyPoint objects
                key_points = cv2.cornerHarris(mod_grey, 2, 3, 0.04)  # blockSize=2, ksize=3, k=0.04
            # FAST corner detector
            elif self.det_ver == 1:
                fast = cv2.FastFeatureDetector_create()  # create FAST object (default params)
                key_points = fast.detect(grey, None)  # detect key points
            # ORB corner detector
            elif self.det_ver == 2:
                orb = cv2.ORB_create()  # create ORB object (default params)
                key_points = orb.detect(grey, None)  # detect key points
        else:
            # SIFT corner detector
            sift = cv2.SIFT_create()  # create SIFT object
            key_points = sift.detect(grey, None)  # detect key points
        return key_points
Code Example #15
def fast(img_url):
    resp = urllib.urlopen(img_url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    img = cv.imdecode(image, cv.IMREAD_COLOR)
    # Initiate FAST object with default values
    fast = cv.FastFeatureDetector()
    # find and draw the keypoints
    kp = fast.detect(img,None)
    img2 = cv.drawKeypoints(img, kp, color=(255,0,0))
    # Print all default params
    print("Threshold: ", fast.getInt('threshold'))
    print("nonmaxSuppression: ", fast.getBool('nonmaxSuppression'))
    print("neighborhood: ", fast.getInt('type'))
    print("Total Keypoints with nonmaxSuppression: ", len(kp))
    # cv.imwrite('./images/fast_true.png',img2)
    # Disable nonmaxSuppression
    fast.setBool('nonmaxSuppression',0)
    kp = fast.detect(img,None)
    print("Total Keypoints without nonmaxSuppression: ", len(kp))
    img3 = cv.drawKeypoints(img, kp, color=(255,0,0))
    # cv.imwrite('./images/fast_false.png',img3)
    return img3


# fast(url)
# cv.waitKey(0)
# cv.destroyAllWindows()
Code Example #16
def fastFeature_kp(image, featureValue):
    img = cv2.imread(image)
    fast = cv2.FastFeatureDetector(featureValue)
    orb = cv2.ORB(180)
    kp = fast.detect(img, None)
    kp, des = orb.compute(img, kp)
    return kp, des
Code Example #17
File: core.py Project: gitter-badger/soto_project
    def __init__(self, config):
        # Get all the settings from the config file
        self.video_folder = config.get('default', 'video_folder')
        self.video_files = json.loads(config.get('default', 'video_files'))
        self.visualize = json.loads(config.get('default', 'visualize'))
        self.number_of_objects = json.loads(
            config.get('default', 'number_of_objects'))
        self.inertia_threshold = json.loads(
            config.get('default', 'inertia_threshold'))

        self.arena_settings = json.loads(
            config.get('default', 'arena_settings'))
        self.led_settings = json.loads(config.get('default', 'led_settings'))
        self.lk_settings = json.loads(config.get('default', 'lk_settings'))
        self.detector = json.loads(config.get('default', 'detector'))
        self.ShiTom_settings = json.loads(
            config.get('default', 'ShiTom_settings'))
        self.FAST_settings = json.loads(config.get('default', 'FAST_settings'))

        self.arena_settings["points"] = np.array(self.arena_settings["points"])
        self.led_settings["center_1"] = tuple(self.led_settings["center_1"])
        self.led_settings["center_2"] = tuple(self.led_settings["center_2"])

        self.lk_settings["winSize"] = tuple(self.lk_settings["winSize"])
        self.lk_settings["criteria"] = (long(3),
                                        self.lk_settings["criteria_count"],
                                        self.lk_settings["criteria_eps"])
        del self.lk_settings["criteria_eps"], self.lk_settings[
            "criteria_count"]

        try:
            self.mmse_lookback = json.loads(
                config.get('default', 'MMSE_lookback'))
        except:
            self.mmse_lookback = 6

        self.permutations = np.array(
            list(permutations(range(self.number_of_objects))))
        self.color_pallet = plt.get_cmap('jet')(np.linspace(
            0, 1.0, self.number_of_objects)) * 255

        self.track_len = 10  # must be higher than 5
        self.tracks = []

        self.visual_image = None

        self.frame_idx = 1
        self.start_time = 0.

        self.previous_centers = None
        self.ordered_centers = None
        self.new_labels = None

        self.led_status = [0, 0]
        self.led_values = [0, 0]

        self.arena_mask = None

        self.fast = cv2.FastFeatureDetector(**self.FAST_settings)
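Because FAST_settings is splatted into the constructor as keyword arguments, the corresponding config entry must be a JSON object whose keys match the detector's parameters. A hypothetical entry (values are illustrative only):

[default]
FAST_settings = {"threshold": 20, "nonmaxSuppression": true}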
Code Example #18
def feature_detection(img1, points1): 
	fast = cv2.FastFeatureDetector(20)		# sets the detection threshold
	fast.setBool('nonmaxSuppression',1)		# enable non-max suppression
	kp = fast.detect(img1,None) 
	cd_x=np.array([k.pt[0] for k in kp])
	cd_y=np.array([k.pt[1] for k in kp])
	for i in range(len(cd_x)):
		points1.append([[cd_x[i],cd_y[i]]])
Code Example #19
def featureDetectCorner(roiImageFiltered):
    fast = cv2.FastFeatureDetector()
    kp = fast.detect(roiImageFiltered, None)
    if (np.size(kp) > 0):
        roiKeyPointImage = roiImageFiltered  #cv2.drawKeypoints(roiImageFiltered, kp, color=(255, 0, 0))
        return kp, roiKeyPointImage
    else:
        return kp, roiImageFiltered
Code Example #20
 def __init__(self, cornerSize, winSize):
     self.FAST = cv2.FastFeatureDetector()
     self.cornerSize = cornerSize
     self.winSize = winSize
     self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                      30, 0.001)
     corner_x = []
     corner_p = []
Code Example #21
def findHomography(im0, im1):
    fd = cv2.FastFeatureDetector(10)
    fs = fd.detect(im0)
    fs = np.float32([x.pt for x in fs])
    nfs, s, te = cv2.calcOpticalFlowPyrLK(im0, im1, fs, None, winSize=(121, 121))
    #fs = fs[reshape(s > 0, s.size)]
    #nfs = nfs[reshape(s > 0, s.size)]
    return cv2.findHomography(fs, nfs, cv2.RANSAC)
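For context, cv2.findHomography returns the 3x3 matrix together with an inlier mask, so a caller unpacks two values. A small usage sketch, assuming two consecutive grayscale frames:

H, inlier_mask = findHomography(prev_frame, curr_frame)
h, w = prev_frame.shape[:2]
stabilized = cv2.warpPerspective(prev_frame, H, (w, h))  # warp onto the next frame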
Code Example #22
File: index.py Project: kopsinnovaties/ultron
def processImage(imagePath):
	orignalImage = cv2.imread(imagePath, cv2.CV_LOAD_IMAGE_COLOR)

	grayImage = cv2.cvtColor(orignalImage, cv2.COLOR_BGR2GRAY)

	blurredImage = cv2.GaussianBlur(grayImage, (3, 3), 0)

	edgedCorrectedImage = auto_canny(blurredImage)

	# ret,thresh = cv2.threshold(blurredImage,8,255,cv2.THRESH_BINARY)
	# thresh=cv2.inRange(grayImage,190,255);
	# contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	# croppedImage = removeWhiteSpace(edgedCorrectedImage, thresh, contours, hierarchy)

	# removeText(edgedCorrectedImage)

	ret,thresh = cv2.threshold(grayImage,127,255,cv2.THRESH_BINARY_INV)
	# contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	# cv2.drawContours(image, contours, -1, (0,255,0), 1)
	# print croppedImage

	newFile = open('data','w')

	fast = cv2.FastFeatureDetector(120)
	kp = fast.detect(grayImage,None)

	counter = 0

	i = 0
	kpArray = []

	for keyPoints in kp:
		kpArray.append(keyPoints.pt)

	kpArray = sorted(kpArray,key=itemgetter(0))

	prunedKpArray = pruningArray(kpArray)

	for ele in prunedKpArray:
		newFile.write('a'+str(i)+'=model.floorplan.newCorner('+str(ele[0])+','+str(ele[1])+');\n')
		counter+=1
		i+=1
		if counter % 2 == 0:
			i = 0
			newFile.write('model.floorplan.newWall(a0,a1);\n')
	# for keyPoints in kpArray:
	# 	newFile.write('a'+str(i)+'=model.floorplan.newCorner'+str(keyPoints.pt)+';\n')
	# 	counter+=1
	# 	i+=1
	# 	if counter%2 is 0 :
	# 		i = 0
	# 		newFile.write('model.floorplan.newWall(a0,a1);\n')



	newFile.close()
Code Example #23
    def fastFeatureDetector(self, img):
        # Initiate FAST object with default values
        fast = cv2.FastFeatureDetector()

        # find and draw the keypoints
        kp = fast.detect(img, None)
        img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

        return img2
Code Example #24
 def test_fast(self):
     fd = cv2.FastFeatureDetector(30, True)
     img = self.get_sample("samples/cpp/right02.jpg", 0)
     img = cv2.medianBlur(img, 3)
     imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
     keypoints = fd.detect(img)
     self.assert_(600 <= len(keypoints) <= 700)
     for kpt in keypoints:
         self.assertNotEqual(kpt.response, 0)
Code Example #25
def test_fast():
    img_path = glob.glob("dataset/bear/*.JPEG")[0]
    img = cv2.imread(img_path)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    fast = cv2.FastFeatureDetector(50)
    keypoints = fast.detect(gray_img, None)  # detect on the grayscale image
    img = cv2.drawKeypoints(img, keypoints, color=(255, 0, 0))
    cv2.imshow("image", img)
    cv2.waitKey(0)
    print("keypoints len = {0}".format(len(keypoints)))
Code Example #26
File: TrackImage.py Project: blefaudeux/facealign
    def __motion_estimation_feature(self, ref_frame, new_frame, min_matches=7):
        # Create a FAST corner detector
        detector = cv.FastFeatureDetector(16, True)
        # extractor = cv.DescriptorExtractor_create('SIFT')
        extractor = cv.DescriptorExtractor_create('ORB')
        # extractor = cv.DescriptorExtractor_create('FREAK')

        # find the keypoints and descriptors
        kp1 = detector.detect(new_frame)
        k1, des1 = extractor.compute(new_frame, kp1)

        kp2 = detector.detect(ref_frame)
        k2, des2 = extractor.compute(ref_frame, kp2)

        # Match using brute force with Hamming distance (binary descriptors)
        matcher = cv.DescriptorMatcher_create('BruteForce-Hamming')
        matches = matcher.match(des1, des2)

        # keep only the reasonable matches
        dist = [m.distance for m in matches]
        thres_dist = (sum(dist) / len(dist)) * 0.3
        good_matches = [m for m in matches if m.distance < thres_dist]

        # compute the transformation from the brute force matches
        if len(good_matches) > min_matches:
            print "Enough matchs for compensation - %d/%d" % (
                len(good_matches), min_matches)
            self.corners = np.float32(
                [k1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            self.corners_next = np.float32(
                [k2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

            transform, mask = cv.findHomography(self.corners,
                                                self.corners_next, cv.RANSAC,
                                                3.0)

            # Check that the transform indeed explains the corners shifts ?
            mask_match = [m for m in mask if m == 1]

            if len(mask_match) < min_matches:
                print "Tracking lost - %d final matches" % len(mask_match)
                return None, False

            print("Transformation deemed valid")
            return transform, True

        else:
            print "Not enough matches are found - %d/%d" % (len(good_matches),
                                                            min_matches)
            return None, False
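A common alternative to the mean-distance filter used above is Lowe's ratio test over k-nearest-neighbor matches; a sketch reusing the matcher and descriptors from this snippet (the 0.75 ratio is a conventional choice, assumed here):

# ratio test: keep a match only if it is clearly better than the runner-up
pairs = matcher.knnMatch(des1, des2, k=2)
good_matches = [m for m, n in pairs if m.distance < 0.75 * n.distance]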
Code Example #27
File: Kmeans.py Project: deepxkn/facial-expression
def create_cluster_image(image, featureValue, K):
    img = cv2.imread(image)
    fast = cv2.FastFeatureDetector(featureValue)
    orb = cv2.ORB(180)
    kp = fast.detect(img, None)
    kp, des = orb.compute(img, kp)
    print len(kp)

    # build keypoints location array for cluster
    locations = np.empty((len(kp), 2))
    for i in range(len(kp)):
        loc = array((int(kp[i].pt[0]), int(kp[i].pt[1])))
        locations[i] = loc

    size = len(des) / K
    #centers = array((locations[size], locations[2*size], locations[3*size], locations[4*size], locations[5*size], locations[6*size-1]))
    #centers = array((locations[0], locations[2*size], locations[3*size], locations[4*size-1]))
    Ncenters = array(
        (locations[0], locations[2 * size], locations[3 * size - 1]))
    kcenters, distortion = kmeans(locations, K)
    kcenters = kcenters[kcenters[:, 0].argsort()]

    kpCluster = {i: [] for i in range(K)}
    for i in range(len(kp)):
        set = 0
        minDis = sys.maxint
        for j in range(K):
            dis = euclidean(locations[i], kcenters[j])
            if dis < minDis:
                set = j
                minDis = dis
        kpCluster[set].append(kp[i])

    pic = img
    for i in range(K):
        pic = cv2.drawKeypoints(pic, kpCluster[i], color=colors[i], flags=0)

    leftIndex = findNeaghborPoint(locations, kcenters[0])
    leftEye = [kp[leftIndex]]
    pic = cv2.drawKeypoints(pic, leftEye, color=colors[5], flags=0)
    rightIndex = findNeaghborPoint(locations, kcenters[2])
    rightEye = [kp[rightIndex]]
    pic = cv2.drawKeypoints(pic, rightEye, color=colors[5], flags=0)
    MIndex = findNeaghborPoint(locations, kcenters[1])
    mouse = [kp[MIndex]]
    pic = cv2.drawKeypoints(pic, mouse, color=colors[5], flags=0)
    imageName = image
    cv2.imshow(imageName, pic)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    newoutput = image.replace("resize", "output")
    cv2.imwrite(newoutput, pic)
Code Example #28
    def __init__(self):
        self.image_pub = rospy.Publisher("image_topic_2", Image, queue_size=1)

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber('/io/internal_camera/head_camera/image_raw',Image,self.homography_callback)
        self.marker_sub = rospy.Subscriber('/io/internal_camera/head_camera/image_raw',Image,self.marker_callback)
        self.fast = cv2.FastFeatureDetector()
        self.fast.setBool('nonmaxSuppression',1)
        self.fast.setInt('threshold', 10)

        self.H = 0

        self.pub = rospy.Publisher('bounding_points', Float64MultiArray, queue_size=10)
Code Example #29
def featureDetectCorner(roiImageFiltered):
    roiImageFiltered_unit8 = unit8Image(roiImageFiltered)
    fast = cv2.FastFeatureDetector()
    try:
        kp = fast.detect(roiImageFiltered_unit8, None)
        roiKeyPointImage_unit8 = cv2.drawKeypoints(roiImageFiltered_unit8,
                                                   kp,
                                                   color=(255, 0, 0))
        return kp, roiKeyPointImage_unit8
    except TypeError:
        print 'The input image in featureDetectCorner is not of data type uint8 or is not grayscale. Please convert the image to uint8 using numpy.uint8.'
    except IOError:
        print 'The path to the file in featureDetectCorner is not correctly specified. Please check that the file is in the correct location.'
Code Example #30
def find_keypoints(img):
  # Initiate FAST object with default values
  fast = cv2.FastFeatureDetector()

  # find and draw the keypoints
  kp = fast.detect(img,None)
  img2 = cv2.drawKeypoints(img, kp, color=(255,0,0))

  # Print all default params
  print("Threshold: ", fast.getInt('threshold'))
  print("nonmaxSuppression: ", fast.getBool('nonmaxSuppression'))
  #print("neighborhood: ", fast.getInt('type'))
  print("Total Keypoints with nonmaxSuppression: ", len(kp))

  cv2.imwrite('fast_true.png',img2)