Example #1
    def callback(self, ros_data):
        '''Callback function of subscribed topic.
        Here images get converted and features detected.'''
        if VERBOSE:
            print 'received image of type: "%s"' % ros_data.format

        #### direct conversion to CV2 ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        # image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0

        detector = cv2.FeatureDetector_create("SIFT")
        descriptor = cv2.DescriptorExtractor_create("SIFT")

        # img1 and img2 are assumed to be defined elsewhere (e.g. instance attributes)
        kp1 = detector.detect(img1)
        kp1, des1 = descriptor.compute(img1, kp1)

        kp2 = detector.detect(img2)
        kp2, des2 = descriptor.compute(img2, kp2)

        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)
        matches = list(
            map(
                lambda a: [a[0]],
                filter(lambda a: a[0].distance < 0.75 * a[1].distance,
                       matches)))

        matches = sorted(matches, key=lambda val: val[0].distance)

        dist = np.mean(list(map(lambda x: x[0].distance, matches[:25])))

        msg = str(dist)

        # if dist < 170:

        # Publish new image
        self.image_pub.publish(msg)

        self.subscriber.unregister()
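For reference, a minimal sketch of the same detect-match-average pipeline on OpenCV 4.4+, where the FeatureDetector_create/DescriptorExtractor_create factories no longer exist and SIFT lives in the main module (the function name and images are placeholders; the 0.75 ratio and top-25 average follow the example above):

import cv2
import numpy as np

def mean_match_distance(img1, img2):
    sift = cv2.SIFT_create()                       # replaces the *_create factories
    kp1, des1 = sift.detectAndCompute(img1, None)  # detect and describe in one call
    kp2, des2 = sift.detectAndCompute(img2, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Lowe's ratio test, then sort by distance, as in the callback above
    good = sorted((m for m, n in matches if m.distance < 0.75 * n.distance),
                  key=lambda m: m.distance)
    return np.mean([m.distance for m in good[:25]])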
Example #2
def matches(path1, path2):

    img1 = cv2.imread(path1, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    detector = cv2.FeatureDetector_create("SURF")
    descriptor = cv2.DescriptorExtractor_create("BRIEF")
    matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")

    # detect keypoints
    kp1 = detector.detect(img1)
    kp2 = detector.detect(img2)

    print '#keypoints in image1: %d, image2: %d' % (len(kp1), len(kp2))

    # descriptors
    k1, d1 = descriptor.compute(img1, kp1)
    k2, d2 = descriptor.compute(img2, kp2)

    print '#descriptors in image1: %d, image2: %d' % (len(d1), len(d2))

    # match the keypoints
    matches = matcher.match(d1, d2)

    # visualize the matches
    print '#matches:', len(matches)
    dist = [m.distance for m in matches]

    print 'distance: min: %.3f' % min(dist)
    print 'distance: mean: %.3f' % (sum(dist) / len(dist))
    print 'distance: max: %.3f' % max(dist)

    # threshold: half the mean
    thres_dist = (sum(dist) / len(dist)) * 0.5

    # keep only the reasonable matches
    sel_matches = [m for m in matches if m.distance < thres_dist]

    print '#selected matches:', len(sel_matches)
Example #3
    def callback(self, ros_data):
        '''Callback function of subscribed topic. 
        Here images get converted and features detected'''
        if VERBOSE:
            print 'received image of type: "%s"' % ros_data.format

        #### direct conversion to CV2 ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        #image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0:

        #### Feature detectors using CV2 ####
        # "","Grid","Pyramid" +
        # "FAST","GFTT","HARRIS","MSER","ORB","SIFT","STAR","SURF"
        method = "GridFAST"
        feat_det = cv2.FeatureDetector_create(method)
        time1 = time.time()

        # convert np image to grayscale
        featPoints = feat_det.detect(cv2.cvtColor(image_np,
                                                  cv2.COLOR_BGR2GRAY))
        time2 = time.time()
        if VERBOSE:
            print '%s detector found: %s points in: %s sec.' % (
                method, len(featPoints), time2 - time1)

        for featpoint in featPoints:
            x, y = featpoint.pt
            cv2.circle(image_np, (int(x), int(y)), 3, (0, 0, 255), -1)

        cv2.imshow('cv_img', image_np)
        cv2.waitKey(2)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
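np.fromstring and ndarray.tostring are deprecated in current NumPy; a hedged sketch of the same CompressedImage round-trip with their modern replacements (ros_data and msg as in the callback above):

np_arr = np.frombuffer(ros_data.data, np.uint8)    # frombuffer replaces fromstring
image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # IMREAD_COLOR replaces CV_LOAD_IMAGE_COLOR
msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tobytes()  # tobytes replaces tostring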
Example #4
def extract_features_sift(image, config):
    detector = cv2.FeatureDetector_create('SIFT')
    descriptor = cv2.DescriptorExtractor_create('SIFT')
    detector.setDouble('edgeThreshold', config.get('sift_edge_threshold', 10))
    sift_peak_threshold = float(config.get('sift_peak_threshold', 0.01))
    while True:
        print 'Computing sift with threshold {0}'.format(sift_peak_threshold)
        t = time.time()
        detector.setDouble("contrastThreshold", sift_peak_threshold)
        points = detector.detect(image)
        print 'Found {0} points in {1}s'.format(len(points), time.time() - t)
        if len(points) < config.get('feature_min_frames',
                                    0) and sift_peak_threshold > 0.0001:
            sift_peak_threshold = (sift_peak_threshold * 2) / 3
            print 'reducing threshold'
        else:
            print 'done'
            break
    points, desc = descriptor.compute(image, points)
    if config.get('feature_root', False):
        desc = root_feature(desc)
    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
    return points, desc
Example #5
def get_sift_descriptors_and_matches(img1, img2):
	'''
	img1 is test image, img2 is template
	'''
	detector = cv2.FeatureDetector_create("SIFT")
	descriptor = cv2.DescriptorExtractor_create("SIFT")

	kps1 = detector.detect(img1)
	kps1, descr1 = descriptor.compute(img1, kps1)

	kps2 = detector.detect(img2)
	kps2, descr2 = descriptor.compute(img2, kps2)

	# FLANN parameters
	flann_params = dict(algorithm=1, trees=10)
	flann = cv2.flann_Index(descr1, flann_params)
	idx, dist = flann.knnSearch(descr2, 1, params={})

	return idx, dist, descr1, descr2
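The raw cv2.flann_Index used above also has a higher-level wrapper; a sketch of the same 1-NN search through cv2.FlannBasedMatcher (same KD-tree parameters; checks=32 is an illustrative value):

flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=10), dict(checks=32))
nn_matches = flann.knnMatch(descr2, descr1, k=1)  # query descr2 against descr1, as knnSearch does above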
Example #6
def MatchAllCaptureGlass(save, maxdist=200):
    from os.path import isfile, join
    import freenect
    from os import listdir
    import cv2
    import numpy as np
    import itertools
    import sys

    #Prepare a list of different training images
    pathGlass = "TrainingImages/Glass/"
    GlassCups = [f for f in listdir(pathGlass)
                 if isfile(join(pathGlass, f)) and f[0] != "."]
    
    img, timestamp = freenect.sync_get_video()
    depth, timestamp = freenect.sync_get_depth(format=freenect.DEPTH_REGISTERED)

    detector = cv2.FeatureDetector_create("FAST")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    skp = detector.detect(img)
    skp, sd = descriptor.compute(img, skp)
    
    KeyPointsTotalList = []
    DistsTotalList = []

    for i in GlassCups:
        temp = cv2.imread(join(pathGlass, i))
        KeyPointsOut = findKeyPointsDist(img,temp,skp,sd,maxdist)
        KeyPointsTotalList += KeyPointsOut[0]
        DistsTotalList += KeyPointsOut[1]

    indices = range(len(DistsTotalList))
    indices.sort(key=lambda i: DistsTotalList[i])
    DistsTotalList = [DistsTotalList[i] for i in indices]
    KeyPointsTotalList = [KeyPointsTotalList[i] for i in indices]
    if save == 1:
        saveImageMappedPoints(img, KeyPointsTotalList, 1)
        
    return KeyPointsTotalList, DistsTotalList, img, depth
Example #7
    def detectAndDescribe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # OpenCV version difference
        if self.isv3:
            # instantiate the DoG keypoint detector via SIFT_create()
            descriptor = cv2.xfeatures2d.SIFT_create()
            # detect keypoints and compute feature vectors in one call
            (kps, features) = descriptor.detectAndCompute(image, None)

        else:
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints to a NumPy array
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and feature vectors
        return (kps, features)
Example #8
    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # check to see if we are using OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)

        # otherwise, we are using OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # keep only keypoints that fall on sand or court lines
        # print(kps, features)
        court_kps = []
        court_features = []
        for index, kp in enumerate(kps):
            x, y = kp.pt
            b, g, r = image[int(ceil(y)), int(ceil(x))]

            if utils.isSand(r, g, b) or utils.isLine(r, g, b):
                court_kps.append(kp)
                court_features.append(features[index])

        # convert the keypoints from KeyPoint objects to NumPy
        # arrays
        court_kps = np.float32([kp.pt for kp in court_kps])
        court_features = np.asarray(court_features)

        # return a tuple of keypoints and features
        return (court_kps, court_features)
Example #9
    def detectPiecesSIFT(self, correct_board):
        self.board = np.zeros((8, 8))

        sift_detector = cv2.FeatureDetector_create("SIFT")
        sift_extractor = cv2.DescriptorExtractor_create("SIFT")
        centers = np.load("feature_data/SIFT/centers.npy")

        probabilities = np.zeros((7, 8, 8))
        for piece in pieces:
            piece_class = piece_classes[piece]
            ratio = piece_to_ratio[piece]
            winSize = (int(64 * ratio), 64)
            classifier = joblib.load("classifiers/classifier_sift_" + piece +
                                     ".pkl")
            for r in xrange(8):
                for f in xrange(8):
                    bounding_box = self.getBoundingBox(r, f, piece)
                    x1 = bounding_box[0]
                    x2 = bounding_box[1]
                    y1 = bounding_box[2]
                    y2 = bounding_box[3]
                    subimage = self.image[y1:y2, x1:x2]
                    subimage = cv2.resize(subimage, winSize)
                    features = preprocessing.generateBOWFeatures(
                        subimage, centers, sift_detector, sift_extractor)
                    prob = classifier.predict_proba(features)
                    probabilities[piece_class, 7 - r, f] = prob[0, 1]

        # print(probabilities[0,:,:])
        # print(probabilities[1,:,:])
        # print(probabilities[2,:,:])
        self.board = np.argmax(probabilities, axis=0)
        self.probabilities = probabilities

        cross_entropy = self.cross_entropy(correct_board)
        detection_accuracy = self.detection_error(correct_board)
        classification_accuracy = self.classification_error(correct_board)

        return (cross_entropy, detection_accuracy, classification_accuracy)
Example #10
def extract_features_sift(image, config):
    sift_edge_threshold = config['sift_edge_threshold']
    sift_peak_threshold = float(config['sift_peak_threshold'])
    if context.OPENCV3:
        try:
            detector = cv2.xfeatures2d.SIFT_create(
                edgeThreshold=sift_edge_threshold,
                contrastThreshold=sift_peak_threshold)
        except AttributeError as ae:
            if "no attribute 'xfeatures2d'" in ae.message:
                logger.error('OpenCV Contrib modules are required to extract SIFT features')
            raise
        descriptor = detector
    else:
        detector = cv2.FeatureDetector_create('SIFT')
        descriptor = cv2.DescriptorExtractor_create('SIFT')
        detector.setDouble('edgeThreshold', sift_edge_threshold)
    while True:
        logger.debug('Computing sift with threshold {0}'.format(sift_peak_threshold))
        t = time.time()
        if context.OPENCV3:
            detector = cv2.xfeatures2d.SIFT_create(
                edgeThreshold=sift_edge_threshold,
                contrastThreshold=sift_peak_threshold)
        else:
            detector.setDouble("contrastThreshold", sift_peak_threshold)
        points = detector.detect(image)
        logger.debug('Found {0} points in {1}s'.format(len(points), time.time() - t))
        if len(points) < config['feature_min_frames'] and sift_peak_threshold > 0.0001:
            sift_peak_threshold = (sift_peak_threshold * 2) / 3
            logger.debug('reducing threshold')
        else:
            logger.debug('done')
            break
    points, desc = descriptor.compute(image, points)
    if config['feature_root']:
        desc = root_feature(desc)
    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
    return points, desc
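An illustrative config for the function above (the key names are taken from the code; the values and gray_image are made up for the example):

config = {
    'sift_edge_threshold': 10,
    'sift_peak_threshold': 0.1,
    'feature_min_frames': 4000,
    'feature_root': True,
}
points, desc = extract_features_sift(gray_image, config)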
Example #11
def up_to_step_1(imgs):
    """Complete pipeline up to step 1: Detecting features and descriptors"""

    # construct a SURF keypoint detector and a SURF feature extractor
    surf_detector = cv2.FeatureDetector_create("SURF")
    surf_detector.setInt("hessianThreshold", 1500)
    surf_extractor = cv2.DescriptorExtractor_create("SURF")

    for img in imgs:
        # convert to gray image
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # detect keypoints in the image
        kp = surf_detector.detect(img_gray, None)
        # extract features from the image
        (kp, features) = surf_extractor.compute(img_gray, kp)
        # draw features on image
        img = cv2.drawKeypoints(
            image=img,
            outImage=img,
            keypoints=kp,
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return imgs
Example #12
def obtainSimilarityScore(img1, img2):
    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)
    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)
    num1 = 0
    for i in range(len(sd)):
        kp_value_min = np.inf
        kp_value_2min = np.inf
        for j in range(len(td)):
            kp_value = 0
            for k in range(128):
                kp_value = (sd[i][k] - td[j][k]) * (sd[i][k] -
                                                    td[j][k]) + kp_value
            if kp_value < kp_value_min:
                kp_value_2min = kp_value_min
                kp_value_min = kp_value
            elif kp_value < kp_value_2min:
                kp_value_2min = kp_value
        if kp_value_min < 0.8 * kp_value_2min:
            num1 = num1 + 1
    num2 = 0
    for i in range(len(td)):
        kp_value_min = np.inf
        kp_value_2min = np.inf
        for j in range(len(sd)):
            kp_value = 0
            for k in range(128):
                kp_value = (td[i][k] - sd[j][k]) * (td[i][k] -
                                                    sd[j][k]) + kp_value
            if kp_value < kp_value_min:
                kp_value_2min = kp_value_min
                kp_value_min = kp_value
            elif kp_value < kp_value_2min:
                kp_value_2min = kp_value
        if kp_value_min < 0.8 * kp_value_2min:
            num2 = num2 + 1
    K1 = num1 * 1.0 / len(skp)
    K2 = num2 * 1.0 / len(tkp)
    SimilarityScore = 100 * (K1 + K2) * 1.0 / 2
    return SimilarityScore
Example #13
def getKeypointsDescriptors(filenames, detector_type, descriptor_type):
    detector = cv2.FeatureDetector_create(detector_type)
    if descriptor_type != 'color':
        if not (descriptor_type == 'HOG' or descriptor_type == 'LBP'):
            descriptor = cv2.DescriptorExtractor_create(descriptor_type)
        K = []
        D = []
        print 'Extracting Local Descriptors'
        init = time.time()
        for filename in filenames:
            ima = cv2.imread(filename)
            gray = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
            if descriptor_type == 'HOG':
                des = extractHOGfeatures(gray, detector)
                D.append(des)
            elif descriptor_type == 'LBP':
                des = extractLBPfeatures(gray, detector)
                D.append(des)
            else:
                kpts = detector.detect(gray)
                kpts, des = descriptor.compute(gray, kpts)
                K.append(kpts)
                D.append(des)

        end = time.time()

        print 'Done in ' + str(end - init) + ' secs.'
    else:
        K = []
        print 'Extracting Local Descriptors'
        init = time.time()
        for filename in filenames:
            ima = cv2.imread(filename)
            gray = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
            kpts = detector.detect(gray)
            K.append(kpts)
        D = getLocalColorDescriptors(filenames, K, 0)
    return (K, D)
Example #14
    def __init__(self, source=0, bb=None):
        self.mouse_p1 = None
        self.mouse_p2 = None
        self.mouse_drag = False
        self.bb = None
        self.img = None
        self.source = source
        self.detector = cv2.FeatureDetector_create(sys.argv[1])
        self.descriptor = cv2.DescriptorExtractor_create(sys.argv[1])

        self.flann_params = dict(algorithm=1, trees=4)

        if source:
            self.cam = cv2.VideoCapture(source)
        else:
            self.cam = cv2.VideoCapture(0)
        if not bb:
            _, self.img = self.cam.read()
            self.start()
        else:
            self.bb = bb
            _, self.img = self.cam.read()
            self.SIFT()
Example #15
    def CheckSVM(self, img):
        logData("checking image on svm classifier")
        clf, classes_names, stdSlr, k, voc = joblib.load(
            SVM_TRAINED_FILE_LOCATION)
        fea_det = cv.FeatureDetector_create(
            SVM_FEATURE_DETECTOR_EXTRACTOR_TYPE)
        des_ext = cv.DescriptorExtractor_create(
            SVM_FEATURE_DETECTOR_EXTRACTOR_TYPE)
        des_list = []
        kpts = fea_det.detect(img)
        kpts, des = des_ext.compute(img, kpts)
        des_list.append((self.image_path, des))
        # Stack all the descriptors vertically in a numpy array
        descriptors = des_list[0][1]
        for image_path, descriptor in des_list[1:]:
            descriptors = np.vstack((descriptors, descriptor))
        test_features = np.zeros((self.image_paths_len, k), "float32")
        for i in xrange(1):
            words, distance = vq(des_list[i][1], voc)
            for w in words:
                test_features[i][w] += 1
        # Perform Tf-Idf vectorization
        nbr_occurences = np.sum((test_features > 0) * 1, axis=0)
        idf = np.array(
            np.log(
                (1.0 * self.image_paths_len + 1) / (1.0 * nbr_occurences + 1)),
            'float32')
        # Scale the features
        test_features = stdSlr.transform(test_features)
        # Perform the predictions
        predictions = [classes_names[i] for i in clf.predict(test_features)]
        predictionForReturn = clf.predict(test_features)
        for prediction in predictions:
            logData("SVM PREDICTIONS ARE [" + prediction +
                    "] sending result as is " + str(predictionForReturn[0]))
        return predictionForReturn[0]
Example #16
def _extract_feature(X, feature):

    if feature == 'gray' or feature == 'surf':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2GRAY) for x in X]
    elif feature == 'hsv':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2HSV) for x in X]

    small_size = (32, 32)
    X = [cv2.resize(x, small_size) for x in X]

    if feature == 'surf':
        surf = cv2.SURF(400)
        surf.upright = True
        surf.extended = True
        num_surf_features = 36

        dense = cv2.FeatureDetector_create("Dense")
        kp = dense.detect(np.zeros(small_size).astype(np.uint8))

        kp_des = [surf.compute(x, kp) for x in X]

        X = [d[1][:num_surf_features, :] for d in kp_des]
    elif feature == 'hog':
        block_size = (small_size[0] / 2, small_size[1] / 2)
        block_stride = (small_size[0] / 4, small_size[1] / 4)
        cell_size = block_stride
        num_bins = 9
        hog = cv2.HOGDescriptor(small_size, block_size, block_stride,
                                cell_size, num_bins)
        X = [hog.compute(x) for x in X]
    elif feature is not None:
        X = np.array(X).astype(np.float32) / 255

        X = [x - np.mean(x) for x in X]

    X = [x.flatten() for x in X]
    return X
Example #17
    def _detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)

        # OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy arrays
        kps = np.float32([kp.pt for kp in kps])

        return (kps, features)
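The isv3 flag these classes branch on is set elsewhere; one minimal way to derive it (an assumption — the original may use a helper such as imutils.is_cv3() instead):

import cv2
isv3 = cv2.__version__.startswith("3.")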
Example #18
def generator_centers(data_root, K, downsample=1):
    dense = cv2.FeatureDetector_create("Dense")
    brief = cv2.DescriptorExtractor_create("SIFT")

    image_filenames = []
    for fpathe, dirs, fs in os.walk(data_root):
        for f in fs:
            name = str(f)
            if name.endswith('.jpg'):
                image_filenames.append(os.path.join(fpathe, f))

    cnt = 0
    points = []
    for image_filename in image_filenames:
        cnt += 1
        if cnt % downsample > 0:
            continue
        img = cv2.imread(image_filename, cv2.IMREAD_GRAYSCALE)
        if img.mean() > 250:
            continue
        kp = dense.detect(img, None)
        kp, des = brief.compute(img, kp)
        for t in des:
            points.append(t)
        #print("# kps: {}, descriptors: {}".format(len(kp), des.shape) )

    print('sample points: {0}x{1}'.format(len(points), len(points[0])))
    criteria = (cv2.TERM_CRITERIA_EPS, 3000, 0.01)
    flags = cv2.KMEANS_PP_CENTERS
    ret, labels, centers = cv2.kmeans(np.asarray(points), K, criteria, 10,
                                      flags)
    print('mse: {0}'.format(math.sqrt(ret) / len(points)))

    with open('train1-{0}-{1}.sift.feature'.format(downsample, K), 'wb') as f:
        centers.dump(f)
    return centers
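Since ndarray.dump writes a pickle, the saved centers are read back with the matching loader; a sketch with illustrative downsample/K values:

import numpy as np
centers = np.load('train1-1-256.sift.feature', allow_pickle=True)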
Example #19
    def JellemzoFelismer(self, image):
        # convert to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # OpenCV version check
        if self.isv3:
            # detect and describe features
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)

        # OpenCv 2.4.X
        else:
            # keypoint detection
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract the feature descriptors
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints to NumPy vectors
        kps = np.float32([kp.pt for kp in kps])

        return (kps, features)
Example #20
    def blob_proc(self, img):
        # don't use me, I'm bad
        d_red = cv2.RGB(150, 55, 65)
        l_red = cv2.RGB(250, 200, 200)

        detector = cv2.FeatureDetector_create('MSER')
        fs = detector.detect(img)
        fs.sort(key=lambda x: -x.size)

        sfs = [x for x in fs if not self.supress(x, fs)]

        # keep a clean copy for the side-by-side view; 'orig' was otherwise
        # undefined in this snippet
        orig = img.copy()
        for f in sfs:
            cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size / 2),
                       d_red, 2, cv2.CV_AA)
            cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size / 2),
                       l_red, 1, cv2.CV_AA)

        h, w = orig.shape[:2]
        vis = np.zeros((h, w * 2 + 5), np.uint8)
        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        vis[:h, :w] = orig
        vis[:h, w + 5:w * 2 + 5] = img

        return vis
Example #21
def create_sample_default_detectors(yaml_dir):
    """
    This function creates sample feature yaml files in the directory \
    specified by yaml_dir to provide coverage of all the supported feature \
    detectors in opencv's feat2d module and their parameters

    :param string yaml_dir: full path to write sample feature yaml files
    :returns: None
    """

    global __CV2_FT_DETECTORS__
    for detector in __CV2_FT_DETECTORS__:
        fd = cv2.FeatureDetector_create(detector)
        out_yaml = {
            'detector': {
                'name': detector,
                'parameters': build_parameter_dictionary(fd)
            }
        }
        out_name = os.path.join(yaml_dir,
                                'default_detector_%s.yaml' % detector)
        with open(out_name, 'w') as outfile:
            yaml.dump(out_yaml, stream=outfile)
Example #22
	def __init__(self, robot_file):
		rospack = rospkg.RosPack()

		# load the stock image paths for the left, right and u-turn signs
		self.left_turn = rospack.get_path('computer_vision') + '/images/left_turn_real.png'
		self.right_turn = rospack.get_path('computer_vision') + '/images/right_turn_real.png'
		self.u_turn = rospack.get_path('computer_vision') + '/images/uturn_real.png'

		self.robot_file = robot_file
		descriptor_name = "SIFT"

		self.left_sum = 0
		self.right_sum = 0
		self.u_sum = 0

		self.threshold_sum = 20

		self.detector = cv2.FeatureDetector_create(descriptor_name)
		self.extractor = cv2.DescriptorExtractor_create(descriptor_name)
		self.matcher = cv2.BFMatcher()
		self.im = None

		self.corner_threshold = 0.01
		self.ratio_threshold = .6
Example #23
def extract_ref(image_path, thumbnail_size):
    img = Image.open(image_path)
    img.load()
    if img is None:
        raise Exception("Error: Cannot read " + image_path)
    if img.size > thumbnail_size:
        img.thumbnail(thumbnail_size)
    img_array = numpy.array(img)
    img_gray = img_array
    if len(img_array.shape) >= 3:
        img_gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)

    img_gray = numpy.array(img_gray, numpy.uint8)
    featureDetector = cv2.FeatureDetector_create("SURF")
    keypoints = featureDetector.detect(img_gray)
    kp_angles = []
    if keypoints is None:
        return []

    for keypoint in keypoints:
        kp_angles.append(keypoint.angle)
    bins = range(0, 360, 10)
    histogram = numpy.histogram(kp_angles, bins=bins, density=True)[0]
    return histogram
Example #24
def detect_mser(image,
                mask,
                layer_id,
                smooth=True,
                show=False,
                show_now=True,
                save_fig=False):
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    detector = cv2.FeatureDetector_create('MSER')
    keypts = detector.detect(image)
    keypts = [kp for kp in keypts if mask[int(kp.pt[1]), int(kp.pt[0])]]

    if show or save_fig:
        fig = plt.figure(figsize=(24, 14))
        # draw the image once, then overlay a circle per keypoint
        plt.imshow(image, 'gray', interpolation='nearest')
        plt.title('layer=%i' % (layer_id + 1))
        for kp in keypts:
            r = int(0.5 * kp.size)
            x, y = kp.pt
            circ = plt.Circle((x, y), r, color='r', linewidth=2, fill=False)
            plt.gca().add_patch(circ)
        if save_fig:
            fig_dir = '/home/tomas/Dropbox/Work/Dizertace/figures/keypoints/mser/'
            dirs = fig_dir.split('/')
            for i in range(2, len(dirs)):
                subdir = '/'.join(dirs[:i])
                if not os.path.exists(subdir):
                    os.mkdir(subdir)
            fig.savefig(os.path.join(fig_dir,
                                     'mser_layer_%i.png' % (layer_id + 1)),
                        dpi=100,
                        bbox_inches='tight',
                        pad_inches=0)
        if show_now:
            plt.show()
Example #25
def compare(filename1, filename2):
	img1 = cv2.imread(filename1)          # queryImage
	img2 = cv2.imread(filename2)          # trainImage

	# Initiate SIFT detector
	detector = cv2.FeatureDetector_create("SIFT")
	descriptor = cv2.DescriptorExtractor_create("SIFT")
	
	# sift = cv2.xfeatures2d.SIFT_create()

	# find the keypoints and descriptors with SIFT
	kp1 = detector.detect(img1)
	kp1, des1 = descriptor.compute(img1, kp1)
	
	kp2 = detector.detect(img2)
	kp2, des2 = descriptor.compute(img2, kp2)

	# kp1, des1 = sift.detectAndCompute(img1,None)
	# kp2, des2 = sift.detectAndCompute(img2,None)

	# BFMatcher with default params
	bf = cv2.BFMatcher()
	# matches = bf.match(des1,des2)
	matches = bf.knnMatch(des1, des2, k=2)
	matches = [[a[0]] for a in matches if a[0].distance < 0.75 * a[1].distance]
	
	# matches = sorted(matches, key=lambda val: val.distance)
	matches = sorted(matches, key=lambda val: val[0].distance)

	# img3 = drawMatches(img1,kp1,img2,kp2,matches[:25])
	# img = np.zeros([max(img1.shape[0],img2.shape[0]),img1.shape[1]+img2.shape[1],3])
	# img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,img,flags=2)

	dist = np.mean(list(map(lambda x: x[0].distance, matches[:25])))

	print(dist)
Example #26
    def init(self, img_shape, feature2d_detector, detector_config,
             feature2d_descriptor, framesout, max_dist_as_img_fraction):
        ##
        # Initialization
        ##

        self.max_dist = max(img_shape) / max_dist_as_img_fraction

        self.detector = cv2.FeatureDetector_create(feature2d_detector)
        for name, value in detector_config[feature2d_detector].items():
            cv2_setParam(self.detector, self.detector.paramType(name), name,
                         value)

        self.descriptor = cv2.DescriptorExtractor_create(feature2d_descriptor)
        FLANN_INDEX_KDTREE = 0
        self.matcher = cv2.FlannBasedMatcher(
            dict(algorithm=FLANN_INDEX_KDTREE, trees=5), dict(checks=50))
        self.tracks = TrackCollection()

        # clear files
        self.framesout = framesout
        assert ("%04d" in framesout)
        for framefile in glob.glob(framesout.replace("%04d", "*")):
            os.remove(framefile)
Example #27
def visualize_boxes_and_labels_on_image_array(image,
                                              boxes,
                                              classes,
                                              scores,
                                              category_index,
                                              instance_masks=None,
                                              keypoints=None,
                                              use_normalized_coordinates=False,
                                              max_boxes_to_draw=20,
                                              min_score_thresh=.5,
                                              agnostic_mode=False,
                                              line_thickness=4):
    box_to_display_str_map = collections.defaultdict(list)
    box_to_color_map = collections.defaultdict(str)
    box_to_instance_masks_map = {}
    box_to_keypoints_map = collections.defaultdict(list)

    ################### CALL  train.pkl ###################################
    clf, classes_names, stdSlr, k, voc = joblib.load("train.pkl")

    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        # 'start' is assumed to be set by the caller before this function runs
        elapsed = time.time() - start
        if scores is None or scores[i] > min_score_thresh:

            box = tuple(boxes[i].tolist())

            ####################### SIFT #######################################

            try:
                fea_det = cv2.FeatureDetector_create("SIFT")
                des_ext = cv2.DescriptorExtractor_create("SIFT")

                des_list = []
                # scale normalized box coords to a 640x480 frame
                y = int(box[0] * 479.000)
                yh = int(box[2] * 479.000)
                x = int(box[1] * 639.000)
                xh = int(box[3] * 639.000)
                #  print y, " ", yh, " ", x, " ",xh

                im = image[y:yh, x:xh]
                kpts = fea_det.detect(im)
                kpts, des = des_ext.compute(im, kpts)
                des_list.append((im, des))
                descriptors = des_list[0][1]
                for image2, descriptor in des_list[1:]:  # [1:] avoids stacking the first array twice
                    descriptors = np.vstack((descriptors, descriptor))

                test_features = np.zeros((1, k), "float32")
                for j in xrange(1):  # renamed from i to avoid shadowing the box index
                    words, distance = vq(des_list[j][1], voc)
                    for w in words:
                        test_features[j][w] += 1

                test_features = stdSlr.transform(test_features)

                # print clf.predict(test_features) >>>>>> class n. [0] [1]...[n]
                predictions = [
                    classes_names[i] for i in clf.predict(test_features)
                ]
                p = str(predictions[0])
                st = p + "#" + str(y) + "," + str(yh) + "," + str(
                    x) + "," + str(xh)
                #print p
                f.setPredic(st)

                if instance_masks is not None:
                    box_to_instance_masks_map[box] = instance_masks[i]
                if keypoints is not None:
                    box_to_keypoints_map[box].extend(keypoints[i])
                if scores is None:
                    box_to_color_map[box] = 'black'
                else:
                    display_str = '{}'.format(p)  # predictions[0]
                    box_to_display_str_map[box].append(display_str)
                    if agnostic_mode:
                        box_to_color_map[box] = 'DarkOrange'
                    else:
                        box_to_color_map[box] = STANDARD_COLORS[
                            classes[i] % len(STANDARD_COLORS)]
            except Exception:
                print "..."

    # Draw all boxes onto image.
    for box, color in box_to_color_map.items():
        ymin, xmin, ymax, xmax = box
        if instance_masks is not None:
            draw_mask_on_image_array(image,
                                     box_to_instance_masks_map[box],
                                     color=color)
        #return (ymin, ymax, xmin, xmax)
        f.setYmin(ymin)
        f.setYmax(ymax)
        f.setXmin(xmin)
        f.setXmax(xmax)
        f.setimage(image)
        #save_image_array_as_png(image,"/home/uawsscu/PycharmProjects/DetectML/object_recognition_detection/pic/l3l.png")
        # print "(ymin ", ymin, ")(xmin ", xmin,")(ymax ", ymax,")(xmax ", xmax,")"
        draw_bounding_box_on_image_array(
            image,
            ymin,
            xmin,
            ymax,
            xmax,
            color=color,
            thickness=line_thickness,
            display_str_list=box_to_display_str_map[box],
            use_normalized_coordinates=use_normalized_coordinates)

        if keypoints is not None:
            draw_keypoints_on_image_array(
                image,
                box_to_keypoints_map[box],
                color=color,
                radius=line_thickness / 2,
                use_normalized_coordinates=use_normalized_coordinates)
Example #28
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt 
import scipy as sp

img_file = 'cn-tower-1.jpg'

img = cv2.imread(img_file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
src = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)


detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")

src = np.uint8(src)
kp = detector.detect(src, None) 
Example #29
from __future__ import print_function
import numpy as np
import cv2
import imutils

# load the image and convert it to grayscale
image = cv2.imread("kp_next.jpg")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# handle if we are detecting FAST keypoints in the image for OpenCV 2.4
if imutils.is_cv2():
    detector = cv2.FeatureDetector_create("FAST")
    kps = detector.detect(gray)
else:
    detector = cv2.FastFeatureDetector_create()
    kps = detector.detect(gray, None)

print("# of keypoints: {}".format(len(kps)))

# loop over the keypoints and draw them
for kp in kps:
    r = int(0.5 * kp.size)
    (x, y) = np.int0(kp.pt)
    cv2.circle(image, (x, y), r, (0, 255, 255), 2)

cv2.imshow("Images", np.hstack([orig, image]))
cv2.waitKey(0)
Example #30
# (reconstructed fragment) the snippet begins mid-call; 'img' is assumed to be
# a grayscale uint8 image loaded earlier with cv2
from skimage.feature import hog, greycomatrix
from skimage import exposure

fd, hog_image = hog(img,
                    orientations=8,
                    pixels_per_cell=(8, 8),
                    cells_per_block=(1, 1),
                    visualise=True)

# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                in_range=(0, 0.02)) * 8
glcm = greycomatrix(hog_image_rescaled, [5], [0],
                    64,
                    symmetric=True,
                    normed=True)

#http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_brief/py_brief.html#brief
#BRIEF
star = cv2.FeatureDetector_create("STAR")
# Initiate BRIEF extractor
brief = cv2.DescriptorExtractor_create("BRIEF")
# find the keypoints with STAR
kp = star.detect(img, None)
# compute the descriptors with BRIEF
kp, des = brief.compute(img, kp)
des /= 8
glcm = greycomatrix(des, [5], [0], 64, symmetric=True, normed=True)
#http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.html#sift-intro
#SIFT
sift = cv2.SIFT()
kp, des = sift.detectAndCompute(img, None)
des /= 8
glcm = greycomatrix(des, [5], [0], 64, symmetric=True, normed=True)