import cv
import numpy as np
from scipy.misc import imsave  # assumed source of imsave as used below


def featurePoints(greyscaleImg, mask):
    surf_keypoints, surf_descriptors = cv.ExtractSURF(greyscaleImg, mask,
                                                      cv.CreateMemStorage(),
                                                      (1, 3000, 3, 4))
    # `img` (the original RGB frame) is assumed to come from the enclosing
    # module scope, here and for img3 below; only the grayscale copy and
    # mask are passed in.
    img2 = np.array(img)
    for ((x, y), laplacian, size, dir, hessian) in surf_keypoints:
        if laplacian == -1:
            img2[int(y), int(x), 0] = 255
            img2[int(y), int(x), 1] = 0
            img2[int(y), int(x), 2] = 0
        else:
            img2[int(y), int(x), 0] = 0
            img2[int(y), int(x), 1] = 0
            img2[int(y), int(x), 2] = 255
    imsave('/home/cplab/workspace/imageex/src/imageex/static/SURFFFFFF.png',
           img2)

    #params = (maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize)
    star_keypoints = cv.GetStarKeypoints(greyscaleImg, cv.CreateMemStorage(),
                                         (8, 30, 10, 8, 3))
    img3 = np.array(img)
    for ((x, y), size, response) in star_keypoints:
        if response >= 0:
            img3[int(y), int(x), 0] = 255
            img3[int(y), int(x), 1] = 0
            img3[int(y), int(x), 2] = 0
        else:
            img3[int(y), int(x), 0] = 0
            img3[int(y), int(x), 1] = 0
            img3[int(y), int(x), 2] = 255
    imsave('/home/cplab/workspace/imageex/src/imageex/static/STARRRRR.png',
           img3)
    return []
Example #2
def ExtractSURF(im, min_hessian=300):
    '''
    Uses OpenCV to extract SURF keypoints.  Currently does not compute SURF features.
    
    TODO: An option should be added to also compute and return the SURF descriptors.
    TODO: This should be extended with options for octaves and levels.
    TODO: I believe there are no memory leaks but this should be checked. cvSURFParams?
    '''
    cvim = im.asOpenCVBW()
    #mat = int(cvim.this)
    min_hessian = float(min_hessian)
    #TODO: OpenCV python interface now includes cv.ExtractSURF(cvim, mask, storage, params)
    #This is my (Steve's) attempt at this, but I am concerned we're not returning
    # some of the information once this gets back to the caller... perhaps the
    # parent class is filtering out the additional data that SURF points provide?

    #TODO: Now that we have the descriptors, we need to return them to user if desired.
    (keypts, _) = cv.ExtractSURF(cvim, None, cv.CreateMemStorage(),
                                 (0, min_hessian, 3, 1))

    keypoints = list()
    for ((x, y), laplacian, size, direction, hessian) in keypts:
        keypoints.append((hessian, x, y, size, direction, laplacian))

    return keypoints
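
The TODO above asks for a variant that also returns the descriptors. A minimal sketch, assuming the same pv.Image input and reusing the parameter tuple from the function above (the new function name is illustrative):

def ExtractSURFWithDescriptors(im, min_hessian=300):
    '''
    Like ExtractSURF above, but also returns the list of SURF descriptors,
    index-aligned with the returned keypoints.
    '''
    cvim = im.asOpenCVBW()
    (keypts, descriptors) = cv.ExtractSURF(cvim, None, cv.CreateMemStorage(),
                                           (0, float(min_hessian), 3, 1))
    keypoints = [(hessian, x, y, size, direction, laplacian)
                 for ((x, y), laplacian, size, direction, hessian) in keypts]
    return keypoints, descriptors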
Example #3
    def __get_key_points(self, image_path):

        DESCRIPTORS_MIN = 300
        DESCRIPTORS_MAX = 420

        keypoints = 0
        descriptors = 0

        threshold = 600      # initial Hessian threshold
        threshold_step = 40
        do_surf = True

        image = cv.LoadImage(image_path, cv.CV_LOAD_IMAGE_GRAYSCALE)

        # Re-run SURF, nudging the Hessian threshold until the descriptor
        # count lands inside [DESCRIPTORS_MIN, DESCRIPTORS_MAX].
        while (do_surf):

            (keypoints, descriptors) = cv.ExtractSURF(image, None,
                                                      cv.CreateMemStorage(),
                                                      (1, threshold, 3, 1))

            print "Descriptors: %d (threshold=%d)" % (len(descriptors), threshold)

            if (len(descriptors) < DESCRIPTORS_MIN):
                # too few features: lower the threshold to admit more
                threshold = threshold - threshold_step

            if (len(descriptors) > DESCRIPTORS_MAX):
                # too many features: raise the threshold to prune
                threshold = threshold + threshold_step

            if (len(descriptors) >= DESCRIPTORS_MIN
                    and len(descriptors) <= DESCRIPTORS_MAX):
                do_surf = False

        return (keypoints, descriptors)
Example #4
File: surf.py Project: wolfram2012/MOSSE
def surf(im,
         mask=None,
         extended=False,
         hessianThreshold=500,
         nOctaves=3,
         nOctaveLayers=4):
    '''
    Keypoints contain:
      0: center point
      1: sign of the laplacian (-1, 0, +1)
      2: scale (diameter or radius)
      3: angle
      4: response value
    
    Descriptors contain 64 floating point numbers (128 when extended=True)
    
    @param im: image to extract features from.
    @type im:  pv.Image
    @param mask: a mask that controls where features are extracted from.
    @type mask:  OpenCV 8bit image
    @return: (keypoints,descriptors)
    '''
    cvim = im.asOpenCVBW()
    keypoints, descriptors = cv.ExtractSURF(
        cvim, mask, cv.CreateMemStorage(),
        (int(extended), hessianThreshold, nOctaves, nOctaveLayers))
    return keypoints, descriptors
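
A hedged usage sketch for the wrapper above; the pyvision import matches the pv.Image type named in the docstring, and the file name is an assumption:

import pyvision as pv

img = pv.Image("frame.jpg")  # illustrative input
keypoints, descriptors = surf(img, hessianThreshold=500)
for ((x, y), laplacian_sign, scale, angle, response) in keypoints:
    print "keypoint at (%.1f, %.1f): scale=%d angle=%.1f" % (x, y, scale, angle)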
Example #5
def detect_surf(image, equalize=False):    
    if equalize: cv.EqualizeHist(image, image)
    keypoints, descriptors = cv.ExtractSURF(image, None, cv.CreateMemStorage(),
                                            (0, 800, 4, 5))
    for keypoint in keypoints:
        x, y = int(keypoint[0][0]), int(keypoint[0][1])
        cv.Circle(image, (x, y), 1, cv.RGB(0, 0, 255), 3, 8, 0)
    return image
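
A short usage sketch for detect_surf, assuming "scene.png" exists; the grayscale load flag matters because both EqualizeHist and ExtractSURF expect a single-channel 8-bit image:

image = cv.LoadImage("scene.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
annotated = detect_surf(image, equalize=True)
cv.SaveImage("scene_surf.png", annotated)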
Example #6
    def add_features(self, cv_image, face):
        """ Look for any new features around the current feature cloud """
        """ Create the ROI mask"""
        roi = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        """ Begin with all black pixels """
        cv.Zero(roi)
        """ Get the coordinates and dimensions of the current track box """
        try:
            ((x, y), (w, h), a) = face.track_box
        except:
            logger.info("Track box has shrunk to zero...")
            return
        """ Expand the track box to look for new features """
        w = int(face.expand_roi * w)
        h = int(face.expand_roi * h)

        roi_box = ((x, y), (w, h), a)
        """ Create a filled white ellipse within the track_box to define the ROI. """
        cv.EllipseBox(roi, roi_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        """ Create the temporary scratchpad images """
        eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
        temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

        features = []
        if self.feature_type == 0:
            """ Get the new features using Good Features to Track """
            features = cv.GoodFeaturesToTrack(self.grey,
                                              eig,
                                              temp,
                                              self.max_count,
                                              self.quality,
                                              self.good_feature_distance,
                                              mask=roi,
                                              blockSize=3,
                                              useHarris=0,
                                              k=0.04)

        elif self.feature_type == 1:
            """ Get the new features using SURF """
            features = []
            (surf_features, descriptors) = cv.ExtractSURF(
                self.grey, roi, cv.CreateMemStorage(0),
                (0, self.surf_hessian_quality, 3, 1))
            for feature in surf_features:
                features.append(feature[0])
        """ Append new features to the current list if they are not too
            far from the current cluster """
        for new_feature in features:
            try:
                distance = self.distance_to_cluster(new_feature, face.features)
                if distance > self.add_feature_distance:
                    face.features.append(new_feature)
            except:
                pass
        """ Remove duplicate features """
        face.features = list(set(face.features))
Example #7
 def train_on_file(self, filename, display_name):
     """train the system on the file in filename with the given name"""
     print "loading image %s" % filename
     example_img_gray = cv.LoadImage(filename, iscolor=cv.CV_LOAD_IMAGE_GRAYSCALE)
     (object_keypoints, object_descriptors) = cv.ExtractSURF(example_img_gray, None, cv.CreateMemStorage(), self.surf_params)
     self.keypoints += object_keypoints
     self.descriptors += object_descriptors
     self.displaynames[filename] = display_name
     self.names += len(object_keypoints)*[filename]
     object_image = cv.CreateImage(cv.GetSize(example_img_gray), cv.IPL_DEPTH_8U, 3)
     cv.Set(object_image,0)
     cv.CvtColor(example_img_gray, object_image, cv.CV_GRAY2RGB)
     self.images[filename] = object_image
     self.sizes[display_name] = cv.GetSize(example_img_gray)
     self.modelKeypoints[display_name] = object_keypoints
Example #8
    def get_similar_actresses(self, target_image_path):

        img = cv.LoadImageM(target_image_path, cv.CV_LOAD_IMAGE_GRAYSCALE)

        (keypoints, descriptors) = cv.ExtractSURF(img, None,
                                                  cv.CreateMemStorage(),
                                                  (1, 500, 3, 4))

        bag_of_keypoints = self._calc_a_bag_of_keypoints(descriptors)

        distance = ((self.pd_feature_data - bag_of_keypoints)**2).sum(axis=1)
        distance.sort(ascending=True)

        # distance = ((self.pd_feature_data * bag_of_keypoints)).sum(axis=1)
        # distance.sort(ascending=False)

        return distance.index.values[0:10]
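
The private _calc_a_bag_of_keypoints helper is not shown here. A typical bag-of-visual-words version, sketched under the assumption of a pre-learned codebook (a k x 64 array of cluster centers, which is not part of the original class), assigns each SURF descriptor to its nearest codeword and returns a normalized histogram:

import numpy as np

def calc_bag_of_keypoints(descriptors, codebook):
    hist = np.zeros(len(codebook))
    for d in np.asarray(descriptors, dtype=np.float64):
        # nearest codeword by squared Euclidean distance
        nearest = np.argmin(((codebook - d) ** 2).sum(axis=1))
        hist[nearest] += 1
    # normalize so images with different keypoint counts stay comparable
    return hist / max(hist.sum(), 1.0)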
Example #9
File: Surf.py Project: WalterBejar/catena
    def Execute(self):
        images = self.GetInputStageValue(0, "images")
        
        self.StartProcess()
    
        import cv
        MIN_DESC_VAL = -0.5
        MAX_DESC_VAL = 0.8
        CONV_FACTOR = 255.0 / (MAX_DESC_VAL-MIN_DESC_VAL)
        kds = []
        
        for im in images.GetImages():
            keypointDescriptorFile = os.path.splitext(im.GetFilePath())[0] + ".key"
            
            if (Common.Utility.ShouldRun(self._properties["Force Run"], keypointDescriptorFile)):
                
                cvim = cv.LoadImageM(im.GetFilePath(), cv.CV_LOAD_IMAGE_GRAYSCALE)
                keypoints, descriptors = cv.ExtractSURF(cvim, None, cv.CreateMemStorage(),
                                                        (1,
                                                         self._properties["Hessian Threshold"],
                                                         self._properties["Number Octaves"],
                                                         self._properties["Number Octave Layers"]))
                l = []
                for i, descriptor in enumerate(descriptors):

                    # Rescale each descriptor component from
                    # [MIN_DESC_VAL, MAX_DESC_VAL] into a 0-255 byte.
                    descr = [int((x-MIN_DESC_VAL)*CONV_FACTOR) for x in descriptor]

                    # Lowe's .key layout stores (row, col, scale, orientation in
                    # radians), hence the y/x swap and degrees-to-radians here.
                    l.append(FeatureExtraction.KeypointDescriptor(keypoints[i][0][1],
                                                                  keypoints[i][0][0],
                                                                  keypoints[i][2],
                                                                  math.radians(keypoints[i][3]),
                                                                  descr))
                
                kdfl = FeatureExtraction.KeypointDescriptorFileLowe(l)
                kdfl.Write(keypointDescriptorFile)
                kdfl = FeatureExtraction.KeypointDescriptorFileLowe(keypointDescriptorFile, False)
                kds.append(kdfl)
        
        kds = FeatureExtraction.KeypointDescriptors(images.GetPath(), kds, False)
        self.SetOutputValue("keypointDescriptors", kds)
Example #10
import cv
from math import radians, cos, sin


def findSurfBlobs(cvImg):
  """ 
  Run the OpenCV ExtractSURF algorithm on the given input cvImage. 
  Render the results as circles with directional lines onto a copy of
  the input image and return this cvImage as the result of the method call.
  @param cvImg: input OpenCV image to perform SURF analysis on.
  @return: a copy of the input image with SURF results rendered onto it.
  """
  #Algorithm params tuple (extended, hessianThreshold, nOctaves, nOctaveLayers):
  #extended: 0 means basic descriptors (64 elements each), 1 means extended (128 each)
  #hessianThreshold: only features with hessian larger than that are extracted. 
  #  good default value is ~300-500 (can depend on the average local contrast 
  #  and sharpness of the image). user can further filter out some features based 
  #  on their hessian values and other characteristics.
  #nOctaves: the number of octaves to be used for extraction. 
  #  With each next octave the feature size is doubled (3 by default)
  #nOctaveLayers: The number of layers within each octave (4 by default)
  storage = cv.CreateMemStorage()
  (keypts, descs) = cv.ExtractSURF(cvImg, None, storage, (1, 100, 4, 1))
  
  # colour copy of the input for rendering (sizes must match for CvtColor)
  cimg = cv.CreateImage(cv.GetSize(cvImg), cv.IPL_DEPTH_8U, 3)
  cv.CvtColor(cvImg, cimg, cv.CV_GRAY2BGR)
  
  maxSize = 1
  for ((x, y), laplacian, size, dir, hessian) in keypts:
    maxSize = max(maxSize, size)
  
  for ((x, y), laplacian, size, dir, hessian) in keypts:
    print "x=%d y=%d laplacian=%d size=%d dir=%f hessian=%f" % (x, y, laplacian, size, dir, hessian)
    if size>=maxSize:
      x = int(x)
      y = int(y)
      cv.Circle(cimg, (x,y), size, cv.CV_RGB(0,0,255))
      deg = radians(dir)
      xd = int((x+(size*cos(deg))))
      yd = int(y+(size*sin(deg)*-1))
      cv.Line(cimg, (x,y), (xd,yd), cv.CV_RGB(0,0,255))
  print " "
  return cimg
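
An illustrative driver for the function above; the file names are assumptions:

frame = cv.LoadImage("frame.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
result = findSurfBlobs(frame)
cv.SaveImage("frame_surf.png", result)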
Example #11
 def process_image(self, image, visualize=False, min_matches=3, min_distance=0.05, close_objects=[]):
     """
     Process the next image, trying to recognize any trained objects
     Parameters:
         int min_matches: number of minimum keypoint mathches
         int min_distances: minimum distance between keypoints matches
     """
     self.imageSize = cv.GetSize(image)
     
     
     if image.nChannels > 1:
         image_gray = cv.CreateImage(self.imageSize, cv.IPL_DEPTH_8U, 1)
         cv.Set(image_gray,0)
         cv.CvtColor(image, image_gray, cv.CV_RGB2GRAY)
     else:
         image_gray = image
     try:
         (keypoints2, descriptors2) = cv.ExtractSURF(image_gray, None, cv.CreateMemStorage(), self.surf_params)
         neighbours, dists = self.NNdatabase.nn_index(numpy.array(descriptors2), checks=self.db_params["checks"])
         close_enough = self.get_close_indexes(dists, min_distance)
         keys_per_name = self.get_keys_per_name(neighbours, close_enough)
         deletes_counter = 0
         if close_objects != []:
             for key in keys_per_name.keys():
                 if key not in close_objects:
                     del(keys_per_name[key])
                     deletes_counter += 1
             print 'Deleted KPN:\t', deletes_counter
             print 'Remaining KPN:\t', len(keys_per_name)
         detections = self.detect_objects(keys_per_name, self.keypoints, keypoints2, dists, neighbours, image, visualize, min_matches)
         return detections
     except:
         #If no objects detected only show image:
         if visualize:
             cv.ShowImage(self.vision_window, image)
             cv.WaitKey(10)
         return []
Example #12
def extract_surf(jpgfile):
    start = time.time()
    out = os.path.join(os.path.dirname(jpgfile),
                       os.path.basename(jpgfile)[:-4] + 'surf.npy')
    if os.path.exists(out):
        INFO('%s already exists' % out)
        return

    im = cv.LoadImageM(jpgfile, cv.CV_LOAD_IMAGE_GRAYSCALE)
    INFO('cv loaded %dx%d image' % (im.rows, im.cols))

    g, features = cv.ExtractSURF(im, None, cv.CreateMemStorage(),
                                 (0, 500, 3, 4))
    data = np.ndarray(len(features), SURFReader.surf_dtype)

    for i in range(len(features)):
        data[i]['vec'] = np.fromiter(features[i], np.float32)
        data[i]['geom'] = np.fromiter([g[i][0][0], g[i][0][1], g[i][2]],
                                      np.uint16)
        data[i]['index'] = 0


## Simple Quantization into bytes
#  for i in range(len(features)):
#    surfvalues = np.fromiter(features[i], np.float)
#
#    assert max(surfvalues) <= 1.0
#    assert min(surfvalues) >= -1.0
#
#    data[i]['vec'] = np.int8(127*surfvalues)
#    data[i]['geom'] = np.fromiter([g[i][0][0], g[i][0][1], g[i][2]], np.uint16)
#    data[i]['index'] = 0

    save_atomic(lambda d: np.save(d, data), out)
    INFO('cv wrote %d features' % len(features))
    INFO_TIMING('took %f seconds' % (time.time() - start))
Example #13
File: features.py Project: wklharry/hrl
def surf(image_gray, params=(1, 3000, 3, 4)):
    surf_stor = cv.CreateMemStorage()
    surf_r = cv.ExtractSURF(image_gray, None, surf_stor, params)
    del surf_stor
    return surf_r
Example #14
    # CAPTURE IMAGE
    img = cv.QueryFrame(camera)
    cv.CvtColor(img, img_rgb, cv.CV_BGR2RGB)
    cv.CvtColor(img_rgb, grayscale, cv.CV_RGB2GRAY)
    
    # CONVERT IMAGE to PIL
    img_pil = Image.fromstring("RGB", cv.GetSize(img_rgb),
                           img_rgb.tostring())
    pgimg = pygame.image.frombuffer(img_pil.tostring(),
                                img_pil.size,img_pil.mode)

    
    #descriptors =[]
    try:
        (keypoints, descriptors) = cv.ExtractSURF(grayscale, None,
                                                  cv.CreateMemStorage(),
                                                  (0, 2000, 3, 2))
        # DRAW KEYPOINTS

        if DRAW_DESC:
            for ((x, y), laplacian, size, dir, hessian) in keypoints:
                radio = size*1.2/9.*2
                #print "radioOld: ", int(radio)
                color = (255, 0, 0)
                
                if radio < 3:
                    radio = 2
                    color = (255, 0, 200)
                
Example #15
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import logging
import cv
import numpy as np
import os, pickle

logging.basicConfig(level=logging.DEBUG)

PICS_DIR = "256_ObjectCategories"
CATS = {'moto': '145.motorbikes-101', 'plane': '251.airplanes-101'}

for name, dir in CATS.items():
    dir = os.path.join(PICS_DIR, dir)
    surfs = {}
    for file in os.listdir(dir):
        file = os.path.join(dir, file)
        logging.debug('%s in progress ...' % file)
        image = cv.LoadImage(file)
        image_gray = cv.CreateImage(cv.GetSize(image), image.depth, 1)
        cv.CvtColor(image, image_gray, cv.CV_BGR2GRAY)
        k, v = cv.ExtractSURF(image_gray, None, cv.CreateMemStorage(),
                              (1, 300, 3, 1))
        surfs[file] = np.array(v)
    pickle.dump(surfs, open(name, 'wb'))
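
Each pickle written above maps an image path to a numpy array of SURF descriptors; reloading for later training is one line (illustrative):

surfs = pickle.load(open('moto', 'rb'))  # {file path: descriptor array}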
Example #16
    def track_lk(self, cv_image, face):
        feature_box = None
        """ Initialize intermediate images if necessary """
        if not face.pyramid:
            face.grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.features = []
        """ Create a grey version of the image """
        cv.CvtColor(cv_image, face.grey, cv.CV_BGR2GRAY)
        """ Equalize the histogram to reduce lighting effects """
        cv.EqualizeHist(face.grey, face.grey)

        if face.track_box and face.features != []:
            """ We have feature points, so track and display them """
            """ Calculate the optical flow """
            face.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                face.prev_grey, face.grey, face.prev_pyramid, face.pyramid,
                face.features, (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01),
                self.flags)
            """ Keep only high status points """
            face.features = [p for (st, p) in zip(status, face.features) if st]

        elif face.track_box and self.is_rect_nonzero(face.track_box):
            """ Get the initial features to track """
            """ Create a mask image to be used to select the tracked points """
            mask = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            """ Begin with all black pixels """
            cv.Zero(mask)
            """ Get the coordinates and dimensions of the track box """
            try:
                x, y, w, h = face.track_box
            except:
                return None

            if self.auto_face_tracking:
                #                """ For faces, the detect box tends to extend beyond the actual object so shrink it slightly """
                #                x = int(0.97 * x)
                #                y = int(0.97 * y)
                #                w = int(1 * w)
                #                h = int(1 * h)
                """ Get the center of the track box (type CvRect) so we can create the
                    equivalent CvBox2D (rotated rectangle) required by EllipseBox below. """
                center_x = int(x + w / 2)
                center_y = int(y + h / 2)
                roi_box = ((center_x, center_y), (w, h), 0)
                """ Create a filled white ellipse within the track_box to define the ROI. """
                cv.EllipseBox(mask, roi_box, cv.CV_RGB(255, 255, 255),
                              cv.CV_FILLED)
            else:
                """ For manually selected regions, just use a rectangle """
                pt1 = (x, y)
                pt2 = (x + w, y + h)
                cv.Rectangle(mask, pt1, pt2, cv.CV_RGB(255, 255, 255),
                             cv.CV_FILLED)
            """ Create the temporary scratchpad images """
            eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
            temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

            if self.feature_type == 0:
                """ Find keypoints to track using Good Features to Track """
                face.features = cv.GoodFeaturesToTrack(
                    face.grey,
                    eig,
                    temp,
                    self.max_count,
                    self.quality,
                    self.good_feature_distance,
                    mask=mask,
                    blockSize=self.block_size,
                    useHarris=self.use_harris,
                    k=0.04)

            elif self.feature_type == 1:
                """ Get the new features using SURF """
                (surf_features, descriptors) = cv.ExtractSURF(
                    face.grey, mask, cv.CreateMemStorage(0),
                    (0, self.surf_hessian_quality, 3, 1))
                for feature in surf_features:
                    face.features.append(feature[0])
            if self.auto_min_features:
                """ Since the detect box is larger than the actual face
                    or desired patch, shrink the number of features by 10% """
                face.min_features = int(len(face.features) * 0.9)
                face.abs_min_features = int(0.5 * face.min_features)
        """ Swapping the images """
        face.prev_grey, face.grey = face.grey, face.prev_grey
        face.prev_pyramid, face.pyramid = face.pyramid, face.prev_pyramid
        """ If we have some features... """
        if len(face.features) > 0:
            """ The FitEllipse2 function below requires us to convert the feature array
                into a CvMat matrix """
            try:
                self.feature_matrix = cv.CreateMat(1, len(face.features),
                                                   cv.CV_32SC2)
            except:
                pass
            """ Draw the points as green circles and add them to the features matrix """
            i = 0
            for the_point in face.features:
                if self.show_features:
                    cv.Circle(self.marker_image,
                              (int(the_point[0]), int(the_point[1])), 2,
                              (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
                try:
                    cv.Set2D(self.feature_matrix, 0, i,
                             (int(the_point[0]), int(the_point[1])))
                except:
                    pass
                i = i + 1
            """ Draw the best fit ellipse around the feature points """
            if len(face.features) > 6:
                feature_box = cv.FitEllipse2(self.feature_matrix)
            else:
                feature_box = None
            """ Publish the ROI for the tracked object """
            # try:
            #     (roi_center, roi_size, roi_angle) = feature_box
            # except:
            #     logger.info("Patch box has shrunk to zeros...")
            #     feature_box = None

            # if feature_box and not self.drag_start and self.is_rect_nonzero(face.track_box):
            #     self.ROI = RegionOfInterest()
            #     self.ROI.x_offset = min(self.image_size[0], max(0, int(roi_center[0] - roi_size[0] / 2)))
            #     self.ROI.y_offset = min(self.image_size[1], max(0, int(roi_center[1] - roi_size[1] / 2)))
            #     self.ROI.width = min(self.image_size[0], int(roi_size[0]))
            #     self.ROI.height = min(self.image_size[1], int(roi_size[1]))

            # self.pubROI.publish(self.ROI)

        if feature_box is not None and len(face.features) > 0:
            return feature_box
        else:
            return None
Example #17
 def _extract_surf_features(self, img_path):
     img = cv.LoadImageM(img_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
     _, descriptors = cv.ExtractSURF(img, None, cv.CreateMemStorage(),
                                     (1, 500, 3, 4))
     return descriptors
Example #18
#! /usr/bin/python
# -*- encoding: utf-8 -*-

import cv
from sys import argv
'''
Pass an image path as a command-line argument to draw the SURF keypoints.
'''
im = cv.LoadImageM(argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
(keypoints, descriptors) = cv.ExtractSURF(im, None, cv.CreateMemStorage(),
                                          (1, 500, 3, 4))

for ((x, y), laplacian, size, dir, hessian) in keypoints:
    cv.Circle(im, (int(x), int(y)), int(hessian / 1000), cv.RGB(255, 255, 255),
              1, cv.CV_AA, 0)

cv.SaveImage("face_detected.jpg", im)
#cv.ShowImage("aaa",im)
Example #19
def main(argv=None):
    if argv is None:
        argv = sys.argv
    flann = FLANN()

    try:
        sample_dir = argv[1]
        sample_name = argv[2]
        start_frame = int(argv[3])
        end_frame = int(argv[4])
        hessian_threshold = int(argv[5])
    except IndexError:
        print "Argument error\n Usage: python tracker.py sample_dir " \
              "sample_name start_frame end_frame hessian_threshold"
        return

    # Loading first frame
    im = cv.LoadImageM(sample_dir + sample_name + str(start_frame) + '.jpg',
                       cv.CV_LOAD_IMAGE_GRAYSCALE)

    # Extracting SURF descriptors from reference image
    # to make selecting the tracked region easier
    (k, d) = cv.ExtractSURF(im, None, cv.CreateMemStorage(),
                            (0, hessian_threshold, 3, 1))

    draw_features(im, k)

    frames = [im]

    # Create window and controls
    tr = get_tracked_region(im)

    # Extracting descriptors from each target image and
    # calculating the distances to the nearest neighbors
    # Tracked region must be updated in each step
    for x in range(start_frame, end_frame):
        im2 = cv.LoadImageM(sample_dir + sample_name + str(x) + '.jpg',
                            cv.CV_LOAD_IMAGE_GRAYSCALE)
        # Creating mask for extracting features from new image
        mask = tr.get_mask()
        (k2, d2) = cv.ExtractSURF(im2, mask, cv.CreateMemStorage(),
                                  (0, hessian_threshold, 3, 1))
        d2 = array(d2, dtype=float32)
        result = None
        dists = None
        if x == start_frame:
            params = flann.build_index(d2, target_precision=0.9)
            tr.size = 1.1 * tr.size
        else:
            if len(d2) > 0:
                result, dists = flann.nn_index(d2, 1, checks=params['checks'])
                # Creating full neighbor table
                neighbors = []
                nearest_neighbors = []
                for n in range(len(d2)):
                    neighbors.append((d2[n], k2[n], result[n], dists[n]))
                # Removing not-nearest neighbors
                for k in range(len(d2)):
                    k_neighbors = filter(lambda x: x[2] == k, neighbors)
                    num_neighbors = len(k_neighbors)
                    if num_neighbors >= 1:
                        if num_neighbors > 1:
                            nearest = reduce(
                                lambda x, y: x
                                if x[3] < y[3] else y, k_neighbors)
                        else:
                            nearest = k_neighbors[0]
                        nearest_neighbors.append(nearest)
                nearest_keypoints = [kp[1] for kp in nearest_neighbors]
                if len(nearest_keypoints) > 0:
                    centroid = calc_centroid(nearest_keypoints)
                    tr.xpos = centroid[0] - tr.size / 2
                    tr.ypos = centroid[1] - tr.size / 2
                draw_tracked_region(im2, tr)
                #print "Frame %d had %d feature(s)!" % (x, len(d2))
            else:
                #print "Frame %d had no features!" % x
                pass

        frames.append((im2, k2, d2, result, dists))

    cv.NamedWindow("target")

    esc_pressed = False

    while True:
        for frame in frames:
            cv.ShowImage("target", frame[0])
            if cv.WaitKey(50) == 27:
                esc_pressed = True
                break
        if esc_pressed:
            cv.DestroyAllWindows()
            break
    return frames, params
Example #20
    final_image = array(concatenate((img1, img2), axis=1))  # Concatenate the two images

    for loopVar1 in range(0, len(matches)):  # For each pair of matched points
        color = (255 * (loopVar1 % 4), 255 * ((loopVar1 + 1) % 4),
                 255 * ((loopVar1 + 2) % 4))
        # Points are stored (row, col), so the indexes are swapped to (x, y)
        cv2.line(final_image,
                 (int(matches[loopVar1][0][1]), int(matches[loopVar1][0][0])),
                 (int(matches[loopVar1][1][1]) + offset, int(matches[loopVar1][1][0])),
                 color, 1)
        cv2.circle(final_image, (int(matches[loopVar1][0][1]),
                   int(matches[loopVar1][0][0])), 2, (255, 0, 0), -1)  # Draw a point
        cv2.circle(final_image, (int(matches[loopVar1][1][1]) + offset,
                   int(matches[loopVar1][1][0])), 2, (255, 0, 0), -1)  # Draw a point

    cv2.imshow('final_matched', final_image)
    cv2.imwrite("matched_SURF.jpg", final_image)  # Save the resulting image
    cv2.waitKey(0)  # Wait for a key press
#---------------------------------------------------------------------------------------------#	
									
#---------------------------------------------------------------------------------------------#
# Load image 1
orig_img_1 = cv2.imread(filename1,1)
input_img_1 = cv.LoadImage(filename1,0)

# Load image 2
orig_img_2 = cv2.imread(filename2,1)
input_img_2 = cv.LoadImage(filename2,0)

#---------------------------------------------------------------------------------------------#
# Use OpenCV's built in SURF function to get the corner points and their descriptors
(points1, desc1)=cv.ExtractSURF(input_img_1, None, cv.CreateMemStorage(), (0, surf_th, 3, 1))
(points2, desc2)=cv.ExtractSURF(input_img_2, None, cv.CreateMemStorage(), (0, surf_th, 3, 1))

matches=[]
find_matches(points1, points2, desc1, desc2, matches, score_th, ratio_th) # Use the descriptors to find the matches
draw_lines(orig_img_1, orig_img_2, matches)	# Draw the lines between matches points
print len(matches)				# print the number of matched points
img = cv.LoadImageM("/home/lforet/Downloads/photo.JPG")
tempimage = cv.LoadImageM("/home/lforet/Downloads/eye.JPG")

size = cv.GetSize(img)
size2 = cv.GetSize(tempimage)

width = (size[0] - size2[0] + 1)
height = (size[1] - size2[1] + 1)
resultimage = cv.CreateImage((width, height), cv.IPL_DEPTH_32F, 1)
cv.MatchTemplate(img, tempimage, resultimage, 1)
cv.ShowImage("result", resultimage)
cv.ShowImage("photo", img)
# wait for a key press to end
cv.WaitKey(0)

img = cv.LoadImageM("2005_Nickel_Proof_Obv.tif", cv.CV_LOAD_IMAGE_GRAYSCALE)
(keypoints, descriptors) = cv.ExtractSURF(img, None, cv.CreateMemStorage(),
                                          (0, 30000, 3, 1))
print len(keypoints), len(descriptors)

for ((x, y), laplacian, size, dir, hessian) in keypoints:
    print "x=%d y=%d laplacian=%d size=%d dir=%f hessian=%f" % (
        x, y, laplacian, size, dir, hessian)
    cv.Circle(img, (int(x), int(y)), size, (255, 0, 0), 1, cv.CV_AA, 0)

cv.ShowImage("SURF", img)

stor = cv.CreateMemStorage()
seq = cv.FindContours(canny_image, stor, cv.CV_RETR_LIST,
                      cv.CV_CHAIN_APPROX_SIMPLE)

cv.DrawContours(canny_image, seq, (255, 0, 0), (0, 0, 255), 20, thickness=1)
cv.ShowImage("Contours", canny_image)