Ejemplo n.º 1
0
  def train(self, featurefiles, k=100, subsampling=10):
    """Build a visual vocabulary from the feature files in |featurefiles|.

    Runs k-means with k cluster centers; only every |subsampling|-th
    descriptor is clustered, for speed. Sets self.voc, self.word_count,
    self.idf and self.trainingdata.
    """
    image_count = len(featurefiles)

    # Load the descriptor matrix of every feature file.
    descr = [sift.read_features_from_file(name)[1] for name in featurefiles]
    # One big matrix with all descriptors stacked row-wise for k-means.
    descriptors = numpy.vstack(descr)

    # Cluster a subsample of the descriptors (single k-means run).
    self.voc, distortion = vq.kmeans(descriptors[::subsampling, :], k, 1)
    self.word_count = self.voc.shape[0]

    # Visual-word histogram for each training image.
    imwords = numpy.zeros((image_count, self.word_count))
    for idx, image_descr in enumerate(descr):
      imwords[idx] = self.project(image_descr)

    # Inverse document frequency: log(N / (1 + #images containing word)).
    occurence_count = numpy.sum((imwords > 0) * 1, axis=0)
    self.idf = numpy.log(image_count / (occurence_count + 1.0))
    self.trainingdata = featurefiles
Ejemplo n.º 2
0
  def train(self,featurefiles,k=100,subsampling=10):
    """ Train a vocabulary from the features read from the files listed in
      featurefiles, using k-means with k visual words. Training data can
      be thinned with subsampling for speedup. """

    nbr_images = len(featurefiles)
    # Read the features from file.
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    descriptors = descr[0] #stack all features for k-means
    for i in arange(1,nbr_images):
      descr.append(sift.read_features_from_file(featurefiles[i])[1])
      descriptors = vstack((descriptors,descr[i]))

    # k-means: the last argument determines the number of runs.
    self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
    self.nbr_words = self.voc.shape[0]

    # Project each training image onto the vocabulary in turn.
    imwords = zeros((nbr_images,self.nbr_words))
    for i in range( nbr_images ):
      imwords[i] = self.project(descr[i])

    # Number of images in which each word occurs.
    nbr_occurences = sum( (imwords > 0)*1 ,axis=0)

    # Inverse document frequency with add-one smoothing on the counts.
    self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.trainingdata = featurefiles
Ejemplo n.º 3
0
 def train(self,featurefiles,k=100,subsampling=10):
     """ Train a vocabulary from features in files listed 
         in featurefiles using k-means with k number of words. 
         Subsampling of training data can be used for speedup. """
     
     nbr_images = len(featurefiles)
     # read the features from file
     descr = []
     descr.append(sift.read_features_from_file(featurefiles[0])[1])
     descriptors = descr[0] #stack all features for k-means
     for i in arange(1,nbr_images):
         descr.append(sift.read_features_from_file(featurefiles[i])[1])
         descriptors = vstack((descriptors,descr[i]))
         
     # k-means: last number determines number of runs
     self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
     self.nbr_words = self.voc.shape[0]
     
     # go through all training images and project on vocabulary
     imwords = zeros((nbr_images,self.nbr_words))
     for i in range( nbr_images ):
         imwords[i] = self.project(descr[i])
     
     # number of images in which each word occurs
     nbr_occurences = sum( (imwords > 0)*1 ,axis=0)
     
     # idf weights with add-one smoothing on the occurrence counts
     self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
     self.trainingdata = featurefiles
Ejemplo n.º 4
0
    def train(self, featurefiles, k=100, subsampling=10):
        """Train a visual vocabulary with scikit-learn's MiniBatchKMeans.

        Descriptors are subsampled per image while loading (every
        |subsampling|-th row), then clustered into k visual words; per-word
        idf weights are computed afterwards.

        NOTE(review): the first image's descriptors (descr[0]) are stacked
        in full, without subsampling -- confirm whether intentional.
        """
        nbr_images = len(featurefiles)
        descr = []
        descr.append(sift.read_features_from_file(featurefiles[0])[1])
        descriptors = descr[0]
        print "begin loading image feature files..."
        for i in np.arange(1, nbr_images):
            descr.append(sift.read_features_from_file(featurefiles[i])[1])
#                descriptors = np.vstack((descriptors, descr[i]))
            descriptors = np.vstack((descriptors, descr[i][::subsampling,:]))
            if i%100 == 0:
                print i, "images have been loaded..."
        print "finish loading image feature files!"

#        self.voc, distortion = cluster.kmeans(descriptors[::subsampling,:], k, 1)
        print "begin MiniBatchKMeans cluster....patient"
        mbk = MiniBatchKMeans(k, init="k-means++", compute_labels=False, n_init=3, init_size=3*k)
#        mbk.fit(descriptors[::subsampling,:])
        mbk.fit(descriptors)
        self.voc = mbk.cluster_centers_
        print "cluster finish!"
        self.nbr_word = self.voc.shape[0]
        # Project each training image (its full descriptor set) onto the
        # vocabulary to build per-image word histograms.
        imwords = np.zeros((nbr_images, self.nbr_word))
        for i in xrange(nbr_images):
            imwords[i] = self.project(descr[i])

        # idf weights with add-one smoothing on the occurrence counts.
        nbr_occurences = np.sum((imwords > 0)*1, axis=0)
        self.idf = np.log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
        self.traindata = featurefiles
Ejemplo n.º 5
0
def get_sift_match(f1, f2):
	"""Return the SIFT match score between two images.

	f1/f2 are image paths; their precomputed '.key' feature files are looked
	up in TMP_DIR by basename. Returns 0.0 when a feature file is missing or
	unreadable.
	"""
	fn1, fext1 = os.path.splitext(os.path.basename(f1))
	fn2, fext2 = os.path.splitext(os.path.basename(f2))
	try:
		l1, d1 = sift.read_features_from_file(TMP_DIR + fn1 + '.key')
		l2, d2 = sift.read_features_from_file(TMP_DIR + fn2 + '.key')
		
		return sift.score(d1, d2)
	except (IOError, OSError, ValueError):
		# Missing or corrupt feature file: treat as "no match". The original
		# bare `except:` also hid programming errors (e.g. NameError).
		return 0.0
Ejemplo n.º 6
0
def get_krt(im1, im2):
    """Estimate the calibration K and pose of a second camera from a
    homography between im1 (frontal view of a plane) and im2 (perspective
    view), then pickle K and inv(K)*P2 next to im2.

    NOTE(review): the pickle file is opened in text mode 'w' -- fine on
    Python 2 where this code runs, but should be 'wb' on Python 3.
    """
    ims = [im1, im2]
    sifts = []
    # Feature-file names: swap the 3-char extension for "sift".
    for x in range(2):
        sifts.append(ims[x][:-3]+"sift")

    # compute features                                                        
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0],sifts[0])

    l0,d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1],sifts[1])
    l1,d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography                                        
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()
    H,ransac_data = homography.H_from_ransac(fp,tp,model)


    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration((Image.open(im2).size))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image
    print dot(H,box_cam1)
    box_trans = homography.normalize(dot(H,box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    # Re-orthogonalize the rotation part: third column = cross of first two.
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print homography.normalize(dot(dot(H,cam1.P),point))
    print cam2.project(point)

    import pickle
    with open('%s.pkl' % ims[1][:-4],'w') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
Ejemplo n.º 7
0
def cbir_train(train_path, voc_name, db_name, n_subsample=2000, n_cluster=2000, subfeatsampling=10):
    """Train a CBIR pipeline: compute SIFT features for up to n_subsample
    images under train_path, train an n_cluster-word vocabulary, pickle it,
    and build the image index database.
    """
    # Encode the training parameters into the vocabulary/database names.
    voc_name = voc_name + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling)
    db_name = db_name[:-3] + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling) + db_name[-3:]

    imlist, featlist = cbir_utils.create_imglist_featlist(train_path)
    imlist = imlist[:n_subsample]
    featlist = featlist[:n_subsample]

    ### generate sift feature
    nbr_images = len(imlist)
    ''''''
    for i in range(nbr_images):
        sift.process_image(imlist[i], featlist[i], mask = True)

    ### generate visual word
    voc = visual_word.Vocabulary(voc_name)
    voc.train(featlist, n_cluster, subfeatsampling)
    with open(voc_name+'.pkl', 'wb') as f:
        cPickle.dump(voc, f)
    print 'vocabulary is', voc.name, voc.nbr_word

    ### generate image index
    with open(voc_name+'.pkl', 'rb') as f:
        voc = cPickle.load(f)

    indx = image_search.Indexer(db_name, voc)
    indx.create_tables()

    # Add every training image's descriptors to the index.
    for i in range(nbr_images):
        locs, descr = sift.read_features_from_file(featlist[i])
        indx.add_to_index(imlist[i], descr)

    indx.db_commit()
    print 'generate index finish!'
    print 'training over'
Ejemplo n.º 8
0
def get_descriptors(img):
    """Return (locations, descriptors) for *img*.

    *img* is expected to be an image object that already carries a
    ``filename_keypoints`` attribute naming its precomputed key file.
    """
    locations, descriptors = sift.read_features_from_file(img.filename_keypoints)
    return locations, descriptors
def __main__:
	
	nbr_images = len(imlist)
	featlist = [ imlist[i][:-3] + 'sif' for i in range(nbr_images))

	for i in range(nbr_images):
		sift.process_image(imlist[i],featlist[i])

	voc = vocabularly.Vocabulary('ukbenchtest')
	voc.train(featlist,1000,10)

	with open('vocabulary.pkl', 'wb') as f:
		pickle.dump(voc,f)
	print 'vocabulary is:', voc.name, voc.nbr_wods


	nbr_images = len(imlist)

	with open('vocabulary.pkl', 'rb') as f:
		voc = pickle.load(f)


	indx = imagesearch.Indexer('test.db',voc)
	indx.create_tables()

	for i in range(nbr_images)[:100]:
		locs,descr = sift.read_features_from_file(featlist[i])
		indx.add_to_index(imlist[i],descr)

	indx.db_commit()


	con = sqlite.connect('test.db')
	print con.execute('select count (filename) from imlist').fetchone()
	print con.execute('select * from imlist').fetchone()


	src = imagesearch.Searcher('test.db')
	locs,descr = sift.read_features_from_file(featlist[0])
	iw = voc.project(descr)

	print 'ask using a histogram...'
	print src.candidates_from_histogram(iw)[:10]

	print 'try a query...'
	print src.query(imlist[0])[:10]
Ejemplo n.º 10
0
 def runSurf(self):
     """Run the external SURF binary on a grayscale dump of self.image and
     load the resulting feature locations and descriptors.

     NOTE(review): the result file is parsed with
     sift.read_features_from_file -- assumes the SURF output uses the same
     text layout; confirm.
     """
     #save a grayscale image
     im = self.image.convert("L")
     im.save(self.filename + "_gray.pgm", "PPM")  
     surfexec = surfpath + " -i " +  self.filename + "_gray.pgm" + " -o " + self.filename + "_result.txt"
     print surfexec
     os.system(surfexec)
     self.locators, self.descriptors = sift.read_features_from_file(self.filename + "_result.txt")
Ejemplo n.º 11
0
  def train(self,featurefiles,k=100,subsampling=10):
    """ Train a vocabulary from the features read from the files listed in
      featurefiles, using k-means with k visual words. Training data can
      be thinned with subsampling for speedup. """

    nbr_images = len(featurefiles)
    # Read the features from file.
    #points = []
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    # optional.view feature points.
    #points.append( np.array(sift.read_features_from_file(featurefiles[0])[0][:,0:2]) ) # stock of x,y axis value
    descriptors = descr[0] #stack all features for k-means
    #pointors = points[0]
    for i in arange(1,nbr_images):
      descr.append(sift.read_features_from_file(featurefiles[i])[1])
      #points.append( np.array(sift.read_features_from_file(featurefiles[i])[0][:,0:2]) ) # stock of x,y axis value
      descriptors = vstack((descriptors,descr[i]))
    
    # k-means: the last argument sets the number of runs.
    self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
    self.nbr_words = self.voc.shape[0]
    
    # Persist the cluster centroids.
    with open('voc_centroid.pkl','wb') as f:
        pickle.dump(self.voc,f)
    """
    # ワードとx,y座標の辞書作成
    dic = []
    for i in xrange(len(nbr_images)):
        dic[i] = {}
        dic[i][]
    """
    
    # Project each training image onto the vocabulary. Bug fix: the original
    # looped over xrange(1) (a debugging leftover), leaving the histograms of
    # all but the first image empty and skewing the idf weights below.
    imwords = zeros((nbr_images,self.nbr_words))
    for i in xrange( nbr_images ):
      # imwords[i] = self.project(descr[i], points[i]) # use this variant with PLSA
      imwords[i] = self.project(descr[i])

    nbr_occurences = sum( (imwords > 0)*1 ,axis=0)

    # idf weights with add-one smoothing on the occurrence counts.
    self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.trainingdata = featurefiles
Ejemplo n.º 12
0
def plot_sift_feature(im):
    """Compute SIFT features for *im* and plot them.

    NOTE(review): *im* is passed straight to sift.process_image (so it is a
    filename) but also to sift.plot_features, which elsewhere in this file
    receives a grayscale image array -- confirm plot_features accepts a
    path here (the commented lines below suggest the array was intended).
    """
    #imname = ’empire.jpg’
    #im1 = array(Image.open(imname).convert(’L’))
    tmpFile = 'tmp.sift'
    sift.process_image(im,tmpFile)
    l1,d1 = sift.read_features_from_file(tmpFile)
    figure()
    gray()
    sift.plot_features(im,l1,circle=True)
    show()
Ejemplo n.º 13
0
def read_feature_labels(path):
  """Load every '.dsift' feature file under *path* and return the flattened
  descriptors stacked into one array (one row per file)."""
  feature_files = [
      os.path.join(path, name)
      for name in os.listdir(path) if name.endswith('.dsift')
  ]

  flattened = []
  for feature_file in feature_files:
    locations, descriptors = sift.read_features_from_file(feature_file)
    flattened.append(descriptors.flatten())

  return array(flattened)
Ejemplo n.º 14
0
 def runsift(self):
     """Run the external SIFT binary on a 10x-downscaled grayscale dump of
     self.image and load the resulting locations/descriptors.

     NOTE(review): imsize[0]/10 relies on Python 2 integer division.
     """
     #save a grayscale image
     imsize = self.image.size
     im = self.image.resize((imsize[0]/10, imsize[1]/10))
     im = im.convert("L")
     im.save(self.filename + "_gray.pgm", "PPM")
     # Redirect the binary's stdout into the result file.
     siftexec = siftpath + self.filename + "_gray.pgm >" + self.filename + "_result.txt"
     print siftexec
     os.system(siftexec)
     self.locators, self.descriptors = sift.read_features_from_file(self.filename + "_result.txt")
Ejemplo n.º 15
0
def extractSift(input_files):
    """Compute (or reuse) the SIFT feature file for every input image and
    return {filename: descriptors}."""
    all_features_dict = {}
    for i, fname in enumerate(input_files):
        features_fname = fname + '.sift'
        # Only run the (slow) SIFT extraction when no cached file exists.
        if exists(features_fname) == False:
            print "calculating sift features for", fname
            sift.process_image(fname, features_fname)
        print "gathering sift features for", fname,
        locs, descriptors = sift.read_features_from_file(features_fname)
        print descriptors.shape
        all_features_dict[fname] = descriptors
    return all_features_dict
Ejemplo n.º 16
0
def get_sift_lowe(img):
    """Return the SIFT descriptors for image *img*.

    Generates the '<img>.sift' feature file when it is missing, regenerating
    it once whenever the produced file turns out to be empty.
    """
    features_fname = img + '.sift'
    if not os.path.isfile(features_fname):
        # process_image's return value flags an empty output file (per the
        # original variable name is_size_zero); retry once in that case.
        if sift.process_image(img, features_fname):
            os.remove(features_fname)
            sift.process_image(img, features_fname)
    # Defensive second check: drop and regenerate a zero-byte feature file.
    if os.path.isfile(features_fname) and os.path.getsize(features_fname) == 0:
        os.remove(features_fname)
        sift.process_image(img, features_fname)
    locs, desc = sift.read_features_from_file(features_fname)
    return desc
Ejemplo n.º 17
0
def read_feature_labels(path):	
  """Read all feature files with extension typeFeats (module global) from
  *path* and return (features, featlist).

  NOTE(review): `labels` is computed but never returned -- featlist is
  returned instead, unlike the sibling read_gesture_* helpers that return
  (features, array(labels)). Confirm whether this is intentional before
  relying on the second return value.
  """
  featlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.' + typeFeats)]
  features = []

  for featfile in featlist:
    l, d = sift.read_features_from_file(featfile)
    features.append(d.flatten())

  features = array(features)
  labels = [featfile.split('/')[-1][0] for featfile in featlist]

  return features, featlist
Ejemplo n.º 18
0
def extractSift(input_files,target_folder):
	"""Compute (or reuse) SIFT feature files for input_files, storing them
	in target_folder, and return {filename: (locations, descriptors)}.

	NOTE(review): the trailing os.chdir('..') suggests process_image changes
	the working directory -- confirm; it is a surprising side effect.
	"""
	all_features_dict = {}
	count=0
	for i,fname in enumerate(input_files):
		# Target file: <target_folder>/<basename-without-ext>.sift
		features_fname = target_folder+'/'+fname.split('/')[2].split('.')[0]+'.sift'
		# Only recompute when no cached .sift file exists.
		if exists(features_fname) == False:
			print("Calculating sift features for ",fname)
			sift.process_image(fname, features_fname,count)
			count+=1
		locs, descriptors = sift.read_features_from_file(features_fname)
		all_features_dict[fname] = (locs,descriptors)
	os.chdir('..')
	return all_features_dict
Ejemplo n.º 19
0
def read_gesture_features_labels(path):
    """Read every '.dsift' feature file in *path*.

    Returns (features, labels): one flattened descriptor row per file, and
    the first character of each file name as its class label.
    """
    feature_files = [os.path.join(path, name)
                     for name in os.listdir(path) if name.endswith('.dsift')]

    rows = []
    for feature_file in feature_files:
        locations, descriptors = sift.read_features_from_file(feature_file)
        rows.append(descriptors.flatten())
    features = np.array(rows)

    # The gesture class is encoded as the first character of the file name.
    labels = [feature_file.split('/')[-1][0] for feature_file in feature_files]

    return features, np.array(labels)
Ejemplo n.º 20
0
def find_matches(image_names, root):
    """Compute SIFT features for each named image under *root* and match
    every consecutive pair.

    Returns (matches, l, d) where matches[i] holds the matches between
    image i+1 and image i, and l/d map index -> locations / descriptors.
    """
    l = {}
    d = {}
    n = len(image_names)
    for idx, name in enumerate(image_names):
        resultname = os.path.join(root, '{}.sift'.format(name))
        # Reuse a cached .sift file when one already exists.
        if not os.path.isfile(resultname):
            sift.process_image(os.path.join(root, '{}.png'.format(name)), resultname)
        l[idx], d[idx] = sift.read_features_from_file(resultname)

    matches = {}
    for idx in range(n - 1):
        matches[idx] = sift.match(d[idx + 1], d[idx])
    return matches, l, d
Ejemplo n.º 21
0
def read_gesture_feature_labels(path):
    """Read every '.dsift' feature file under *path*.

    Returns (features, labels): flattened descriptors, one row per file,
    and each file's leading character as its label.
    """
    # Collect all files ending in .dsift.
    feature_files = [os.path.join(path, name)
                     for name in os.listdir(path) if name.endswith('.dsift')]

    rows = []
    for feature_file in feature_files:
        locations, descriptors = sift.read_features_from_file(feature_file)
        rows.append(descriptors.flatten())
    features = array(rows)

    # Label = first character of the file name.
    labels = [feature_file.split('/')[-1][0] for feature_file in feature_files]
    return features, array(labels)
	def match_keys(self, sceneFile, modelFile):
		"""Match scene SIFT descriptors against model descriptors.

		Returns an array of length len(scene descriptors) where entry i is
		the index of the matching model descriptor (0 when no significant
		match). Returns None when the model feature file cannot be read.
		Assumes descriptors are unit-normalized so dot products are cosines
		-- TODO confirm.
		"""
		sceneFeat = sift.read_features_from_file(sceneFile)[1]
		
		try:
			modelFeat = sift.read_features_from_file(modelFile)[1]
		except:
			return
		
		desc1=sceneFeat
		desc2=modelFeat
		
#		pdb.set_trace()			
		
#		t1=time.time()
		# Lowe-style ratio test threshold on angles between descriptors.
		dist_ratio=.6
		desc1_shape = desc1.shape[0]
		
		matchscores=np.zeros(desc1_shape)
		
		dotProds = np.dot(desc1,desc2.T)
		# Scale slightly below 1 so arccos never sees a value > 1.
		dotProds *=.9999
		
		acos_dots= np.arccos(dotProds)
		indx = np.argsort(acos_dots,axis=1)
		
		range_ind = range(desc1_shape)
#		sig_keys = np.array(acos_dots[0,indx[:,0]] < dist_ratio * acos_dots[0,indx[:,1]])
		# Keep rows whose best angle beats dist_ratio * second-best angle.
		sig_keys = np.array(acos_dots[range_ind,indx[:,0]] < dist_ratio * acos_dots[range_ind,indx[:,1]])
		sig_keys_ind = np.nonzero(sig_keys)[0]
		matchscores[sig_keys_ind] = indx[sig_keys_ind,0]
#		print 'T1:',time.time()-t1
#		t2=time.time()
#		other=sift.match(desc1,desc2)
#		print 'T2:',time.time()-t2		
#		pdb.set_trace()

		return matchscores
Ejemplo n.º 23
0
Archivo: test.py Proyecto: ak352/pycv
def compute_homography():
    """Estimate the homography between the frontal and perspective magazine
    images from matched SIFT features (assumes the .sift files already
    exist).

    NOTE(review): the space_* assignments are immediately overwritten by
    the mag_* ones -- dead leftovers from switching test data.
    """
    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    im1='../../data/space_front.jpg'
    im2='../../data/space_perspective.jpg'
    im1='../../data/mag_front.jpg'
    im2='../../data/mag_perspective.jpg'
    ims = [im1, im2]
    sifts = []
    for k in range(2):
        sifts.append(ims[k][:-4]+".sift")
    l0,d0 = sift.read_features_from_file(sifts[0])
    l1,d1 = sift.read_features_from_file(sifts[1])

    # match features and estimate homography
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()

    H,ransac_data = homography.H_from_ransac(fp,tp,model)
    return H
Ejemplo n.º 24
0
def extractSift(input_files):
	"""Compute SIFT features for every input file (running from inside the
	siftDemoV4/ directory) and return {filename: descriptors}."""
	all_features_dict = {}
	count = 0
	for i,fname in enumerate(input_files):
		# path to store resulting sift files
		features_fname = 'sift_output/'+fname.split('/')[2].split('.')[0]+'.sift'
		# Enter the SIFT binary's directory once, before the first image.
		if count == 0:
			os.chdir('siftDemoV4')
		print("Calculating sift features for ",fname)
		sift.process_image(fname,features_fname,count)
		count+=1
		locs, descriptors = sift.read_features_from_file(features_fname)
		all_features_dict[fname] = descriptors
	os.chdir('..')
	return all_features_dict
Ejemplo n.º 25
0
Archivo: 0619.py Proyecto: ta-oyama/PCV
def read_gesture_features_labels(path):
    """Collect all '.dsift' feature files under *path*.

    Returns (features, labels): flattened descriptors, one row per file,
    and each file's leading character as its class label.
    """
    dsift_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.dsift')]

    # Read and flatten the descriptors of every file.
    rows = []
    for dsift_file in dsift_files:
        locs, descriptors = sift.read_features_from_file(dsift_file)
        rows.append(descriptors.flatten())

    # Generate the labels from the file names.
    labels = [dsift_file.split('/')[-1][0] for dsift_file in dsift_files]

    return np.array(rows), np.array(labels)
Ejemplo n.º 26
0
def _match():
    imlist = []
    featlist = []
    nbr_image = len(imlist)
    
    matchscores = zeros((nbr_images, nbr_images))
    
    for i in xrange(nbr_images):
        for j in xrange(i, nbr_image): # 上三角成分だけを計算する
            print 'comparing', imlist[i], imlist[j]
            
            l1, d1 = sift.read_features_from_file(featlist[i])
            l2, d2 = sift.read_features_from_file(featlist[j]) 
            
            matches = sift.match_twosided(d1, d2)
            
            nbr_matches = sum(matches > 0)
            print 'number or matches = ', nbr_matches
            matchscores[j,i] = nbr_matches
        
    # 値をコピーする
    for i in xrange(nbr_images):
        for j in xrange(i+1, nbr_images): # 対角成分はコピー不要
            matchscores[j,i] = matchscores[i,j]
def read_feature_labels(path):
  """Read feature files of the globally selected type (typeFeats, either
  'dsift' or 'sift') from *path*.

  Returns (features, labels): flattened descriptors, one row per file, and
  each file's leading character as its label.
  """
  if typeFeats == 'dsift':
    featlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.dsift')]
  elif typeFeats == 'sift':
    featlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.sift')]
  else:
    print_error()

  flattened = []
  for featfile in featlist:
    l, d = sift.read_features_from_file(featfile)
    flattened.append(d.flatten())

  labels = [featfile.split('/')[-1][0] for featfile in featlist]

  return array(flattened), array(labels)
Ejemplo n.º 28
0
def extractSift(input_files):
  """Compute (or reuse) SIFT features for each input image, storing the
  .sift file in a 'sift' subdirectory beside the image; return
  {filename: descriptors}."""
  print "extracting Sift features"
  all_features_dict = {}
  for i, fname in enumerate(input_files):
    # Build <dir>/sift/<basename>.sift next to the source image.
    rest_of_path = fname[:-(len(os.path.basename(fname)))]
    rest_of_path = os.path.join(rest_of_path, "sift")
    rest_of_path = os.path.join(rest_of_path, os.path.basename(fname))
    features_fname = rest_of_path + '.sift'
    if os.path.exists(features_fname) == False:
      # print "calculating sift features for", fname
      sift.process_image(fname, features_fname)
    # print "gathering sift features for", fname,
    locs, descriptors = sift.read_features_from_file(features_fname)
    # print descriptors.shape
    all_features_dict[fname] = descriptors
  return all_features_dict
Ejemplo n.º 29
0
def extractMF(filename):
    """Extract local features: run SIFT on *filename*, then for up to 1000
    keypoint locations compute a descriptor (sg.spec) on a small window
    cropped around each keypoint.

    Returns an (n, SIZE_LOCAL_FEATURE) float32 array.
    NOTE(review): the image is re-opened inside the loop for every keypoint;
    hoisting Image.open out of the loop would be cheaper -- left unchanged.
    """
    features_fname = filename + '.sift'
    sift.process_image(filename, features_fname)
    locs, descriptors = sift.read_features_from_file(features_fname)
    sh = min(locs.shape[0], 1000)
    res = np.zeros((sh,SIZE_LOCAL_FEATURE)).astype(np.float32)
    extra = [20,False,True,False,0,0,0]
    WIN = 5
    for i in range(sh):
        x = np.int32(round(locs[i][0]))
        y = np.int32(round(locs[i][1]))
        I = Image.open(filename)
        Nx,Ny = I.size
        # Crop a window of half-width WIN, clamped to the image borders.
        a = sg.spec(I.crop((max(x-WIN,0),max(y-WIN,0),min(x+WIN,Nx-1),min(y+WIN,Ny-1))),extra)
        res[i] = a
    print res.shape
    return res
def extractSift(input_files):
	"""Compute (or reuse) per-image SIFT feature files and return
	{filename: descriptors}."""
	print "extracting Sift features"
	all_features_dict = {}

	#all_features = zeros([1,128])
	for i, fname in enumerate(input_files):
		features_fname = fname + '.sift'
		# Only run extraction when no cached .sift file exists.
		if exists(features_fname) == False:
			print "calculating sift features for", fname
			sift.process_image(fname, features_fname)
		locs, descriptors = sift.read_features_from_file(features_fname)
		# print descriptors.shape
		all_features_dict[fname] = descriptors
		# if all_features.shape[0] == 1:
		# 	all_features = descriptors
		# else:
		# 	all_features = concatenate((all_features, descriptors), axis = 0)
	return all_features_dict
Ejemplo n.º 31
0
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: Yoghourt.Lee->lvcr
# Created Time : Wed 25 Apr 2018 06:25:41 PM CST
# File Name: automaticallyFindMatchingCorrespondenceUseSIFT.py
# Description:  automatically find matching correspondences using SIFT features
"""

import sift

# Feature-file and image names Univ1..Univ5.
featname = ['Univ' + str(i+1) + '.sift' for i in range(5)]
# Bug fix: the original read "fpr i in range(5)" (SyntaxError).
imname = ['Univ' + str(i+1) + '.jpg' for i in range(5)]

l = {}
d = {}

# Compute and load SIFT features for every image.
for i in range(5):
    sift.process_image(imname[i], featname[i])
    l[i], d[i] = sift.read_features_from_file(featname[i])


# Match each image against the next one.
matches = {}
for i in range(4):
    matches[i] = sift.match(d[i+1], d[i])
Ejemplo n.º 32
0
with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

src = imagesearch.Searcher('test.db', voc)

# Index of the query image and the number of results to return.
q_ind = 50
nbr_results = 20

# Regular (histogram-based) query.
res_reg = [w[1] for w in src.query(imlist[q_ind])[:nbr_results]]
print 'top matches (regular):', res_reg

# Load the features of the query image.
q_locs, q_descr = sift.read_features_from_file(featlist[q_ind])
fp = homography.make_homog(q_locs[:, :2].T)

# RANSAC model for fitting a homography.
model = homography.RansacModel()

rank = {}
# Load the features of each search result.
for ndx in res_reg[1:]:
    locs, descr = sift.read_features_from_file(featlist[ndx])

    # Get the matches against the query descriptors.
    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
    ind2 = matches[ind]
    tp = homography.make_homog(locs[:, :2].T)
Ejemplo n.º 33
0
import sift
from PIL import Image
import os
from numpy import *
from pylab import *
# import cv2

# Detect, save and plot SIFT features for a single image.
imname = 'tesla-model-s3.jpg'
# img = cv2.imread(imname)
# cv.imshow(img)
im1 = array(Image.open(imname).convert('L'))  # grayscale array for plotting
# print(im1)
sift.process_image(imname, 'tesla-model-s3.sift')
l1, d1 = sift.read_features_from_file('tesla-model-s3.sift')
figure()
gray()
sift.plot_features(im1, l1, circle=True)
show()
Ejemplo n.º 34
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from PIL import Image
from numpy import array
import sift
from pylab import *

# Detect, save and plot SIFT features for a single image.
imname = 'Penguins.jpg'

im1 = array(Image.open(imname).convert('L'))  # grayscale array for plotting

sift.process_image(imname, 'Penguins.sift')

l1, d1 = sift.read_features_from_file('Penguins.sift')

figure()
gray()
sift.plot_features(im1, l1, circle=True)
show()
##########################################
# Compute SIFT features for every image, then build a symmetric matrix of
# pairwise match counts.
# NOTE(review): imlist and featlist are not defined in this chunk --
# presumably set up earlier in the file; confirm.

nbr_images = len(imlist)

# Get SIFT Features for each image
for i in range(nbr_images):
    im1 = array(Image.open(imlist[i]).convert('L'))
    siftName = basename(imlist[i])
    sift.process_image(imlist[i], "features/" + siftName + ".sift")
    featlist.append("features/" + siftName + ".sift")

matchscores = zeros((nbr_images, nbr_images))
for i in range(nbr_images):
    for j in range(i, nbr_images):
        # Only the upper triangle (j >= i) is computed here.
        if imlist[i] != imlist[j]:
            print 'comparing ', imlist[i], imlist[j]

            l1, d1 = sift.read_features_from_file(featlist[i])
            l2, d2 = sift.read_features_from_file(featlist[j])

            matches = sift.match_twosided(d1, d2)

            nbr_matches = sum(matches > 0)
            print 'number of matches = ', nbr_matches
            matchscores[i, j] = nbr_matches

# Mirror the upper triangle into the lower one.
for i in range(nbr_images):
    for j in range(i + 1, nbr_images):
        matchscores[j, i] = matchscores[i, j]
Ejemplo n.º 36
0
import homography
import sfm
import sift
from pylab import *
from PIL import Image

# 标定矩阵
# Camera calibration matrix.
K = array([[2394, 0, 932], [0, 2398, 628], [0, 0, 1]])
# Load the images and compute their SIFT features.
im1 = array(Image.open('../data/alcatraz1.jpg'))
sift.process_image('../data/alcatraz1.jpg', 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')
im2 = array(Image.open('../data/alcatraz2.jpg'))
sift.process_image('../data/alcatraz2.jpg', 'im2.sift')
l2, d2 = sift.read_features_from_file('im2.sift')
# Match the features.
matches = sift.match_twosided(d1, d2)
ndx = matches.nonzero()[0]
# Homogeneous coordinates, normalized with inv(K).
x1 = homography.make_homog(l1[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l2[ndx2, :2].T)
x1n = dot(inv(K), x1)
x2n = dot(inv(K), x2)
# Estimate E with RANSAC. NOTE(review): F_from_ransac is applied to
# K-normalized points, so the result is treated as the essential matrix --
# confirm against sfm's documentation.
model = sfm.RansacModel()
E, inliers = sfm.F_from_ransac(x1n, x2n, model)
# Compute the camera matrices (P2 is a list of 4 candidate solutions).
P1 = array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
P2 = sfm.compute_P_from_essential(E)
Ejemplo n.º 37
0
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])
    # 为了绘制闭合图像,和第一个相同
    # 竖直边
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] - wid])
    return array(p).T


# Compute SIFT features for both views.
sift.process_image('../data/book_frontal.JPG', 'im0.sift')
l0, d0 = sift.read_features_from_file('im0.sift')

sift.process_image('../data/book_perspective.JPG', 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')

# Match the features and estimate the homography.
matches = sift.match_twosided(d0, d1)
ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)

model = homography.RansacModel()
# NOTE(review): elsewhere in this file H_from_ransac is unpacked as
# (H, ransac_data); here the whole tuple is kept in H -- confirm how H is
# used downstream.
H = homography.H_from_ransac(fp, tp, model)

im0 = array(Image.open('../data/book_frontal.JPG'))
Ejemplo n.º 38
0
def cal(queryname , judge):
	"""Bag-of-words style retrieval: cluster all database descriptors with
	k-means (10 clusters), histogram each image's descriptors over the
	clusters, and rank database images by squared distance to the query's
	histogram. Prints the top 10 when *judge* is truthy, otherwise returns
	them as (score, filename) pairs.

	NOTE(review): several apparent bugs are left untouched here:
	- the loop variable `i` is reused as both a file counter and a
	  descriptor index inside the same loop;
	- `big = temp[i]` looks like it should be `bignum = temp[i]`, so the
	  threshold never updates;
	- the later `for k , v in features.iteritems()` loops shadow the
	  k-means centroids `k`, so vq() receives a filename key instead of
	  the centroid array.
	"""
	features = {}
	i = 0
	model = zeros(128)
	# Stack the descriptors of every database image into one matrix.
	for dirs , root , files in os.walk(datapath):
		for filename in files:
			#sift.process_image(datapath + filename , 'temp.sift')
                        l , d = sift.read_features_from_file('./sifts/' + filename[:-4] + '.sift')
			features[filename] = d
			i += 1
			for i in range(len(d)):
				model = vstack((model , d[i]))

	# Drop the all-zeros seed row.
	model = delete(model , 0 , 0)

	query = zeros(128)
	sift.process_image(datapath + queryname , 'temp.sift')
	l , d = sift.read_features_from_file('temp.sift')

	for i in range(len(d)):
		query = vstack((query , d[i]))
	query = delete(query , 0 , 0)

	# Cluster the whitened database descriptors into 10 visual words.
	whitened = whiten(model)
	k , num = kmeans(whitened , 10)

	modelarr , dum = vq(model , k)
	queryarr , disnum = vq(query , k)

	# Histogram of the database descriptors over the 10 words.
	temp = [0] * 10
	for i in modelarr:
		temp[i] += 1

	bignum = 0
	big = -1
	for i in range(10):
		if temp[i] > bignum:
			big = temp[i]
			big = i

	# Histogram of the query descriptors over the 10 words.
	temp = [0] * 10
	for i in queryarr:
		temp[i] += 1


	countdict = {}
	for k , v in features.iteritems():
		countdict[k] = [0] * 10
		featurearr , desnum = vq(features[k] , k)
		for i in featurearr:
			countdict[k][i] += 1

	# Rank by squared distance between histograms (the most common word,
	# index `big`, is zeroed out).
	result = {}
	for k , v in countdict.iteritems():
		numsum = 0
		for i in range(10):
			if i == big:
				temp[i] = 0
			val = countdict[k][i]
			numsum += (val - temp[i]) * (val - temp[i])
		result[k] = numsum

	result = sorted([(v , k) for (k , v) in result.items()] , reverse = False)
	if judge:
		for i in result[:10]:
			print i
	else:
		return result[:10]
Ejemplo n.º 39
0
import sift
import imagesearch
"""After ch07_buildindex.py has built an index in test.db, this program can
query it.
"""

# Query the index built by ch07_buildindex.py over the first 100 images.
imlist = imtools.get_imlist('/Users/thakis/Downloads/ukbench/first1000')[:100]
imcount = len(imlist)
featlist = [imlist[i][:-3] + 'sift' for i in range(imcount)]

# Load the trained vocabulary.
with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

searcher = imagesearch.Searcher('test.db', voc)

# Project the first image's descriptors onto the vocabulary.
locs, descr = sift.read_features_from_file(featlist[0])
imwords = voc.project(descr)

print 'ask using a histogram...'
print searcher.candidates_from_histogram(imwords)[:10]

print 'try a query...'
res = searcher.query(imlist[0])[:10]
print res

print 'score:'
# Score a small subset, so this runs fast.
print imagesearch.compute_ukbench_score(searcher, imlist[:10])

# Plot images most similar to imlist[0].
imagesearch.plot_results(searcher, [r[1] for r in res[:6]])
"""
Created on Mon Aug 15 14:10:37 2016

@author: user
"""
from PIL import Image
from pylab import *

from numpy import *

import sift
# Detect and plot SIFT features for one image.
imname = 'baby_1.jpg'
im1 = array(Image.open(imname).convert('L'))
imshow(im1)
sift.process_image(imname, 'baby.sift')
l1, d1 = sift.read_features_from_file('baby.sift')
figure()
gray()
sift.plot_features(im1, l1, circle=True)
show()
# Match features between two climbing images.
imname1 = 'climbing_1_small.jpg'
imname2 = 'climbing_2_small.jpg'

# process and save features to file
sift.process_image(imname1, imname1 + '.sift')
sift.process_image(imname2, imname2 + '.sift')

# read features and match
l2, d2 = sift.read_features_from_file(imname1 + '.sift')
l3, d3 = sift.read_features_from_file(imname2 + '.sift')
matchscores = sift.match_twosided(d2, d3)

# Pick the image pair from argv or fall back to the climbing examples.
if len(sys.argv) >= 3:
  im1f, im2f = sys.argv[1], sys.argv[2]
else:
  # im1f = '../data/sf_view1.jpg'
  # im2f = '../data/sf_view2.jpg'
  # im1f = '../data/crans_1_small.jpg'
  # im2f = '../data/crans_2_small.jpg'
  im1f = '../data/climbing_1_small.jpg'
  im2f = '../data/climbing_2_small.jpg'
im1 = array(Image.open(im1f))
im2 = array(Image.open(im2f))

# NOTE(review): process_image is commented out, so out_sift_1.txt /
# out_sift_2.txt must already exist on disk.
#sift.process_image(im1f, 'out_sift_1.txt')
l1, d1 = sift.read_features_from_file('out_sift_1.txt')
figure()
gray()
subplot(121)
sift.plot_features(im1, l1, circle=False)

#sift.process_image(im2f, 'out_sift_2.txt')
l2, d2 = sift.read_features_from_file('out_sift_2.txt')
subplot(122)
sift.plot_features(im2, l2, circle=False)

#matches = sift.match(d1, d2)
matches = sift.match_twosided(d1, d2)
print('{} matches'.format(len(matches.nonzero()[0]))) 

figure()
 def get_all_features(self):
     """Load the descriptor matrix of every SIFT file into self.all_features.

     read_features_from_file returns (locations, descriptors); only the
     descriptors are kept.
     """
     for idx in range(self.fileCount):
         sift_file = self.sift_filenames[idx]
         self.all_features.append(sift.read_features_from_file(sift_file)[1])
Ejemplo n.º 43
0
import sift
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

# Compute and load SIFT features for the first image.
im1name = 'crans_1_small.jpg'
im1sift = im1name + '.sift'
im1 = np.array(Image.open(im1name).convert('L'))
sift.process_image(im1name, im1sift)
l1, d1 = sift.read_features_from_file(im1sift)
print(l1.shape, d1.shape)

# Compute and load SIFT features for the second image.
im2name = 'crans_2_small.jpg'
im2sift = im2name + '.sift'
im2 = np.array(Image.open(im2name).convert('L'))
sift.process_image(im2name, im2sift)
l2, d2 = sift.read_features_from_file(im2sift)
print(l2.shape, d2.shape)

print("start matching..")
# Match the two descriptor sets in both directions.
matches = sift.match_twosided(d1, d2)


# Visualize the matches.
plt.figure()
plt.gray()
sift.plot_matches(im1, im2, l1, l2, matches)
plt.show()