Code Example #1
File: opqCoding.py Project: arbabenko/Quantizations
def learnCodebooksOPQ(pointsFilename, pointsCount, dim, M, K, vocFilename, ninit=20):
    points = readXvecs(pointsFilename, dim, pointsCount)
    R = np.identity(dim)
    rotatedPoints = np.dot(points, R.T).astype('float32')
    codebookDim = dim / M
    codebooks = np.zeros((M, K, codebookDim), dtype='float32')
    # init vocabs
    for i in xrange(M):
        perm = np.random.permutation(pointsCount)
        codebooks[i, :, :] = rotatedPoints[perm[:K], codebookDim*i:codebookDim*(i+1)].copy()
    # init assignments
    assigns = np.zeros((pointsCount, M), dtype='int32')
    for i in xrange(M):
        (idx, dis) = ynumpy.knn(rotatedPoints[:,codebookDim*i:codebookDim*(i+1)].astype('float32'), codebooks[i,:,:], nt=30)
        assigns[:,i] = idx.flatten()
    for it in xrange(ninit):
        approximations = reconstructPointsOPQ(assigns, codebooks)
        errors = rotatedPoints - approximations
        error = 0
        for pid in xrange(pointsCount):
            error += np.dot(errors[pid,:], errors[pid,:].T)
        print 'Quantization error: ' + str(error / pointsCount)
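        # update the rotation matrix R (orthogonal Procrustes step of OPQ) via SVD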
        U, s, V = np.linalg.svd(np.dot(approximations.T, points), full_matrices=False)
        R = np.dot(U, V)
        rotatedPoints = np.dot(points, R.T).astype('float32')
        for m in xrange(M):
            counts = np.bincount(assigns[:,m])
            for k in xrange(K):
                codebooks[m,k,:] = np.sum(rotatedPoints[assigns[:,m]==k,codebookDim*m:codebookDim*(m+1)], axis=0) / counts[k]
        for m in xrange(M):
            subpoints = rotatedPoints[:,codebookDim*m:codebookDim*(m+1)].copy()
            (idx, dis) = ynumpy.knn(subpoints, codebooks[m,:,:], nt=30)
            assigns[:,m] = idx.flatten()
    error = 0
    for m in xrange(M):
        subpoints = rotatedPoints[:,m*codebookDim:(m+1)*codebookDim].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebooks[m,:,:], nt=2)
        error += np.sum(dis.flatten())
    print 'Quantization error: ' + str(error / pointsCount)
    model = (codebooks, R)
    vocFile = open(vocFilename, 'wb')
    pickle.dump(model, vocFile)
    vocFile.close()
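
A minimal call sketch for the function above (file names and sizes are hypothetical; readXvecs, numpy, pickle and yael's ynumpy are assumed to be imported as in the rest of the module):

# learn an OPQ model for hypothetical 128-D vectors: 8 subspaces, 256 centroids each
learnCodebooksOPQ('learn.fvecs', pointsCount=100000, dim=128, M=8, K=256,
                  vocFilename='opq_model.pkl', ninit=20)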
Code Example #2
def get_distances(train_images):
	surf = cv2.SURF(hessianThreshold=500, extended=True)
	image_descs = []
	for fnames in train_images:
		try:
			img = cv2.imread(fnames, 0)
			kp, des = surf.detectAndCompute(img, None)
		except:
			continue
		image_descs.append(des)


	all_desc= np.vstack(image_descs)

	k = 128
	n_sample = k * 500

	sample = all_desc
	sample = sample.astype('float32')

	mean = sample.mean(axis = 0)
	sample = sample - mean
	cov = np.dot(sample.T, sample)

	eigvals, eigvecs = np.linalg.eig(cov)
	perm = eigvals.argsort()
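	# keep the eigenvectors of the 96 largest eigenvalues (PCA from 128-D SURF to 96-D)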
	pca_transform = eigvecs[:, perm[32:128]]

	sample = np.dot(sample, pca_transform)
	gmm = ynumpy.gmm_learn(sample, k)

	image_fvs = []
	for image_desc in image_descs:
		image_desc = np.dot(image_desc - mean, pca_transform)
		fv = ynumpy.fisher(gmm, image_desc, include = 'mu')
		image_fvs.append(fv)

	image_fvs = np.vstack(image_fvs)
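	# power-normalize, then L2-normalize each Fisher vector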
	image_fvs = np.sign(image_fvs) * np.abs(image_fvs) ** 0.5
	norms = np.sqrt(np.sum(image_fvs ** 2, 1))
	image_fvs /= norms.reshape(-1, 1)

	image_fvs[np.isnan(image_fvs)] = 100

	query_imnos = range(0, len(image_fvs) - 1)
	query_fvs = image_fvs#[query_imnos]

	results, distances = ynumpy.knn(query_fvs, image_fvs, nnn = len(image_fvs))
	s_results = np.argsort(results, axis = 1)
	s_distances = distances*0
	for i in range(distances.shape[0]):
	    s_distances[i,:] = distances[i,s_results[i,:]]
	
	return s_distances
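
A minimal usage sketch for the function above (the image folder is hypothetical; it needs OpenCV built with the non-free SURF module, numpy and yael's ynumpy):

import glob
train_images = sorted(glob.glob('images/*.jpg'))   # hypothetical image list
s_distances = get_distances(train_images)          # pairwise distances, columns ordered by image index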
Code Example #3
	def CompareImages(image_names, image_fvs):
		# get the indices of the query images (the subset of images that end in "0")
		query_imnos = [i for i, name in enumerate(image_names) if name[-1:] == "0"]

		# corresponding descriptors
		query_fvs = image_fvs[query_imnos]

		# get the 8 NNs for all query images in the image_fvs array
		results, distances = ynumpy.knn(query_fvs, image_fvs, nnn = 8)
		
		return query_imnos, results, distances
Code Example #4
File: tubeClust.py Project: caomw/apt
def getTrackNeighborsYaelYnumpy(feats, k):
    from yael import ynumpy, yael

    nrTracks = feats.shape[0]
    print '\tFinding %d nearest neighbors, for %d trajectories' % (k,nrTracks), 
    sys.stdout.flush()
   
    (simMatIDs, simMat) = ynumpy.knn(feats, feats, nnn=k+1, distance_type=12, nt=1)
    
    print '.'
    
    return simMatIDs[:,1:], simMat[:,1:]
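
A minimal usage sketch for the function above (the feature matrix is placeholder data; real input would be one descriptor row per trajectory):

import numpy as np
feats = np.random.rand(1000, 426).astype('float32')   # hypothetical trajectory descriptors
nnIds, nnDists = getTrackNeighborsYaelYnumpy(feats, k=10)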
Code Example #5
def GetKnn(ID):
    print ID
    info = Info.GetVideoInfo(ID)
    frame_sift_lst = [
        x for x in sorted(os.listdir(info['frame_sift_path']))
        if x.endswith('.sift')
    ]
    pano_sift_lst = [
        x for x in sorted(os.listdir(info['pano_sift_path']))
        if x.endswith('.sift')
    ]
    #print pano_sift_lst
    frame_desc = []
    pano_desc = []
    for one in frame_sift_lst:
        f_name = info['frame_sift_path'] + '/' + one
        desc = ReadSift.ReadSift(f_name)[1]
        if desc.size == 0:
            desc = np.zeros((0, 128), dtype='uint8')
        frame_desc.append(desc)
    for one in pano_sift_lst:
        f_name = info['pano_sift_path'] + '/' + one
        desc = ReadSift.ReadSift(f_name)[1]
        if desc.size == 0:
            desc = np.zeros((0, 128), dtype='uint8')
        pano_desc.append(desc)
    data = np.load(Info.Config.ROOT_PATH + '/gmm_2step.npz')
    gmm = (data['a'], data['b'], data['c'])
    mean = data['mean']
    pca_transform = data['pca_transform']

    image_fvs = []
    for image_dec in (frame_desc + pano_desc):
        image_dec = np.dot(image_dec - mean, pca_transform)
        fv = ynumpy.fisher(gmm, image_dec, include='mu')
        image_fvs.append(fv)
    image_fvs = np.vstack(image_fvs)
    image_fvs = np.sign(image_fvs) * np.abs(image_fvs)**0.5
    norms = np.sqrt(np.sum(image_fvs**2, 1))
    image_fvs /= norms.reshape(-1, 1)
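    # images with no local descriptors produce NaN Fisher vectors; push them far away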
    image_fvs[np.isnan(image_fvs)] = 100

    frame_fvs = image_fvs[0:len(frame_sift_lst)]
    pano_fvs = image_fvs[len(frame_sift_lst):]

    results, distances = ynumpy.knn(frame_fvs, pano_fvs, nnn=10)
    #print results
    #print distances
    np.save(info['pano_path'] + '/fisher_results', results)
Code Example #6
File: pqCoding.py Project: arbabenko/Quantizations
def getQuantizationErrorPQ(testFilename, dim, pointsCount, codebooksFilename):
    codebooks = pickle.load(open(codebooksFilename, 'rb'))
    points = readXvecs(testFilename, dim, pointsCount)
    M = codebooks.shape[0]
    if dim % M != 0:
        raise Exception('Dim is not a multiple of M!')
    else:
        codebooksDim = dim / M
    errors = np.zeros(pointsCount)
    for m in xrange(M):
        codebook = codebooks[m,:,:]
        subpoints = points[:,m*codebooksDim:(m+1)*codebooksDim].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebook, nt=3)
        errors += np.reshape(dis, pointsCount)
    return np.mean(errors)
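
A minimal call sketch for the function above (file names are hypothetical; the pickle holds the PQ codebooks as an M x K x subdim array):

err = getQuantizationErrorPQ('test.fvecs', dim=128, pointsCount=10000,
                             codebooksFilename='pq_codebooks.pkl')
print 'Mean PQ quantization error:', err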
Code Example #7
File: pqCoding.py Project: arbabenko/Quantizations
def encodeDatasetPQ(baseFilename, pointsCount, vocabFilename, codeFilename, threadsCount=30):
    codebooks = pickle.load(open(vocabFilename, 'rb'))
    M = codebooks.shape[0]
    dim = codebooks.shape[2] * M
    vocabDim = codebooks.shape[2]
    codes = np.zeros((pointsCount, M), dtype='int32')
    basePoints = readXvecs(baseFilename, dim, pointsCount)
    for m in xrange(M):
        codebook = codebooks[m,:,:]
        subpoints = basePoints[:,m*vocabDim:(m+1)*vocabDim].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebook, nt=threadsCount)
        codes[:,m] = idx.flatten()
    codeFile = open(codeFilename, 'wb')
    pickle.dump(codes, codeFile)
    codeFile.close()
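
A minimal call sketch for the function above (file names are hypothetical):

encodeDatasetPQ('base.fvecs', pointsCount=1000000,
                vocabFilename='pq_codebooks.pkl', codeFilename='pq_codes.pkl',
                threadsCount=30)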
Code Example #8
File: opqCoding.py Project: arbabenko/Quantizations
def getQuantizationErrorOPQ(codebooksFilename, pointsFilename, pointsCount):
    model = pickle.load(open(codebooksFilename, 'rb'))
    R = model[1]
    codebooks = model[0]
    codebookDim = codebooks.shape[2]
    M = codebooks.shape[0]
    dim = codebookDim * M
    points = readXvecs(pointsFilename, dim, pointsCount)
    rotatedPoints = np.dot(points, R.T).astype('float32')
    errors = 0.0
    for m in xrange(M):
        subpoints = rotatedPoints[:,m*dim/M:(m+1)*dim/M].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebooks[m,:,:], nt=2)
        errors += np.sum(dis.flatten())
    print errors / pointsCount
Code Example #9
File: pqCoding.py Project: macyli01/Quantizations
def getQuantizationErrorPQ(testFilename, dim, pointsCount, codebooksFilename):
    codebooks = pickle.load(open(codebooksFilename, 'rb'))
    points = readXvecs(testFilename, dim, pointsCount)
    M = codebooks.shape[0]
    if dim % M != 0:
        raise Exception('Dim is not a multiple of M!')
    else:
        codebooksDim = dim / M
    errors = np.zeros(pointsCount)
    for m in xrange(M):
        codebook = codebooks[m, :, :]
        subpoints = points[:, m * codebooksDim:(m + 1) * codebooksDim].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebook, nt=3)
        errors += np.reshape(dis, pointsCount)
    return np.mean(errors)
Code Example #10
def getTrackNeighborsYaelYnumpy(feats, k):
    from yael import ynumpy, yael

    nrTracks = feats.shape[0]
    print '\tFinding %d nearest neighbors, for %d trajectories' % (k,
                                                                   nrTracks),
    sys.stdout.flush()

    (simMatIDs, simMat) = ynumpy.knn(feats,
                                     feats,
                                     nnn=k + 1,
                                     distance_type=12,
                                     nt=1)

    print '.'

    return simMatIDs[:, 1:], simMat[:, 1:]
Code Example #11
File: opqCoding.py Project: arbabenko/Quantizations
def encodeDatasetOPQ(baseFilename, pointsCount, vocabFilename, codeFilename, threadsCount=30):
    model = pickle.load(open(vocabFilename, 'rb'))
    codebooks = model[0]
    R = model[1]
    M = codebooks.shape[0]
    dim = codebooks.shape[2] * M
    codes = np.zeros((pointsCount, M), dtype='int32')
    basePoints = readXvecs(baseFilename, dim, pointsCount)
    basePoints = np.dot(basePoints, R.T).astype('float32')
    error = 0
    for m in xrange(M):
        subpoints = basePoints[:,m*dim/M:(m+1)*dim/M].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebooks[m,:,:], nt=threadsCount)
        codes[:,m] = idx.flatten()
        error += np.sum(dis.flatten())
    codeFile = open(codeFilename, 'wb')
    pickle.dump(codes, codeFile)
    codeFile.close()
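
A minimal call sketch for the function above (file names are hypothetical; the vocab pickle is the (codebooks, R) model written by learnCodebooksOPQ in Code Example #1):

encodeDatasetOPQ('base.fvecs', pointsCount=1000000,
                 vocabFilename='opq_model.pkl', codeFilename='opq_codes.pkl')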
Code Example #12
def GetKnn(ID):
    print ID
    info = Info.GetVideoInfo(ID)
    frame_sift_lst = [x for x in sorted(os.listdir(info['frame_sift_path'])) if x.endswith('.sift')]
    pano_sift_lst = [x for x in sorted(os.listdir(info['pano_sift_path'])) if x.endswith('.sift')]
    #print pano_sift_lst
    frame_desc = []
    pano_desc = []
    for one in frame_sift_lst:
        f_name = info['frame_sift_path'] + '/' + one
        desc = ReadSift.ReadSift(f_name)[1]
        if desc.size == 0:
            desc = np.zeros((0, 128), dtype = 'uint8')
        frame_desc.append(desc)
    for one in pano_sift_lst:
        f_name = info['pano_sift_path'] + '/' + one
        desc = ReadSift.ReadSift(f_name)[1]
        if desc.size == 0:
            desc = np.zeros((0, 128), dtype = 'uint8')
        pano_desc.append(desc)
    data = np.load(Info.Config.ROOT_PATH + '/gmm_2step.npz')
    gmm = (data['a'], data['b'], data['c'])
    mean = data['mean']
    pca_transform = data['pca_transform']

    image_fvs = []
    for image_dec in (frame_desc + pano_desc):
        image_dec = np.dot(image_dec - mean, pca_transform)
        fv = ynumpy.fisher(gmm, image_dec, include = 'mu')
        image_fvs.append(fv)
    image_fvs = np.vstack(image_fvs)
    image_fvs = np.sign(image_fvs) * np.abs(image_fvs) ** 0.5
    norms = np.sqrt(np.sum(image_fvs ** 2, 1))
    image_fvs /= norms.reshape(-1,1)
    image_fvs[np.isnan(image_fvs)] = 100
    
    frame_fvs = image_fvs[0:len(frame_sift_lst)]
    pano_fvs = image_fvs[len(frame_sift_lst):]
    
    results, distances = ynumpy.knn(frame_fvs, pano_fvs, nnn = 10)
    #print results 
    #print distances
    np.save(info['pano_path'] + '/fisher_results', results)
Code Example #13
File: pqCoding.py Project: macyli01/Quantizations
def encodeDatasetPQ(baseFilename,
                    pointsCount,
                    vocabFilename,
                    codeFilename,
                    threadsCount=30):
    codebooks = pickle.load(open(vocabFilename, 'rb'))
    M = codebooks.shape[0]
    dim = codebooks.shape[2] * M
    vocabDim = codebooks.shape[2]
    codes = np.zeros((pointsCount, M), dtype='int32')
    basePoints = readXvecs(baseFilename, dim, pointsCount)
    for m in xrange(M):
        codebook = codebooks[m, :, :]
        subpoints = basePoints[:, m * vocabDim:(m + 1) * vocabDim].copy()
        (idx, dis) = ynumpy.knn(subpoints, codebook, nt=threadsCount)
        codes[:, m] = idx.flatten()
    codeFile = open(codeFilename, 'wb')
    pickle.dump(codes, codeFile)
    codeFile.close()
Code Example #14
def bovw(codebook, X, nt=1):
    inds, dists = ynumpy.knn(X, codebook, nnn=1, distance_type=2, nt=1)
    bins, _ = np.histogram(inds[:, 0],
                           bins=INTERNAL_PARAMETERS['bovw_codebook_k'])

    return bins
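
A minimal usage sketch for the function above (placeholder descriptors; INTERNAL_PARAMETERS comes from the surrounding module, and the codebook is assumed to be learned with yael's ynumpy.kmeans):

import numpy as np
from yael import ynumpy

X = np.random.rand(5000, 64).astype('float32')   # hypothetical local descriptors
k = INTERNAL_PARAMETERS['bovw_codebook_k']        # e.g. 512 visual words
codebook = ynumpy.kmeans(X, k)                    # learn the visual vocabulary
hist = bovw(codebook, X)                          # bag-of-visual-words histogram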
Code Example #15
File: test_ynumpy.py Project: GarfieldEr007/yael
nnn = 2

base = numpy.array([range(i, i+d) for i in range(5)], 
                   dtype = numpy.float32)

queries = numpy.array([[x + 0.25 for x in range(i, i+d)]
                       for i in range(nq)], 
                      dtype = numpy.float32)

print "base="
print base

print "queries="
print queries

idx, dis = ynumpy.knn(base, queries, nnn, distance_type = 1)

print "indices="
print idx 

print "distances="
print dis


try: 
    # v, meta = ynumpy.siftgeo_read('/Users/matthijs//Desktop/papers/lhl/trunk/data/test_query_10k.siftgeo')
    v, meta = ynumpy.siftgeo_read('/scratch2/bigimbaz/dataset/holidays/siftgeo/hesaff_norm/128300.siftgeo')

    v = v.astype('float32')
    
except Exception, e: 
Code Example #16
File: demo.py Project: bityangke/yael
# L2 normalize
norms = np.sqrt(np.sum(image_fvs ** 2, 1))
image_fvs /= norms.reshape(-1, 1)

# handle images with 0 local descriptor (100 = far away from "normal" images)
image_fvs[np.isnan(image_fvs)] = 100

# get the indices of the query images (the subset of images that end in "00")
query_imnos = [i for i, name in enumerate(image_names) if name[-2:] == "00"]

# corresponding descriptors
query_fvs = image_fvs[query_imnos]

# get the 8 NNs for all query images in the image_fvs array
results, distances = ynumpy.knn(query_fvs, image_fvs, nnn = 8)

aps = []
for qimno, qres in zip(query_imnos, results):
    qname = image_names[qimno]
    # collect the positive results in the dataset
    # the positives have the same prefix as the query image
    positive_results = set([i for i, name in enumerate(image_names)
                            if name != qname and name[:4] == qname[:4]])
    #
    # ranks of positives. We skip the result #0, assumed to be the query image
    ranks = [i for i, res in enumerate(qres[1:]) if res in positive_results]
    #
    # accumulate trapezoids with this basis
    recall_step = 1.0 / len(positive_results)
    ap = 0
Code Example #17
File: search_ukbench.py Project: pioneer911/yael
# "power normalization"
dataset = numpy.sqrt(numpy.abs(dataset)) * numpy.sign(dataset)

# per-Fisher vector L2 normalization
norms = numpy.sqrt((dataset**2).sum(axis=1))
dataset /= norms[:, None]

print("Searching 1 image per group")

# select the images that will serve as queries
query_idx = numpy.nonzero(image_range % 4 == 0)[0]
queries = dataset[query_idx].copy()

# compute all search results at once
results, distances = ynumpy.knn(queries, dataset, nnn=4)

if show:
    fig = plt.figure()

for i, query_results in zip(query_idx, results):
    imno = image_range[i]
    results_imno = image_range[query_results]
    n_ok = (results_imno / 4 == imno / 4).sum()
    print("  Image %d:" % imno, results_imno, "n_ok=", n_ok)

    if show:
        fig.canvas.set_window_title("Query image %d" % imno)
        for idx, j in enumerate([i] + list(query_results)):
            imagename = "%s/ukbench%05d.jpg" % (image_directory,
                                                image_range[j])
Code Example #18
def bovw(codebook, X, nt=1):
    inds, dists = ynumpy.knn(X, codebook, nnn=1, distance_type=2, nt=1)
    bins, _ = np.histogram(inds[:,0], bins=INTERNAL_PARAMETERS['bovw_codebook_k'])

    return bins
Code Example #19
File: test_ynumpy.py Project: ybrs/yael
n = 5
nq = 3
nnn = 2

base = numpy.array([range(i, i + d) for i in range(5)], dtype=numpy.float32)

queries = numpy.array([[x + 0.25 for x in range(i, i + d)] for i in range(nq)],
                      dtype=numpy.float32)

print "base="
print base

print "queries="
print queries

idx, dis = ynumpy.knn(base, queries, nnn, distance_type=1)

print "indices="
print idx

print "distances="
print dis

try:
    # v, meta = ynumpy.siftgeo_read('/Users/matthijs//Desktop/papers/lhl/trunk/data/test_query_10k.siftgeo')
    v, meta = ynumpy.siftgeo_read(
        '/scratch2/bigimbaz/dataset/holidays/siftgeo/hesaff_norm/128300.siftgeo'
    )

    v = v.astype('float32')
Code Example #20
File: search_ukbench.py Project: Erotemic/yael
# "power normalization"
dataset = numpy.sqrt(numpy.abs(dataset)) * numpy.sign(dataset)

# per-Fisher vector L2 normalization
norms = numpy.sqrt((dataset ** 2).sum(axis=1))
dataset /= norms[:, None]

print("Searching 1 image per group")

# select the images that will serve as queries
query_idx = numpy.nonzero(image_range % 4 == 0)[0]
queries = dataset[query_idx].copy()

# compute all search results at once
results, distances = ynumpy.knn(queries, dataset, nnn=4)

if show:
    fig = plt.figure()

for i, query_results in zip(query_idx, results):
    imno = image_range[i]
    results_imno = image_range[query_results]
    n_ok = (results_imno / 4 == imno / 4).sum()
    print("  Image %d:" % imno, results_imno, "n_ok=", n_ok)

    if show:
        fig.canvas.set_window_title("Query image %d" % imno)
        for idx, j in enumerate([i] + list(query_results)):
            imagename = "%s/ukbench%05d.jpg" % (
                image_directory, image_range[j])