Code example #1
File: process_images.py  Project: gciotto/workspace
                    # Cached Gabor features already exist on disk: load them.
                    garbor_v[start_i:stop_i] = np.load(os.path.join(images_path, 'garbor_%d.npy' % (r + 1)))

                else:

                    # Read the image lazily, converting to greyscale if requested.
                    if img is None:
                        img = misc.imread(os.path.join(images_path, 'im%d.jpg' % (r + 1)))

                        if useGreyScale:
                            img = rgb2gray(img)

                    print('Calculating GARBOR for file', os.path.join(images_path, 'im%d.jpg' % (r + 1)))

                    try:
                        start_time = time.time()
                        # Two values per kernel, flattened into a single feature row.
                        garbor_v[start_i:stop_i] = np.resize(garbor_features(img, kernels), (1, len(kernels) * 2))
                        elapsed_time = time.time() - start_time

                        print('It took %ds to calculate GARBOR...' % elapsed_time)

                        # Cache the result so later runs can skip this computation.
                        np.save(os.path.join(images_path, 'garbor_%d.npy' % (r + 1)), garbor_v[start_i:stop_i])

                    except Exception:
                        failures_garbor.append(r + 1)

print('Failures COARSENESS', failures_coarseness)
print('Failures CONTRAST', failures_contrast)
print('Failures DIRECTIONALITY', failures_directionality)
print('Failures GARBOR', failures_garbor)

if not histogram_hasBeenCalculated:
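
Neither `garbor_features` nor `kernels` is defined in this excerpt. A minimal sketch of what they plausibly look like, assuming the common scikit-image Gabor recipe in which each kernel contributes the mean and variance of its filtered response (consistent with the `len(kernels) * 2` feature count above); the parameter grid below is hypothetical:

import numpy as np
from scipy import ndimage as ndi
from skimage.filters import gabor_kernel

def garbor_features(image, kernels):
    # One (mean, variance) pair per kernel -> len(kernels) * 2 values,
    # matching the (1, len(kernels) * 2) reshape in the snippet above.
    feats = np.zeros((len(kernels), 2), dtype=np.double)
    for k, kernel in enumerate(kernels):
        filtered = ndi.convolve(image, kernel, mode='wrap')
        feats[k, 0] = filtered.mean()
        feats[k, 1] = filtered.var()
    return feats

# A typical bank of real-valued Gabor kernels over several orientations,
# scales and frequencies (hypothetical parameter choices).
kernels = []
for theta in (0, 0.25, 0.5, 0.75):
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernels.append(np.real(gabor_kernel(frequency, theta=theta * np.pi,
                                                sigma_x=sigma, sigma_y=sigma)))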
Code example #2
File: find_image.py  Project: gciotto/workspace
    j += 1

if useTamuraContrast:
    learning_set_features_image[j] = contrast(img)
    j += 1

if useTamuraDirectionality:
    learning_set_features_image[j] = degreeDirect(img, threshold, neigh)
    j += 1

if useGarbor:

    start_i = j
    stop_i = start_i + n_kernels * 2

    # Two values per kernel, flattened into a single feature row.
    learning_set_features_image[start_i:stop_i] = np.resize(garbor_features(img, kernels), (1, n_kernels * 2))

# Normalize each feature by its standard deviation over the learning set.
for i in range(n_colums_features):
    learning_set_features_image[i] /= standard_deviations_features[i]


# Find the nearest centroid from the 1st-level k-means for this feature vector.
(index, dist) = spy.vq.vq(np.array([learning_set_features_image]), centroids_codebook_features)

path_2 = "%s/arrays/level2_%d_%d" % (images_path, kCentroids, cIter)

# Load the 2nd-level arrays associated with that centroid; the file names encode
# the centroid index, the k-means parameters and the active feature flags.
images_index = np.load('%s/centroids_codebook_images_index_%d_%d_%d_%s_%s_%s_%s.npy' % (path_2, index[0], kCentroids, cIter, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
centroids_codebook_histogram = np.load('%s/centroids_codebook_histogram_%d_%d_%d_%s_%s_%s_%s.npy' % (path_2, index[0], kCentroids, cIter, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
vq_codes_obs_histogram = np.load('%s/vq_codes_obs_histogram_%d_%d_%d_%s_%s_%s_%s.npy' % (path_2, index[0], kCentroids, cIter, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
standard_deviations_histogram = np.load('%s/standard_deviations_histogram_%d_%d_%d_%s_%s_%s_%s.npy' % (path_2, index[0], kCentroids, cIter, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
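
`spy` is presumably `scipy.cluster`, so `spy.vq.vq` is SciPy's vector quantization. A self-contained sketch of the same lookup step on made-up data (the array sizes and centroid count are hypothetical):

import numpy as np
import scipy.cluster as spy

# Hypothetical data: 100 observations with 8 features each.
features = np.random.rand(100, 8)

# Normalize each column by its standard deviation (what the loop above
# does manually), then build a codebook of 5 centroids with k-means.
whitened = spy.vq.whiten(features)
codebook, _ = spy.vq.kmeans(whitened, 5)

# vq() returns, for each observation, the index of the nearest centroid
# and the distance to it; index[0] picks the code for a single vector.
index, dist = spy.vq.vq(whitened[:1], codebook)
print('nearest centroid:', index[0], 'distance:', dist[0])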