Example #1
        # reuse the cached result if it was saved by a previous run
        if os.path.exists(os.path.join(images_path, 'tamura_directionality_%d.npy' % (r + 1))):
            tamura_directionality_v[r] = np.load(os.path.join(images_path, 'tamura_directionality_%d.npy' % (r + 1)))
        else:
            # lazily load the image on first use (optionally as greyscale)
            if img is None:
                img = misc.imread(os.path.join(images_path, 'im%d.jpg' % (r + 1)))
                if useGreyScale:
                    img = rgb2gray(img)

            print 'Calculating Tamura DIRECTIONALITY for file ', os.path.join(images_path, 'im%d.jpg' % (r + 1))

            try:
                start_time = time.time()
                tamura_directionality_v[r] = degreeDirect(img, threshold, neigh)
                elapsed_time = time.time() - start_time
                np.save(os.path.join(images_path, 'tamura_directionality_%d.npy' % (r + 1)), tamura_directionality_v[r])
                # timing is printed inside the try block so a failed run
                # never reaches an undefined elapsed_time
                print 'It took %ds to calculate DIRECTIONALITY...' % elapsed_time
            except Exception:
                failures_directionality.append(r + 1)

        #print tamura_directionality_v[r]
 
 if useGarbor:
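
The fragment above caches one Tamura directionality value per image and recomputes only on a cache miss. The `degreeDirect` function itself is not shown in the source, so the sketch below is an assumed reconstruction of a Tamura-style degree-of-directionality measure, simplified to a single histogram peak; here `threshold` is taken as the edge-strength cutoff, and the role of the original's `neigh` parameter is not modelled.

    import numpy as np
    from scipy import ndimage

    def directionality_sketch(img, threshold=12.0, n_bins=16):
        # Simplified Tamura directionality: 1 minus the spread of the
        # gradient-orientation histogram around its dominant peak.
        img = np.asarray(img, dtype=float)
        dh = ndimage.prewitt(img, axis=1)        # horizontal derivative
        dv = ndimage.prewitt(img, axis=0)        # vertical derivative
        mag = (np.abs(dh) + np.abs(dv)) / 2.0    # gradient magnitude
        theta = np.arctan2(dv, dh) % np.pi       # orientation folded into [0, pi)
        strong = mag >= threshold                # keep sufficiently strong edges
        hist, edges = np.histogram(theta[strong], bins=n_bins, range=(0.0, np.pi))
        hist = hist.astype(float) / max(hist.sum(), 1)
        centers = (edges[:-1] + edges[1:]) / 2.0
        peak = centers[np.argmax(hist)]
        # sharper peaks (more directional textures) give values closer to 1
        return 1.0 - n_bins * np.sum(((centers - peak) ** 2) * hist)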
     
Example #2
# load the 1st-level artefacts for this feature configuration: VQ codes of the
# training observations and the per-feature standard deviations
vq_codes_obs_features = np.load('%s/vq_codes_obs_features_%d_%d_%s_%s_%s_%s.npy' % (path_1, kCentroids_features, cIter_features, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
standard_deviations_features = np.load('%s/standard_deviations_features_%d_%d_%s_%s_%s_%s.npy' % (path_1, kCentroids_features, cIter_features, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))

learning_set_features_image = np.zeros(n_colums_features)
j = 0  # next free column of the feature vector

if useTamuraCoarseness:
    learning_set_features_image[j] = coarseness(img)
    j = j + 1

if useTamuraContrast:
    learning_set_features_image[j] = contrast(img)
    j = j + 1

if useTamuraDirectionality:
    learning_set_features_image[j] = degreeDirect(img, threshold, neigh)
    j = j + 1

if useGarbor:
    # Gabor features occupy the next n_kernels * 2 columns
    start_i = j
    stop_i = start_i + n_kernels * 2
    learning_set_features_image[start_i : stop_i] = np.resize(garbor_features(img, kernels), (1, n_kernels * 2))

# scale each feature by its training-set standard deviation
for i in range(n_colums_features):
    learning_set_features_image[i] = learning_set_features_image[i] / standard_deviations_features[i]


# assign the image to its nearest 1st-level KMeans centroid
(index, dist) = spy.vq.vq(np.array([learning_set_features_image]), centroids_codebook_features)
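
The `garbor_features(img, kernels)` call above is not shown in the source either; since it must fill exactly `n_kernels * 2` columns, a plausible (assumed) layout is two statistics per Gabor kernel response. A minimal sketch built on `skimage.filters.gabor_kernel`, with an illustrative kernel bank:

    import numpy as np
    from scipy import ndimage
    from skimage.filters import gabor_kernel

    def make_kernels():
        # illustrative bank: 4 orientations x 2 frequencies = 8 kernels
        return [np.real(gabor_kernel(frequency=f, theta=t))
                for t in np.arange(0, np.pi, np.pi / 4)
                for f in (0.1, 0.3)]

    def gabor_features_sketch(img, kernels):
        img = np.asarray(img, dtype=float)
        feats = np.zeros((len(kernels), 2))
        for k, kernel in enumerate(kernels):
            response = ndimage.convolve(img, kernel, mode='wrap')
            feats[k, 0] = response.mean()    # statistic 1 per kernel
            feats[k, 1] = response.var()     # statistic 2 per kernel
        return feats                         # n_kernels * 2 values in total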
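
The final `spy.vq.vq` call assigns the normalised feature vector to its nearest 1st-level centroid. Below is a self-contained usage sketch of that lookup with `scipy.cluster.vq`; names and shapes are illustrative, not from the source. Note that the per-feature division by the standard deviations above is exactly what `scipy.cluster.vq.whiten` computes on the training side.

    import numpy as np
    from scipy.cluster import vq

    rng = np.random.default_rng(0)
    features = rng.random((100, 5))      # 100 training vectors, 5 features each

    # dividing each column by its std dev mirrors the normalisation loop above
    stds = features.std(axis=0)
    codebook, distortion = vq.kmeans(features / stds, 4, iter=10)  # 4 centroids

    query = rng.random(5) / stds         # scale a new vector the same way
    index, dist = vq.vq(query[np.newaxis, :], codebook)
    print(index[0], dist[0])             # nearest-centroid id and its distance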