def test_daisy_normalization():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))

    descs = daisy(img, normalization='l1')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(np.sum(descs[i, j, :]), 1)
    descs_ = daisy(img)
    assert_almost_equal(descs, descs_)

    descs = daisy(img, normalization='l2')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(sqrt(np.sum(descs[i, j, :] ** 2)), 1)

    orientations = 8
    descs = daisy(img, orientations=orientations, normalization='daisy')
    desc_dims = descs.shape[2]
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            for k in range(0, desc_dims, orientations):
                assert_almost_equal(
                    sqrt(np.sum(descs[i, j, k:k + orientations] ** 2)), 1)

    img = np.zeros((50, 50))
    descs = daisy(img, normalization='off')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(np.sum(descs[i, j, :]), 0)

    assert_raises(ValueError, daisy, img, normalization='does_not_exist')
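# The test above exercises the four `normalization` modes of
# `skimage.feature.daisy`. A minimal self-contained sketch of what each
# invariant means, assuming skimage and numpy are available:
#   'l1'    - each descriptor sums to 1
#   'l2'    - each descriptor has unit Euclidean norm
#   'daisy' - each individual orientation histogram has unit L2 norm
#   'off'   - raw, unnormalized histograms
import numpy as np
from skimage import data, img_as_float
from skimage.feature import daisy

img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))
for norm in ('l1', 'l2', 'daisy', 'off'):
    descs = daisy(img, normalization=norm)
    print(norm, descs.shape, float(descs[0, 0].sum()))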
def bouquet(image, step=4, radius=6, rings=2, visualize=False):
    if visualize:
        descs, descs_img = daisy(image, step=step, radius=radius, rings=rings,
                                 histograms=6, orientations=8, visualize=True)
        plt.imshow(descs_img, interpolation='nearest')
        return descs, descs_img
    else:
        descs = daisy(image, step=step, radius=radius, rings=rings,
                      histograms=6, orientations=8, visualize=False)
        return descs
def _extract_rgb(self, rgb):
    # Integer division: `step` and `radius` must be ints.
    kwargs = dict(step=rgb.shape[0] // 5, radius=rgb.shape[0] // 10,
                  rings=2, histograms=6, orientations=8)
    # Compute once, store on the instance, and return the same array
    # (the original recomputed the descriptors for the return value).
    self.daisyextractor_xd = np.hstack(
        [daisy(rgb[:, :, i], **kwargs).ravel() for i in [0, 1]])
    return self.daisyextractor_xd
def test_descs_shape():
    img = img_as_float(data.astronaut()[:256, :256].mean(axis=2))
    radius = 20
    step = 8
    descs = daisy(img, radius=radius, step=step)
    assert descs.shape[0] == ceil((img.shape[0] - radius * 2) / float(step))
    assert descs.shape[1] == ceil((img.shape[1] - radius * 2) / float(step))

    img = img[:-1, :-2]
    radius = 5
    step = 3
    descs = daisy(img, radius=radius, step=step)
    assert descs.shape[0] == ceil((img.shape[0] - radius * 2) / float(step))
    assert descs.shape[1] == ceil((img.shape[1] - radius * 2) / float(step))
def test_daisy_desc_dims():
    img = img_as_float(data.astronaut()[:128, :128].mean(axis=2))
    rings = 2
    histograms = 4
    orientations = 3
    descs = daisy(img, rings=rings, histograms=histograms,
                  orientations=orientations)
    assert descs.shape[2] == (rings * histograms + 1) * orientations

    rings = 4
    histograms = 5
    orientations = 13
    descs = daisy(img, rings=rings, histograms=histograms,
                  orientations=orientations)
    assert descs.shape[2] == (rings * histograms + 1) * orientations
def extract_daisy_and_hog_features_from_image(file_path, daisy_step_size=32,
                                              daisy_radius=32,
                                              hog_pixels_per_cell=16,
                                              hog_cells_per_block=1):
    img = io.imread(file_path)
    img_gray = rgb2gray(img)
    # Resize to a common dimension (roughly the average size of images in
    # the dataset).
    img = skimage.transform.resize(img_gray, (512, 512))
    # DAISY feature descriptors (the original used histograms=6).
    descs = daisy(img, step=daisy_step_size, radius=daisy_radius, rings=2,
                  histograms=8, orientations=8, visualize=False)
    descs_num = descs.shape[0] * descs.shape[1]
    daisy_descriptors = descs.reshape(descs_num, descs.shape[2])
    # Note: older skimage versions spelled this parameter `visualise`.
    hog_descriptor = hog(img, orientations=8,
                         pixels_per_cell=(hog_pixels_per_cell,
                                          hog_pixels_per_cell),
                         cells_per_block=(hog_cells_per_block,
                                          hog_cells_per_block),
                         visualize=False, feature_vector=True)
    return daisy_descriptors, hog_descriptor
def extract_features(dataset):
    nimgs = dataset.getLength()
    features = list()
    ni = 0
    total_time = 0
    for cl in dataset.getClasses():
        paths = dataset.paths[cl]
        for impath in paths:
            t1 = time()
            im = sio.imread(impath, as_gray=True)
            feats = daisy(im, step=4)
            feats = feats.reshape((-1, 200))
            features.append(feats)
            t2 = time()
            t3 = t2 - t1
            total_time += t3
            ni += 1
            print("Image {0}/{1} [{2:0.2f}/{3:0.2f} sec]".format(
                ni, nimgs, t3, t3 * (nimgs - ni)))
    print("Stacking all features...")
    t1 = time()
    stacked = np.vstack(features)
    t2 = time()
    total_time += t2 - t1  # the original added t2 - t2, which is always zero
    print("Total time: {0:0.2f} sec".format(total_time))
    return stacked
def getFeatures(image, opt):
    image = cv2.imread(image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if opt == "sift":
        sift = cv2.xfeatures2d.SIFT_create()
        _, d = sift.detectAndCompute(image, None)
        return d
    elif opt == "hog":
        d, _ = hog(image, orientations=8, pixels_per_cell=(16, 16),
                   cells_per_block=(2, 2), visualize=True,
                   multichannel=False)
        return d
    elif opt == "daisy":
        d, _ = daisy(image, step=5, radius=24, rings=2, histograms=6,
                     orientations=8, visualize=True)
        return d
def extract_features(dataset):
    # Get the total number of images in the dataset
    nimgs = dataset.getLength()
    # Create an empty list of features
    features = list()
    ni = 0  # number of images processed so far
    total_time = 0
    for cl in dataset.getClasses():
        paths = dataset.paths[cl]
        for impath in paths:
            t1 = time()  # current timestamp
            im = sio.imread(impath, as_gray=True)  # load image as grayscale
            feats = daisy(im)  # extract features
            feats = feats.reshape((-1, 200))  # reshape the array
            features.append(feats)  # add the features to the list
            t2 = time()  # current timestamp
            t3 = t2 - t1  # elapsed time
            total_time += t3
            # Print a progress message with an estimate of the remaining time
            ni += 1  # update the number of images processed so far
            if nimgs - ni == 5:
                print("...")
            if nimgs - ni < 5:
                print("Image {0}/{1} [{2:0.2f}/{3:0.2f} sec]".format(
                    ni, nimgs, t3, t3 * (nimgs - ni)))
    print("Stacking all features...")
    t1 = time()
    # Stack the features extracted from all images
    stacked = np.vstack(features)
    t2 = time()
    total_time += t2 - t1  # the original added t2 - t2, which is always zero
    print("Total time: {0:0.2f} sec".format(total_time))
    return stacked
def extract(self, image):
    features = np.array([])
    if 'raw' in self.features:
        vec = image.flatten()
        features = np.append(features, vec)
    if 'textons' in self.features:
        import gen_histogram as tx
        vec = np.array(tx.histogram(image, self.centers))
        features = np.append(features, vec)
    if 'hog' in self.features:
        vec = hog(image, cells_per_block=(3, 3))
        vec = np.append(vec, hog(image, cells_per_block=(4, 4)))
        vec = np.append(vec, hog(image, cells_per_block=(1, 1)))
        vec = np.append(vec, hog(image, cells_per_block=(2, 2)))
        features = np.append(features, vec)
    if 'lbp' in self.features:
        vec = local_binary_pattern(image, 24, 3).flatten()
        features = np.append(features, vec)
    if 'daisy' in self.features:
        vec = daisy(image).flatten()
        features = np.append(features, vec)
    return features
def get_words_labels(dataset, image_names, vq):
    all_superpixels, all_counts, all_labels = [], [], []
    for image_name in image_names:
        image = dataset.get_image(image_name)
        # compute features and vector-quantize
        features = daisy(rgb2gray(image), step=4)
        words = vq.predict(features.reshape(-1, 200))
        # compute and store segmentation
        segmentation = slic(image, compactness=20, sigma=0)
        segmentation_clean, _ = merge_small_sp(image, label(segmentation),
                                               min_size=200)
        all_superpixels.append(segmentation_clean)
        # find feature locations in the image, correspondence to superpixels
        radius = 15
        gridx, gridy = np.mgrid[radius:image.shape[0] - radius:4,
                                radius:image.shape[1] - radius:4]
        superpixels_for_features = segmentation_clean[gridx.ravel(),
                                                      gridy.ravel()]
        # create bag-of-word histograms
        counts = coo_matrix((np.ones(len(words)),
                             (superpixels_for_features, words)),
                            shape=(segmentation_clean.max() + 1,
                                   300)).toarray()
        all_counts.append(counts)
        # compute superpixel labels
        labels = msrc.get_ground_truth(image_name)
        votes = coo_matrix((np.ones(labels.size),
                            (segmentation_clean.ravel(),
                             labels.ravel()))).toarray()
        print("votes shape: %s" % str(votes.shape))
        superpixel_labels = np.argmax(votes, axis=1)
        all_labels.append(superpixel_labels)
    return all_counts, all_superpixels, all_labels
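# The coo_matrix trick above builds a per-superpixel bag-of-words histogram
# in one shot: duplicate (row, col) entries are summed when the sparse
# matrix is densified, which counts how often each visual word occurs in
# each superpixel. A minimal illustration with toy data (not from the
# function above):
import numpy as np
from scipy.sparse import coo_matrix

superpixel_ids = np.array([0, 0, 1, 1, 1])  # superpixel of each feature
word_ids = np.array([2, 2, 0, 1, 1])        # visual word it quantized to
counts = coo_matrix((np.ones(len(word_ids)), (superpixel_ids, word_ids)),
                    shape=(2, 3)).toarray()
print(counts)  # [[0. 0. 2.], [1. 2. 0.]] -- duplicate entries are summed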
def extract_image_features(image_url):
    # Read image
    image = io.imread(image_url)
    # Pre-process image
    width = 255
    height = 255
    # Ensure all images have the same dimensions
    image = resize(image, (width, height), mode='reflect')
    # Restrict the dimensionality of our data from 3D to 2D
    greyscale_image = color.rgb2gray(image)
    # Extract features
    # featureVector = feature.hog(greyscale_image, orientations=9,
    #                             pixels_per_cell=(8, 8),
    #                             cells_per_block=(1, 1),
    #                             block_norm='L2-Hys')
    featureVector = feature.daisy(greyscale_image, step=4, radius=15,
                                  rings=3, histograms=8, orientations=8,
                                  normalization='l2', sigmas=None,
                                  ring_radii=None, visualize=False)
    return featureVector
def GenFeatures():
    PATCH_DIR = consts.SAVE_PATCH_PATH
    file_list = os.listdir(PATCH_DIR)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # equivalent timer.
    t_start = time.perf_counter()
    clip_num = int(min(len(file_list), N) / consts.COUNT_CLIP)
    for c in range(consts.COUNT_CLIP):
        data = []
        for i in range(c * clip_num, c * clip_num + clip_num):
            try:
                (filename, extension) = os.path.splitext(file_list[i])
                if extension != '.png':
                    continue
                full_path = os.path.join(PATCH_DIR, file_list[i])
                # scipy.misc.imread was removed from SciPy; imageio.imread
                # is the usual drop-in replacement.
                image_array = imageio.imread(full_path)
                daisy_descriptor = list(daisy(image_array, rings=2)[0][0])
                data.append((daisy_descriptor, file_list[i]))
            except Exception as e:
                print(i, file_list[i], e)
            if i % 1000 == 0:
                print('%r files processed, %r valid features generated, '
                      '%ds cost' % (i, len(data),
                                    int(time.perf_counter() - t_start)))
        save_file_path = os.path.join(
            consts.SAVE_PATCH_CLUSTER_CENTER_PATH,
            consts.FEATURE_SAVE_NAME + str(c) + '.txt')
        with open(save_file_path, 'w+') as f:
            f.write(str(data))
def daisy_features(train_data_images, train_data_split_images,
                   test_data_images, IMG_SIZE):
    canny(train_data_images, train_data_split_images, test_data_images,
          IMG_SIZE)
    train_data_features = []
    test_data_features = []
    train_data = []
    test_data = []
    train_data_split_crossfold = []
    print(4)
    # Earlier experiment with an OpenCV bag-of-words pipeline, kept for
    # reference:
    # bow_train = cv2.BOWKMeansTrainer(8)
    # flann_params = dict(algorithm=1, trees=5)
    # matcher = cv2.FlannBasedMatcher(flann_params, {})
    # detect = cv2.xfeatures2d.SIFT_create()
    # extract = cv2.xfeatures2d.SIFT_create()
    # bow_extract = cv2.BOWImgDescriptorExtractor(extract, matcher)
    for image in train_data_images:
        img = imread(image, as_gray=True)
        resized_image = resize(img, (40, 40))
        train_data.append(resized_image)
    for image in train_data_split_images:
        img = imread(image, as_gray=True)
        resized_image = resize(img, (40, 40))
        train_data_split_crossfold.append(resized_image)
    for image in test_data_images:
        img = imread(image, as_gray=True)
        resized_image = resize(img, (40, 40))
        test_data.append(resized_image)
    print(6)
    des = []
    des_cross = []
    des_test = []
    radius = 5
    for image in train_data:
        descs = daisy(image, radius=radius)
        des.append(descs)
    train_data_features = bow(des, train_data)
    del des
    # The cross-validation and test splits were handled the same way in the
    # original, but are commented out:
    # for image in train_data_split_crossfold:
    #     des_cross.append(daisy(image, radius=radius))
    # for image in test_data:
    #     des_test.append(daisy(image, radius=radius))
def generate_daisy_features(filename):
    image_arr = io.imread(filename, as_gray=True)
    return daisy(image_arr, step=8, radius=30, rings=3, histograms=6,
                 orientations=8)
def daisy_features(image, name, label, step_=4, rings_=3, histograms_=2,
                   orientations_=8):
    if image.shape[0] < 32 or image.shape[1] < 32:
        image = resize(image, (32, 32))
    a = daisy(image, step=step_, rings=rings_, histograms=histograms_,
              orientations=orientations_)
    result = np.asarray(a.reshape(-1))
    return result, name, label
def daisy_features(image):
    features = feature.daisy(image, step=8, radius=20, rings=2, histograms=4,
                             orientations=4)
    return features.flatten()
def resize_image_and_get_features(dir_of_file_full_path, filename, num_rows,
                                  num_cols, recursive_file_search,
                                  dir_upstream_of_edited):
    if recursive_file_search:
        resized_filename = resize_image.resize_image_recursive_file_search(
            dir_of_file_full_path, filename, num_rows, num_cols,
            dir_upstream_of_edited)
    else:
        resized_filename = resize_image.resize_image(dir_of_file_full_path,
                                                     filename, num_rows,
                                                     num_cols)
    im = imread(resized_filename)
    index = 0
    min_frequency = 10
    max_frequency = 1000
    gabor_filter_features = get_gabor_filter_features(im, index,
                                                      min_frequency,
                                                      max_frequency)
    print(gabor_filter_features.shape)
    num_dim_image = len(im.shape)
    if num_dim_image == 3:
        # Only the first channel is used for DAISY; the original also
        # sliced (but never used) the other two channels.
        im1 = im[:, :, 0]
    else:
        im1 = im
    descs1, descs_img = daisy(im1, step=180, radius=58, rings=2,
                              histograms=6, orientations=8, visualize=True)
    # A fuller (commented-out) variant stacked DAISY descriptors from all
    # three channels together with the raw pixel values.
    flat_descs = descs1.flatten()
    return np.hstack((gabor_filter_features, flat_descs))
def generate_daisy(self):
    gray_image = color.rgb2gray(self.image)
    return daisy(gray_image, step=180, radius=32, rings=2, histograms=6,
                 orientations=8, visualize=True)
def extract_and_describe(img, kmeans):
    # Extract the local features from an image
    features = daisy(rgb2gray(img)).reshape((-1, 200))
    # Assign each local feature to a word of the vocabulary
    assignments = kmeans.predict(features)
    # Compute the histogram
    histogram, _ = np.histogram(assignments, bins=500, range=(0, 499))
    # Return the histogram (note: despite the original comment, no
    # normalization is applied here)
    return histogram
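# `extract_and_describe` assumes a KMeans vocabulary fitted beforehand on
# DAISY descriptors pooled from the training set. A minimal sketch of that
# step, assuming 500 visual words and the default 200-dimensional DAISY
# descriptors used above (`training_images` is a hypothetical iterable of
# RGB arrays):
import numpy as np
from sklearn.cluster import KMeans
from skimage.color import rgb2gray
from skimage.feature import daisy

all_descs = np.vstack([daisy(rgb2gray(img)).reshape((-1, 200))
                       for img in training_images])
kmeans = KMeans(n_clusters=500).fit(all_descs)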
def daisy_extractor(img):
    """Use the DAISY descriptor to extract dense features."""
    descs, descs_img = daisy(img, step=180, radius=58, rings=2, histograms=6,
                             orientations=8, visualize=True)
    return descs, descs_img
def descs(greyimg):
    descs = daisy(greyimg, step=180, radius=58, rings=2, histograms=6,
                  orientations=8)
    descs_num = descs.shape[0] * descs.shape[1]
    return np.mean(descs.reshape(descs_num, descs.shape[2]), axis=0)
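# `descs` above mean-pools the descriptor grid into a single global vector,
# which makes whole images directly comparable by distance. A minimal usage
# sketch, assuming two grayscale arrays `a` and `b` (hypothetical) are
# already loaded:
import numpy as np

va, vb = descs(a), descs(b)
similarity = np.dot(va, vb) / (np.linalg.norm(va) * np.linalg.norm(vb))
print(similarity)  # cosine similarity of the pooled DAISY vectors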
def _daisy(self, img, normalize=True):
    image = color.rgb2gray(img)
    # step, radius, rings, histograms, n_orient and R are assumed to be
    # module-level configuration constants, with
    # R = (rings * histograms + 1) * n_orient.
    descs = daisy(image, step=step, radius=radius, rings=rings,
                  histograms=histograms, orientations=n_orient)
    descs = descs.reshape(-1, R)  # shape=(N, R)
    hist = np.mean(descs, axis=0)  # shape=(R,)
    if normalize:
        hist = hist / np.sum(hist)
    return hist
def extractDAISY(images):
    featureVecs = []
    for image in images:
        featureVecs.append(feature.daisy(image, step=8, radius=8,
                                         rings=3).flatten())
    return np_array(featureVecs)
def compute_daisy(img):
    """Compute dense per-pixel DAISY features of the image."""
    radius = 58
    padd_img = np.pad(img, ((radius, radius), (radius, radius)), 'mean')
    d = daisy(padd_img, step=1, radius=radius, rings=1, histograms=3,
              orientations=8)
    return d
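# Padding by `radius` before calling daisy with step=1 yields one descriptor
# per original pixel: the output grid is
# ceil((M + 2*radius - 2*radius) / 1) = M rows, and likewise N columns.
# A quick sanity check, assuming numpy and skimage are available:
import numpy as np

img = np.random.rand(32, 40)
d = compute_daisy(img)
print(d.shape[:2])  # (32, 40): one descriptor per pixel of the input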
def get_daisy_feature(self, gray):
    daisy_desc, daisy_img = feature.daisy(gray, step=100, radius=18, rings=2,
                                          histograms=8, orientations=8,
                                          visualize=True)
    return daisy_desc, daisy_img
def daisy(self):
    grayscale_painting = rgb2gray(self.resize_painting())
    # daisy_features = feature.daisy(grayscale_painting, step=180, radius=58,
    #                                rings=2, histograms=6, orientations=8)
    daisy_features = feature.daisy(grayscale_painting)
    # Flatten the (P, Q, R) descriptor grid into a flat list of values
    # (equivalent to the triple nested loop in the original).
    return daisy_features.ravel().tolist()
def daisy_feature():
    camera = data.camera()
    daisy_feat, daisy_img = daisy(camera, step=180, radius=58, rings=2,
                                  histograms=6, visualize=True)
    print(daisy_img.shape)
    plt.imshow(daisy_img)
    plt.show()
def daisy_feat(img):
    img = color.rgb2gray(img)
    # The visualization image is discarded; visualize=False would be cheaper.
    descs, descs_img = daisy(img, step=180, radius=58, rings=2, histograms=6,
                             orientations=8, visualize=True)
    return descs.ravel()
def ExtractDaisy(dataInput):
    """
    descs : array
        Grid of DAISY descriptors for the given image as an array with
        dimensionality (P, Q, R) where
        ``P = ceil((M - radius*2) / step)``
        ``Q = ceil((N - radius*2) / step)``
        ``R = (rings * histograms + 1) * orientations``
    descs_img : (M, N, 3) array (only if visualize==True)
        Visualization of the DAISY descriptors.

    :param dataInput: image array or path to an image file
    :return:
    """
    if isinstance(dataInput, np.ndarray):  # examine input type
        img = dataInput.copy()
    else:
        # scipy.misc.imread was removed from SciPy; imageio.imread is the
        # usual replacement.
        img = imageio.imread(dataInput, pilmode='RGB')
    image = color.rgb2gray(img)
    # E, R, Q, T and H are assumed to be module-level DAISY parameters
    # (step, radius, rings, histograms, orientations).
    t_start_1 = time.perf_counter()
    descs = daisy(image, step=E, radius=R, rings=Q, histograms=T,
                  orientations=H)
    print('Time used: %r' % (time.perf_counter() - t_start_1))
    t_start_2 = time.perf_counter()
    descs, descs_img = daisy(image, step=E, radius=R, rings=Q, histograms=T,
                             orientations=H, visualize=True)
    print('Time used: %r' % (time.perf_counter() - t_start_2))
    plt.imshow(img)
    plt.figure()
    plt.imshow(descs_img)
    plt.show()
def daisy(image):
    """
    Extract DAISY feature descriptors densely for the given image.

    :param image: Input image (RGB; converted to grayscale internally)
    :return: Grid of DAISY descriptors for the given image as an array
        with dimensionality (P, Q, R) where
        P = ceil((M - radius*2) / step)
        Q = ceil((N - radius*2) / step)
        R = (rings * histograms + 1) * orientations
    """
    return feature.daisy(color.rgb2gray(image), visualize=False)
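# With skimage's defaults (step=4, radius=15, rings=3, histograms=8,
# orientations=8), the formulas in the docstring give, for a 128x128 image:
# P = Q = ceil((128 - 30) / 4) = 25 and R = (3*8 + 1)*8 = 200.
# A quick check, assuming skimage is available (the wrapper above shadows
# the name `daisy`, so the library function is called directly here):
import numpy as np
from skimage import feature

img = np.random.rand(128, 128)
print(feature.daisy(img).shape)  # (25, 25, 200)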
def read_image(self, image_name, size=None):
    options = self.get_options()
    if size:
        im = np.array(Image.open(image_name).convert("L").resize(size))
    else:
        im = np.array(Image.open(image_name).convert("L"))
    options["image"] = im
    feature = daisy(**options)
    return feature.reshape(
        (1, feature.shape[0] * feature.shape[1] * feature.shape[2]))[0]
def Feature_Extractor_Fn(vid, num_frames, frame_no, new_shape=(360, 480),
                         step=80, radius=45):
    """Extract a DAISY feature vector for one frame of a video."""
    if frame_no < num_frames - 1:
        frame = vid.get_data(frame_no)
        frame_resized = resize(frame, new_shape)
        frame_gray = rgb2gray(frame_resized)
        daisy_desc = daisy(frame_gray, step=step, radius=radius)
        daisy_1D = np.ravel(daisy_desc)
        # Extract a DAISY feature for each patch of the frame
        N = 4
        step_glove = int(step / N)
        radius_glove = int(radius / N)
        patch_shape_x = int(new_shape[0] / N)
        patch_shape_y = int(new_shape[1] / N)
        patchs_arr = view_as_blocks(frame_gray,
                                    (patch_shape_x, patch_shape_y))
        patch_num_row = patchs_arr.shape[0]
        patch_num_col = patchs_arr.shape[1]
        final_daisy_length = daisy(patchs_arr[0, 0, :, :], step=step_glove,
                                   radius=radius_glove).size
        patch_daisy_arr = np.zeros((patch_num_row, patch_num_col,
                                    final_daisy_length))
        for i in range(patch_num_row):
            for k in range(patch_num_col):
                patch = patchs_arr[i, k, :, :]
                patch_daisy_desc = daisy(patch, step=step_glove,
                                         radius=radius_glove)
                patch_daisy_arr[i, k, :] = np.ravel(patch_daisy_desc)
        # SIFT and SURF variants via cv2.xfeatures2d were also tried here
        # but are commented out in the original.
    else:
        # The original only printed a message and then returned unbound
        # variables; raising makes the failure explicit.
        raise ValueError("Frame number is larger than the length of video")
    return patch_daisy_arr, daisy_1D
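# `view_as_blocks` above tiles the frame into non-overlapping patches; the
# block shape must divide the image shape exactly. A minimal illustration,
# assuming numpy and skimage are available:
import numpy as np
from skimage.util import view_as_blocks

frame = np.random.rand(360, 480)
blocks = view_as_blocks(frame, (90, 120))  # 4x4 grid of 90x120 patches
print(blocks.shape)  # (4, 4, 90, 120)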
def extract_descriptor_process(image_array, thumbnail_size,
                               feature_type="SIFT", descriptor_type="SIFT"):
    # Convert to grayscale if the image has 3 or more channels (RGB)
    img_gray = image_array
    if len(image_array.shape) >= 3:
        img_gray = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)
    img_gray = numpy.array(img_gray, numpy.uint8)
    if descriptor_type == "DAISY":
        try:
            daisy_descs = []
            descs = daisy(img_gray, step=15, radius=15, rings=14,
                          histograms=6, orientations=8)
            for row in range(descs.shape[0]):
                for col in range(descs.shape[1]):
                    daisy_descs.append(descs[row][col])
            return None, daisy_descs
        except Exception:
            print("Error while extracting DAISY descriptor")
            return None, None
    if descriptor_type == "HOG":
        descs = hog(img_gray, orientations=8, pixels_per_cell=(16, 16),
                    cells_per_block=(1, 1))
        if len(descs) < 512:
            return None, None
        hog_descs = [descs[0:648]]
        return None, hog_descs
    if feature_type == "ASIFT" or descriptor_type == "ASIFT":
        detector, matcher = init_feature('sift-flann')
        kp, descs = affine_detect(detector, img_gray)
        return kp, descs
    # Optional normalization step, kept from the original:
    # img_gray = img_gray.astype(numpy.float32)
    # img_gray = (img_gray - img_gray.mean()) / img_gray.std()
    # img_gray = cv2.normalize(img_gray, img_gray, 0, 255, cv2.NORM_MINMAX)
    # img_gray = img_gray.astype(numpy.uint8)
    featureDetector = cv2.FeatureDetector_create(feature_type)
    descriptorDetector = cv2.DescriptorExtractor_create(descriptor_type)
    keypoints = featureDetector.detect(img_gray)
    keypoints, descriptors = descriptorDetector.compute(img_gray, keypoints)
    return keypoints, descriptors
def Daisy_Extractor_Fn(vid, frame_no, new_shape=(120, 180), step=50,
                       radius=20):
    # The original referenced an undefined `num_frames`; it is assumed to
    # come from the video reader, made explicit here.
    num_frames = vid.get_length()
    if frame_no < num_frames:
        frame = vid.get_data(frame_no)
        frame_resized = resize(frame, new_shape)
        frame_gray = rgb2gray(frame_resized)
        daisy_desc = daisy(frame_gray, step=step, radius=radius)
        descs_1D = np.ravel(daisy_desc)
    else:
        # The original fell through to returning an unbound `descs_1D` here.
        raise ValueError("Frame number is larger than the length of video")
    return descs_1D
def bow_train(featfunc, opts, canon_opts, params):
    descs = []
    files = dataset.training_files(opts['num_train_images'])
    for img_file, depth_file in print_progress(files):
        img, segmask = canonize(img_file, depth_file, canon_opts, params)
        if featfunc == 'daisy':
            h = daisy(img_as_float(img), **opts['feature_opts'])
        if featfunc == 'jet':
            h = jet(img_as_float(img), **opts['feature_opts'])
        # Note: scipy.misc.imresize is deprecated; skimage.transform.resize
        # is a common replacement.
        segmask = sp.misc.imresize(segmask, h.shape[:2])
        for i in range(h.shape[0]):
            for j in range(h.shape[1]):
                if segmask[i, j] != 0:
                    descs.append(h[i, j, :].flatten())
    descs = [descs[i] for i in range(0, len(descs), opts['feature_step'])]
    descs = np.vstack(tuple(descs))
    print('# K-means clustering of %i features of dimensionality %i'
          % (descs.shape[0], descs.shape[1]))
    kmeans = KMeans(opts['num_clusters'], n_jobs=options.num_threads)
    kmeans.fit(descs)
    return kmeans
def extract_features(self):
    for idx in range(len(self.characters)):
        self.feature_fnames.append("{0}/{1}/{2}/{3}.npy".format(
            self.output_fname, self.plate_number, idx,
            self.plate_number[idx]))
        img = cv2.resize(self.characters[idx][1], self.character_shape)
        if self.feature == 'hog':
            self.feature_vectors.append(
                feature.hog(img, orientations=4, pixels_per_cell=(8, 8),
                            cells_per_block=(1, 1)))
        elif self.feature == 'lbp':
            lbp = feature.local_binary_pattern(img, P=8, R=1,
                                               method='default')
            n_bins = int(lbp.max() + 1)
            # The original called self.feature_vectors(...) here (and in the
            # daisy branch), which would raise; .append was clearly intended.
            self.feature_vectors.append(
                np.histogram(lbp.ravel(), bins=n_bins, range=(0, n_bins),
                             density=True)[0])
        elif self.feature == 'daisy':
            self.feature_vectors.append(
                feature.daisy(img, step=4, radius=15, rings=3, histograms=8,
                              orientations=8, normalization='l1',
                              sigmas=None, ring_radii=None, visualize=False))
        else:
            threshold, feats = cv2.threshold(
                img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            self.feature_vectors.append(feats.reshape(1, -1))
    self.feature_fnames = np.array(self.feature_fnames)
    self.feature_vectors = np.array(self.feature_vectors)
""" =============================== Dense DAISY feature description =============================== The DAISY local image descriptor is based on gradient orientation histograms similar to the SIFT descriptor. It is formulated in a way that allows for fast dense extraction which is useful for e.g. bag-of-features image representations. In this example a limited number of DAISY descriptors are extracted at a large scale for illustrative purposes. """ from skimage.feature import daisy from skimage import data import matplotlib.pyplot as plt img = data.camera() descs, descs_img = daisy(img, step=180, radius=58, rings=2, histograms=6, orientations=8, visualize=True) plt.axis('off') plt.imshow(descs_img) descs_num = descs.shape[0] * descs.shape[1] plt.title('%i DAISY descriptors extracted:' % descs_num) plt.show()
def preprocess(img, demo=False):
    """Turn raw pixel values into features."""

    def _demo_plot(img, stage="", is_ints=False, axes_idx=0):
        """Utility to visualize the features we're building."""
        if demo:
            axes[axes_idx].imshow(img / 255. if is_ints else img,
                                  cmap=bees_cm)
            axes[axes_idx].set_title(stage)
        return axes_idx + 1

    # track which subplot we're plotting to (the original only set this
    # inside the demo branch, raising a NameError when demo=False)
    axes_idx = 0
    if demo:
        fig, axes = plt.subplots(3, 2, figsize=(15, 20))
        axes = axes.flatten()
    axes_idx = _demo_plot(img, stage="Raw Image", is_ints=True,
                          axes_idx=axes_idx)

    # FEATURE 1: Raw image and color data
    if demo:
        color_info = extract_rgb_info(img, ax=axes[axes_idx])
        axes_idx += 1
    else:
        color_info = extract_rgb_info(img)

    # remove color information (hog and daisy only work on grayscale)
    gray = rgb2gray(img)
    axes_idx = _demo_plot(gray, stage="Convert to grayscale",
                          axes_idx=axes_idx)
    # equalize the image
    gray = equalize_hist(gray)
    axes_idx = _demo_plot(gray, stage="Equalized histogram",
                          axes_idx=axes_idx)

    # FEATURE 2: histogram of oriented gradients features
    # (older skimage versions spelled this parameter `visualise`)
    hog_features = hog(gray, orientations=12, pixels_per_cell=(8, 8),
                       cells_per_block=(1, 1), visualize=demo)
    # if demo, we actually got a tuple back; unpack it and plot
    if demo:
        hog_features, hog_image = hog_features
        axes_idx = _demo_plot(hog_image, stage="HOG features",
                              axes_idx=axes_idx)

    # FEATURE 3: DAISY features - sparser for demo so it can be visualized
    params = {'step': 25, 'radius': 25, 'rings': 3} if demo \
        else {'step': 10, 'radius': 15, 'rings': 4}
    daisy_features = daisy(gray, histograms=4, orientations=8,
                           normalization='l1', visualize=demo, **params)
    if demo:
        daisy_features, daisy_image = daisy_features
        axes_idx = _demo_plot(daisy_image, stage="DAISY features",
                              axes_idx=axes_idx)

    # return a flat array of the raw, hog and daisy features
    return np.hstack([color_info, hog_features, daisy_features.flatten()])
def scan_img(path=None, link=None):
    """
    Get ONE face, get the eye rectangle and approximate nose location,
    rescale and crop everything, compute a DAISY descriptor for each part,
    cluster the descriptors with hierarchical clustering, make a BoW and
    feed an SVM.
    """
    img = cv2.imread(path, 0)
    face = impros.detect_face(img=img)
    if len(face) == 0:
        print('No face found')
        os.remove(path)
        return None
    x, y, w, h = face
    face_img = img[y:y + h, x:x + w]
    eyes_rect = impros.detect_eyes(face_img, CONFIG['haar_conf']['eye_clf'])
    rect_is_found = True
    if len(eyes_rect) == 0:
        # Eye rectangle not found; fall back to the single-eye classifier
        eyes_rect = impros.detect_eyes(face_img,
                                       CONFIG['haar_conf']['eye_clf1'])
        rect_is_found = False
        if len(eyes_rect) != 2:
            # Eyes not detected correctly
            os.remove(path)
            return None
    if rect_is_found:
        eyes_rect = eyes_rect[0]
    else:
        # Merge the two single-eye detections into one bounding rectangle
        leye = eyes_rect[0] if eyes_rect[0][0] < eyes_rect[1][0] \
            else eyes_rect[1]
        reye = eyes_rect[1] if eyes_rect[0][0] < eyes_rect[1][0] \
            else eyes_rect[0]
        x = leye[0]
        miny = min(leye[1], reye[1])
        maxy = max(leye[1], reye[1])
        width = abs(reye[0] + reye[2] - leye[0])
        _height = leye[3] if leye[1] == maxy else reye[3]
        height = maxy + _height - miny
        eyes_rect = (x, miny, width, height)
    x, y, w, h = eyes_rect
    # Integer division: slice indices must be ints.
    left_eye = face_img[y:y + h, x:x + w // 2]
    right_eye = face_img[y:y + h, x + w // 2:x + w]
    nose_obj = impros.detect_nose(face_img, eyes_rect)
    if nose_obj is None:
        os.remove(path)
        return None
    x, y, w, h, angle = nose_obj
    nose_img = impros.rotate_img(face_img, angle)[y:y + h, x:x + w]
    face_img = impros.resize_img(face_img, CONFIG['daisy_conf']['face_size'])
    left_eye = impros.resize_img(left_eye, CONFIG['daisy_conf']['le_size'])
    right_eye = impros.resize_img(right_eye, CONFIG['daisy_conf']['re_size'])
    nose_img = impros.resize_img(nose_img, CONFIG['daisy_conf']['nose_size'])
    face_daisy = feature.daisy(face_img, step=8, radius=24, rings=3,
                               histograms=6, orientations=8)
    le_daisy = feature.daisy(left_eye, step=8, radius=16, rings=3,
                             histograms=6, orientations=8)
    re_daisy = feature.daisy(right_eye, step=8, radius=16, rings=3,
                             histograms=6, orientations=8)
    nose_daisy = feature.daisy(nose_img, step=8, radius=12, rings=3,
                               histograms=6, orientations=8)
    print('shapes:', face_daisy.shape, le_daisy.shape, re_daisy.shape,
          nose_daisy.shape)

    def write_csv(postfix, data, user_id):
        with open('src/image_processing/data/daisy/data_daisy_' + postfix +
                  '.csv', 'a+') as csvfile:
            writer = csv.writer(csvfile, strict=True)
            for x in range(data.shape[0]):
                for y in range(data.shape[1]):
                    writer.writerow(np.append(data[x, y], [user_id]))

    user_id = path.split('/')[-2]
    write_csv('face', face_daisy, user_id)
    write_csv('le', le_daisy, user_id)
    write_csv('re', re_daisy, user_id)
    write_csv('nose', nose_daisy, user_id)
def test_daisy_color_image_unsupported_error():
    img = np.zeros((20, 20, 3))
    with testing.raises(ValueError):
        daisy(img)
folder_path = os.path.join(path, folder)
os.chdir(folder_path)
folders2 = os.listdir(os.getcwd())
for folder2 in folders2:
    if folder2 != '.DS_Store' and folder2[0:4] != 'Icon':
        os.chdir(os.path.join(folder_path, folder2))
        images = os.listdir(os.getcwd())
        if '.DS_Store' in images:
            images.remove('.DS_Store')
        n = len(images)
        oui = 0
        non = 1
        for image in images:
            if image[0] == 'f':
                face = cv2.imread(image, 0)
                descs = daisy(face, step=5)
                # PCA expects a 2D array of shape (n_samples, n_features);
                # the original called reshape(1, -1) without keeping the
                # result.
                descs = np.asarray(descs).reshape(1, -1)
                descs = pca.transform(descs)
                predict = clf.predict(descs)
                if predict[0] == 1:
                    oui += 1
        percent = float(oui) / n
        if percent >= threshold:
            YES.append((str(folder) + '/' + folder2, percent))
        else:
            NO.append((str(folder) + '/' + folder2, percent))
YES.sort(key=sort_tuple_folder)
NO.sort(key=sort_tuple_folder)
def _extract_rgb(self, rgb):
    # Integer division: `step` and `radius` must be ints.
    kwargs = dict(step=rgb.shape[0] // 5, radius=rgb.shape[0] // 10,
                  rings=2, histograms=6, orientations=8)
    # np.hstack needs a sequence, not a generator.
    return np.hstack([daisy(rgb[:, :, i], **kwargs).ravel()
                      for i in [0, 1]])
from skimage.feature import daisy

for index, label in enumerate(labels):
    # Crop image to the cell of interest
    non_zero_indices = np.nonzero(seg == label)
    y_min = np.min(non_zero_indices[0])
    y_max = np.max(non_zero_indices[0])
    x_min = np.min(non_zero_indices[1])
    x_max = np.max(non_zero_indices[1])
    cell_img = img[y_min:y_max, x_min:x_max]
    # Extract DAISY features and build a feature space with a subset of them.
    # Note that some of the segmentation artefacts can cause an error here;
    # these cells are marked as NaN (not a number) and are excluded below.
    try:
        daisy_features = daisy(cell_img, step=2, radius=8)[0]
        fspace2[index, :] = daisy_features[0, :]
    except Exception:
        fspace2[index, :] = np.NaN

# Exclusion of cells that gave errors.
# By deleting them from both the feature space and 'labels', the feature
# space remains mapped onto the segmentation correctly.
keep = []
for index, label in enumerate(labels):
    if not np.isnan(np.sum(fspace2[index, :])):
        keep.append(index)
labels2 = labels[keep]
fspace2 = fspace2[keep]
def daisyFeatureDetector(self, window):
    return feature.daisy(window).ravel()
def _daisy_feature(im):
    return feature.daisy(im)
# We set the path from where we get the data
data_path = os.path.join(os.getcwd(), 'ReconSarko')
os.chdir(data_path)

# We create the train matrix by associating to each face image its DAISY
# descriptor
face_folders = os.listdir(os.getcwd())
X, y, id_target = [], [], 1
for face_folder in face_folders:
    if face_folder != '.DS_Store':
        os.chdir(face_folder + '/processed_faces')
        faces_list = os.listdir(os.getcwd())
        faces_list.remove('.DS_Store')
        for face_path in faces_list:
            face = cv2.imread(face_path, 0)
            descriptor = daisy(face, step=2)
            flat_descriptor = np.asarray(descriptor).reshape(-1)
            X.append(flat_descriptor)
            y.append(id_target)
        id_target -= 1
        os.chdir(data_path)

# We train/test split the matrix of DAISY descriptors
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=42)

# We apply PCA to the train and test matrices in order to reduce their size
pca = PCA(n_components=400)
pca.fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
def DAISY_extractor(image, step=4, radius=15, rings=3, num_histograms=6,
                    orientations=8, visualize=False, normalization='daisy'):
    """Calculates DAISY descriptors and puts each in one row of an array.

    The step size needs to be quite small to get as many descriptors as
    there are dense patches.

    Parameters
    ----------
    step: int (default: 4)
        Distance between each sampling point
    radius: int (default: 15)
        Radius of the outermost ring in pixels
    rings: int (default: 3)
        Number of rings around each sampling point
    num_histograms: int (default: 6)
        Number of "petals" in each ring
    orientations: int (default: 8)
        Number of orientations (directions) per histogram
    visualize: bool (default: False)
        If true, saves a visualization of the DAISY on the given image
    normalization: ['l1', 'l2', 'daisy', 'off'], (default: 'daisy')
        What type of normalization to use. 'daisy' applies the L2 norm to
        each histogram individually.

    Returns
    -------
    out: ndarray
        An array with each row corresponding to a DAISY feature vector,
        normalized along the rows. For items created with the same
        histograms, orientations and rings, the width of this array is
        constant at (rings * histograms + 1) * orientations.
    """
    if visualize:
        descs, descs_img = daisy(image, step=step, radius=radius,
                                 rings=rings, histograms=num_histograms,
                                 orientations=orientations,
                                 visualize=visualize,
                                 normalization=normalization)
        fig, ax = plt.subplots()
        ax.axis('off')
        ax.imshow(descs_img)
        name = 'daisy_step{}_radius{}_rings{}.png'.format(step, radius,
                                                          rings)
        plt.savefig(name)
    else:
        descs = daisy(image, step=step, radius=radius, rings=rings,
                      histograms=num_histograms, orientations=orientations,
                      visualize=visualize, normalization=normalization)
    # Reshape the daisy output so that each pixel's histogram is a row in
    # an array: move the "band" dimension to the front. `shp` should have
    # the format 40 x 40 x 152 (where the 40s are the spatial part of the
    # image, making the 152 the histogram of each pixel, which we would
    # like ravelled).
    shp = descs.shape
    descs_shaped = np.rollaxis(descs, 2)
    # It has to be done like this, otherwise the wrong elements end up in
    # the wrong places; then transpose.
    out = descs_shaped.reshape((shp[2], shp[0] * shp[1])).T
    # Center the data
    m = out.mean(axis=1)
    # Transpose so the subtraction broadcasts along the rows
    out = (out.T - m).T
    # Make unit length along the rows
    out = normalize(out, axis=1)
    return out
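# A usage sketch for DAISY_extractor, assuming a grayscale image loaded
# with skimage; with the defaults above each row of the output is a
# (3*6 + 1)*8 = 152-dimensional descriptor:
from skimage import data

out = DAISY_extractor(data.camera())
print(out.shape)  # (n_descriptors, 152)
print(out[0].mean(), (out[0] ** 2).sum())  # ~0 and ~1: centered, unit-norm rows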
# Make the data
all_features = aiot.read_file_to_arr('../../BOW_daisy_features_normalized')
print("all_features.shape", all_features.shape)

n_clusters = 200
# Note: the n_jobs parameter was removed from KMeans in scikit-learn 1.0.
km = KMeans(n_clusters=n_clusters, n_jobs=-1)
km.fit(all_features)
print("cluster_centers_.shape:", km.cluster_centers_.shape)
print("labels_:", km.labels_[:10], km.labels_.shape)

all_words = []
for i in range(len(X_test)):
    color_img = X_test[i, :, :, :]
    gray_img = rgb2gray(color_img)
    gray_img = normalize(gray_img)
    features = daisy(gray_img, step=2, radius=8)
    words = km.predict(features.reshape(-1, 200))
    words, bin_edges = np.histogram(words, bins=range(n_clusters))
    all_words.append(words)
all_words = np.vstack(all_words)
print('all_words.shape: ', all_words.shape)
print('all_words[:2,:] : ', all_words[:2, :])
aiot.write_arr_to_file(all_words, 'test_BOW_daisy_normalized_kmean200')

reduced_X_tr = aiot.read_file_to_arr('../../BOW_daisy_normalized_kmean200')
def test_daisy_sigmas_and_radii():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))
    sigmas = [1, 2, 3]
    radii = [1, 2]
    daisy(img, sigmas=sigmas, ring_radii=radii)
def test_daisy_incompatible_sigmas_and_radii():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))
    sigmas = [1, 2]
    radii = [1, 2]
    with testing.raises(ValueError):
        daisy(img, sigmas=sigmas, ring_radii=radii)
def make_features_daisy(self, gray_imgs, **kwargs):
    # for key, value in kwargs.items():
    #     setattr(self, key, value)
    all_features = [daisy(gray_img, **kwargs).reshape(-1).astype(np.float32)
                    for gray_img in gray_imgs]
    return np.array(all_features)
def test_daisy_visualization():
    img = img_as_float(data.astronaut()[:32, :32].mean(axis=2))
    descs, descs_img = daisy(img, visualize=True)
    assert descs_img.shape == (32, 32, 3)
def find_features(img):
    # The original kept a large amount of commented-out exploratory code
    # here (Hough lines, mahotas Haralick/LBP/TAS/Zernike features, Hu
    # moments with a rotation-invariance test, and a SURF + milk k-means
    # visualization); only the DAISY path below is active.

    # Grayscale the image if necessary (the original tested img.shape[2],
    # which raises an IndexError for 2-D inputs)
    if img.ndim == 3:
        img = img.mean(2)
    img_step = int(img.shape[1] / 4)
    img_radius = int(img.shape[1] / 10)
    descs, descs_img = daisy(img, step=img_step, radius=img_radius, rings=2,
                             histograms=8, orientations=8,
                             normalization='l2', visualize=True)
    features = descs.ravel()
    print(type(descs_img), type(array2cv(descs_img)))
    cv2.imwrite("descs_img.png", cv2array(array2cv(descs_img)))
    # plt.axis('off')
    # plt.imshow(descs_img)
    # descs_num = descs.shape[0] * descs.shape[1]
    # plt.title('%i DAISY descriptors extracted:' % descs_num)
    # plt.show()
    return features