def __init__(self):
    for angle in self.angles:
        angle = angle / 4.0 * np.pi
        for frequency in self.frequencies:
            self.debugging_kernels.append(gabor_kernel(frequency=frequency, theta=angle))
            for scale in self.scales:
                kernel = np.real(gabor_kernel(frequency=frequency, theta=angle,
                                              sigma_x=scale, sigma_y=scale))
                self.kernels.append(kernel)
def get_gabor(self, ms_img):
    kernel_x = gabor_kernel(1, theta=0)
    kernel_y = gabor_kernel(1, theta=np.pi / 2)
    Gx = np.zeros((ms_img.shape[0] + 4, ms_img.shape[1] + 4, ms_img.shape[2]))
    Gy = np.zeros((ms_img.shape[0] + 4, ms_img.shape[1] + 4, ms_img.shape[2]))
    for i in range(ms_img.shape[2]):
        Gx[:, :, i] = np.real(convolve(kernel_x, ms_img[:, :, i]))
        Gy[:, :, i] = np.real(convolve(kernel_y, ms_img[:, :, i]))
    # Gx = np.real(convolve(kernel_x, ms_img))
    # Gy = np.real(convolve(kernel_y, ms_img))
    return Gx, Gy
def make_gabor_kernel(theta, frequency, sigma, bandwidth):
    kernels = []
    for t in range(theta):
        t = t / float(theta) * np.pi
        for f in frequency:
            if sigma:
                for s in sigma:
                    kernel = gabor_kernel(f, theta=t, sigma_x=s, sigma_y=s)
                    kernels.append(kernel)
            if bandwidth:
                for b in bandwidth:
                    kernel = gabor_kernel(f, theta=t, bandwidth=b)
                    kernels.append(kernel)
    return kernels
def get_gabor_kernel():
    '''
    Create a Gabor kernel using skimage's gabor_kernel function.
    :return:
    '''
    gk = gabor_kernel(frequency=0.2)
    plt.figure()
    io.imshow(gk.real)
    io.show()

    # more ripples (equivalent to increasing the size of the
    # Gaussian spread)
    gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
    plt.figure()
    io.imshow(gk.real)
    io.show()
def get_feats(file_name):
    img_feats = []
    img = io.imread("Images\\" + str(file_name) + ".png", as_gray=True)

    # Calculate LBP features
    lbp = local_binary_pattern(img, n_points, radius, METHOD)
    n_bins = int(lbp.max() + 1)
    hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
    img_feats.append(hist.mean())
    img_feats.append(hist.var())

    # Calculate Gabor features
    kernels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(
                    gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)
    shrink = (slice(0, None, 3), slice(0, None, 3))
    img_shrunk = img_as_float(img)[shrink]
    for k, kernel in enumerate(kernels):
        filtered = ndi.convolve(img, kernel, mode='wrap')
        img_feats.append(filtered.mean())
        img_feats.append(filtered.var())
    return img_feats
def __init__(self,
             theta=np.array([0.0, np.pi / 4.0, np.pi / 2.0, 3.0 * np.pi / 4.0],
                            dtype=np.double),
             freq=np.array([3.0 / 4.0, 3.0 / 8.0, 3.0 / 16.0], dtype=np.double),
             sigma=np.array([1.0, 2 * np.sqrt(2.0)], dtype=np.double),
             normalized=True):
    """
    Initialize the Gabor kernels (only real part).

    Args:
        theta: numpy.ndarray (vector)
            Contains the orientations of the filter; defaults to
            [0, pi/4, pi/2, 3*pi/4].
        freq: numpy.ndarray (vector)
            The frequencies of the Gabor filter; defaults to [3/4, 3/8, 3/16].
        sigma: numpy.ndarray (vector)
            The sigma parameter for the Gaussian smoothing filter; defaults to
            [1, 2*sqrt(2)].
        normalized: bool
            If true, the kernels are normalized
    """
    self.kernels_ = [
        np.real(gabor_kernel(frequency=f, theta=t, sigma_x=s, sigma_y=s))
        for f in freq for s in sigma for t in theta
    ]
    if normalized:
        for k, krn in enumerate(self.kernels_):
            self.kernels_[k] = krn / np.sqrt((krn**2).sum())
    return
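A minimal usage sketch, not part of the original class: the same L2-normalized real Gabor bank can be built inline and applied to any grayscale image with scipy's ndi.convolve, one response map per kernel (the test image is an assumption).

import numpy as np
from scipy import ndimage as ndi
from skimage import data, img_as_float
from skimage.filters import gabor_kernel

image = img_as_float(data.camera())            # any grayscale image
kernels = [np.real(gabor_kernel(frequency=f, theta=t, sigma_x=s, sigma_y=s))
           for f in (3 / 4., 3 / 8., 3 / 16.)
           for s in (1.0, 2 * np.sqrt(2.0))
           for t in (0.0, np.pi / 4, np.pi / 2, 3 * np.pi / 4)]
# same normalization as in __init__ above
kernels = [k / np.sqrt((k**2).sum()) for k in kernels]
responses = [ndi.convolve(image, k, mode='wrap') for k in kernels]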
def get_gabor_filters(inchannels, outchannels, kernel_size=(3, 3)):
    delta = 1e-4
    freqs = (math.pi / 2) * math.sqrt(2)**(-np.random.randint(0, 5, (outchannels, inchannels)))
    thetas = (math.pi / 8) * np.random.randint(0, 8, (outchannels, inchannels))
    sigmas = math.pi / freqs
    psis = math.pi * np.random.rand(outchannels, inchannels)
    x0, y0 = np.ceil(np.array(kernel_size) / 2)
    y, x = np.meshgrid(
        np.linspace(-x0 + 1, x0 + 0, kernel_size[0]),
        np.linspace(-y0 + 1, y0 + 0, kernel_size[1]),
    )
    filterbank = []
    for i in range(outchannels):
        for j in range(inchannels):
            freq = freqs[i][j]
            theta = thetas[i][j]
            sigma = sigmas[i][j]
            psi = psis[i][j]
            rotx = x * np.cos(theta) + y * np.sin(theta)
            roty = -x * np.sin(theta) + y * np.cos(theta)
            g = np.exp(-0.5 * ((rotx**2 + roty**2) / (sigma + delta)**2))
            g = g * np.cos(freq * rotx + psi)
            g = g / (2 * math.pi * sigma**2)
            g = gabor_kernel(frequency=freq, bandwidth=sigma, theta=theta, n_stds=0).real
            filterbank.append(g)
    return filterbank
def gabor_feature(image, include_kernel=False):
    '''
    image: the 2D image array
    Extract 40 Gabor features: 8 different directions and 5 different frequencies
    '''
    results = []
    kernels = []
    kernel_params = []
    # 8 directions
    for theta in range(8):
        theta = theta / 4. * np.pi
        # 5 frequencies
        for frequency in range(1, 10, 2):
            frequency = frequency * 0.1
            kernel = gabor_kernel(frequency, theta=theta)
            params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
            kernel_params.append(params)
            # Save kernel and the power image for each image
            results.append(power(image, kernel))
            kernels.append(kernel)
    if include_kernel:
        return results, kernels, kernel_params
    else:
        return results
def gabor(image):
    # prepare filter bank kernels
    kernels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(
                    gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)

    shrink = (slice(0, None, 3), slice(0, None, 3))
    image = img_as_float(image)

    # prepare reference features
    feats = np.zeros((len(kernels), 2), dtype=np.double)
    for k, kernel in enumerate(kernels):
        filtered = ndi.convolve(image, kernel, mode='wrap')
        feats[k, 0] = filtered.mean()
        feats[k, 1] = filtered.var()
    ref_feats = feats
    return ref_feats
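A hedged sketch of how mean/variance Gabor features like these are typically compared, assuming ref_feats stacks one such feature matrix per reference texture; this mirrors the matching loop that appears further down in this collection, it is not part of the function above.

import numpy as np

def match(feats, ref_feats):
    # return the index of the reference feature matrix with smallest squared error
    min_error = np.inf
    min_i = None
    for i in range(ref_feats.shape[0]):
        error = np.sum((feats - ref_feats[i, :])**2)
        if error < min_error:
            min_error = error
            min_i = i
    return min_i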
def get_gabor_filter_bank(frequencies, num_orientations):
    """
    Parameters
    ----------
    frequencies : list-like
        Set of frequencies used to build the Gabor filter bank.
    num_orientations : int or list-like
        Number of orientations used to build the Gabor filter bank. If an
        integer is provided, the corresponding number of orientations will be
        used for each scale (determined by `gabor_frequencies`). If a tuple is
        provided, each element will determine the number of orientations that
        must be used at its matching scale (determined by `gabor_frequencies`)
        - thus the tuple must match the length of `frequencies`.

    Returns
    -------
    kernels : list-like
        List of kernel 2-D arrays that correspond to the filter bank
    """
    # broadcast a single integer so the int case documented above actually works
    if isinstance(num_orientations, int):
        num_orientations = [num_orientations] * len(frequencies)
    kernels = []
    for frequency, _num_orientations in zip(frequencies, num_orientations):
        for orientation_i in range(_num_orientations):
            theta = orientation_i / _num_orientations * np.pi
            kernel = np.real(gabor_kernel(frequency, theta=theta))
            kernels.append(kernel)
    return kernels
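A small usage sketch assuming get_gabor_filter_bank above is in scope; the frequencies, orientation counts, and test image are illustrative values, not from the snippet.

import numpy as np
from scipy import ndimage as ndi
from skimage import data, img_as_float

image = img_as_float(data.camera())
kernels = get_gabor_filter_bank(frequencies=(0.1, 0.25), num_orientations=(4, 8))
# one response map per kernel, stacked along the last axis
responses = np.stack([ndi.convolve(image, k, mode='reflect') for k in kernels], axis=-1)
print(responses.shape)   # (512, 512, 12)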
def do_gabors(weights, config, **kwargs):
    num_theta = weights.shape[0] / 4
    num_frequ = num_theta / 4
    configuration = config
    frequency = (0.05, 0.20, 0.30, 0.45)
    # sigma = (1.5, 2)
    # stds = (2, 3)
    # offset = (0, 1, -1)
    choices = [(1.5, 2, 0), (1.5, 2, 1), (1.5, 2, -1), (1.5, 3, 0), (1.5, 3, 1), (1.5, 3, -1),
               (2, 2, 0), (2, 2, 1), (2, 2, -1), (2, 3, 0), (2, 3, 1), (2, 3, -1)]
    idx = 0
    for theta in range(4):
        theta = theta / 4. * np.pi
        for frequency in (0.05, 0.20, 0.30, 0.45):
            configs = random.sample(range(12), int(num_frequ))
            for config in configs:
                kernel = np.real(gabor_kernel(frequency, theta=theta,
                                              sigma_x=choices[config][0],
                                              sigma_y=choices[config][0],
                                              n_stds=choices[config][1],
                                              offset=choices[config][2]))
                if kernel.shape[0] > 7:
                    overlap = int((kernel.shape[0] - 7) / 2)
                    length = kernel.shape[0]
                    kernel = kernel[overlap:length - overlap, overlap:length - overlap]
                if configuration['reshape']:
                    kernel = reshape_with_project(kernel)
                weights[idx, 0] = kernel
                weights[idx, 1] = kernel
                weights[idx, 2] = kernel
                idx += 1
    random_order = random_state.permutation(weights.shape[0])
    weights = weights[random_order]
    show_kernels(weights, 'fixed')
    return weights
def gaborFeature(normalizedIrisPatch, regions):
    # regions: [(x1, x2), (x3, x4), (x5, x6), ...]
    upperCutHeight = 10

    # Gabor Features
    kernels = []
    freqs = [0.1, 0.2, 0.3, 0.4, 0.5]
    nTheta = 8
    for theta in range(nTheta):
        theta = theta / float16(nTheta) * pi
        sigma = 1
        for frequency in freqs:
            kernel = real(
                gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)

    gaborFea = []
    for reg in regions:
        croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]]
        gaborFea_cur = []
        for k, kernel in enumerate(kernels):
            filteredIris = ndi.convolve(croppedImage, kernel, mode='wrap')
            gaborFea_cur.append(mean(filteredIris * filteredIris))
        gaborFea_cur = array(gaborFea_cur, float32)
        gaborFea.append(gaborFea_cur)
    gaborFea = array(gaborFea, dtype=float32)
    gaborFea = reshape(gaborFea, (gaborFea.shape[0] * gaborFea.shape[1], 1))
    gaborFea = gaborFea.tolist()
    return gaborFea
def filtrosGabor(self):
    """ Extract Gabor filter features """
    names = []
    results2 = []

    def compute_feats(image, kernels):
        # np.zeros(shape, dtype, order optional) -> returns a new array of the
        # given shape and type, filled with zeros
        feats = np.zeros((len(kernels), 2), dtype=np.double)
        for k, kernel in enumerate(kernels):  # enumerate -> yields an index with each kernel
            # ndi.convolve -> returns the convolution of the input with the kernel weights
            filtered = ndi.convolve(image, kernel, mode='wrap')
            feats[k, 0] = filtered.mean()
            feats[k, 1] = filtered.var()
            results2.append(feats[k, 0])
            # print ("Mean: %.4f" % feats[k, 0])
            results2.append(feats[k, 1])
            # print ("Variance: %.4f" % feats[k, 1])
        return feats  # feats is a 2-D array

    def power(image, kernel):
        image = (image - image.mean()) / image.std()
        return np.sqrt(
            ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
            ndi.convolve(image, np.imag(kernel), mode='wrap')**2)

    # Prepare filter bank kernels
    indice = 0
    kernels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(
                    gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)
                # print ("Thet_%f_Sigma_%i_Frequencia_%.2f" % (theta, sigma, frequency))
                for tipo in ("Mean", "Variance"):
                    names.append("Thet_%f_Sigma_%i_Frequencia_%.2f_%s" %
                                 (theta, sigma, frequency, tipo))

    # Takes pictures
    shrink = (slice(0, None, 3), slice(0, None, 3))
    image = img_as_float(self.imagemTonsDeCinza)[shrink]
    compute_feats(image, kernels)
    tipos = [numerico] * len(names)
    return names, tipos, results2
def _divisive_normalization_function(func_idx, cpd, vox_id):
    cache = _divisive_normalization_cache[func_idx]
    r = Kay2013_normalization_r[vox_id]
    s = Kay2013_normalization_s[vox_id]
    ev = stimulus_edge_value[func_idx]
    cpp = cpd / normalized_pixels_per_degree[func_idx]
    if (r, s) not in cache:
        cache[(r, s)] = dict()
    cache = cache[(r, s)]
    if isinstance(cpd, set):
        return {x: _divisive_normalization_function(func_idx, x, vox_id) for x in cpd}
    elif hasattr(cpd, '__iter__'):
        return [_divisive_normalization_function(func_idx, x, vox_id) for x in cpd]
    elif cpd in cache:
        return cache[cpd]
    else:
        func = stimulus_contrast_functions[func_idx]
        im0 = normalized_stimulus_images[func_idx]
        imsmooth = ndi.convolve(im0, np.abs(gabor_kernel(cpp)), mode='constant', cval=ev)
        filtered = func(cpd)
        # .values() here (the original used the Python 2-only .itervalues())
        normalized = np.sum([(v**r) / (s**r + imsmooth**r) for v in filtered.values()],
                            axis=0)
        normalized.setflags(write=False)
        cache[cpd] = normalized
        return normalized
def _sample_kernel(self):
    """Sample a random Gabor kernel.

    Returns
    -------
    kernel : instance of skimage.filters.gabor_kernel
        A 2D Gabor kernel (K x K).
    kernel_params: dict
        A dictionary of keys and values of the corresponding 2D Gabor
        ``kernel`` parameters.

    Raises
    ------
    ImportError if ``scikit-image`` is not installed.
    """
    frequency = rng.rand()               # spatial frequency
    theta = rng.uniform() * 2 * np.pi    # orientation in radians
    bandwidth = rng.uniform() * 5        # bandwidth of the filter
    n_stds = rng.randint(1, 4)

    # get the random kernel
    kernel_params = {
        "frequency": frequency,
        "theta": theta,
        "bandwidth": bandwidth,
        "n_stds": n_stds,
    }
    kernel = gabor_kernel(**kernel_params)
    return kernel, kernel_params
def get_gabor_filters(angle_inc=3, fre_num=30):
    ori_num = 180 // angle_inc
    gaborfilter = np.zeros((ori_num, fre_num), dtype=object)
    for i in range(ori_num):
        ori = i * angle_inc / 180.0 * math.pi
        for j in range(fre_num):
            if j < 5:
                continue
            kernel = gabor_kernel(j * 0.01, theta=ori, sigma_x=3, sigma_y=3)
            kernel = kernel.real
            # plt.imshow(kernel, cmap='gray')
            # plt.show()
            # plt.close()
            kernel = kernel - np.mean(kernel)
            norm = np.linalg.norm(kernel)
            kernel = kernel / (norm + 0.00001)
            kernel = kernel.real * 255
            # kernel = kernel.astype(np.int32)
            t = np.asarray(kernel, np.int16)
            gaborfilter[i, j] = t
    return gaborfilter
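A hedged usage sketch for the object-array bank above, assuming get_gabor_filters is in scope; the orientation/frequency indices and the test image are arbitrary choices for illustration.

import numpy as np
from scipy import ndimage as ndi
from skimage import data, img_as_float

bank = get_gabor_filters(angle_inc=3, fre_num=30)
kernel = bank[10, 12].astype(np.float64)   # orientation 10*3=30 deg, frequency 0.12 cycles/px
image = img_as_float(data.camera())
response = ndi.convolve(image, kernel, mode='reflect')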
def calcFeatures(img, sigma, frequency, shrinkage=0):
    angles = 4
    resultMean = numpy.zeros(angles)
    resultStd = numpy.zeros(angles)
    filteredImg = numpy.zeros(img.shape)

    # Generate Filter Bank
    for angle in range(angles):
        theta = angle / float(angles) * numpy.pi
        aKernel = numpy.real(
            gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))

        # Image Domain
        ndimage.filters.convolve(img, aKernel, output=filteredImg, mode='wrap')
        if shrinkage > 0:
            imgSize = filteredImg.shape
            filteredImg = filteredImg[shrinkage:imgSize[0] - shrinkage,
                                      shrinkage:imgSize[1] - shrinkage]
        resultMean[angle] = filteredImg.mean()
        resultStd[angle] = filteredImg.std()
    return numpy.array([resultMean.mean(), resultStd.mean()])
def get_gabor_response2(theta, img, w0):
    kernel = gabor_kernel(w0, theta)
    new_img = cv2.filter2D(img, -1, np.real(kernel), borderType=cv2.BORDER_CONSTANT)
    return new_img
def generate_kernel(theta=0):
    frequency = .1
    sigma_x = 1  # left right axis. Bigger this number, smaller the width
    sigma_y = 2  # right left axis. Bigger this number, smaller the height
    # pass the sigmas by keyword: the third positional argument of gabor_kernel
    # is bandwidth, not sigma_x
    kernel = np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma_x, sigma_y=sigma_y))
    kernel -= np.mean(kernel)
    return kernel
def scaled_gabor_kernel(cpp, theta=0, zero_mean=True, **kwargs):
    '''
    scaled_gabor_kernel(...) is identical to gabor_kernel(...) except that the
    resulting kernel is scaled such that the response of the kernel to a
    grating of identical frequency and angle and min/max values of -/+ 1 is 1.
    scaled_gabor_kernel has one additional argument, zero_mean=True, which
    specifies that the kernel should (or should not) be given a zero mean
    value. The gabor_kernel function alone does not do this, but
    scaled_gabor_kernel does this by default, unless zero_mean is set to False.
    '''
    if pimms.is_quantity(cpp):
        cpp = cpp.to(units.cycle / units.px).m
    if pimms.is_quantity(theta):
        theta = theta.to(units.rad).m
    kern = gabor_kernel(cpp, theta=theta, **kwargs)
    # First, zero-mean them
    if zero_mean:
        kern = (kern.real - np.mean(kern.real)) + 1j * (kern.imag - np.mean(kern.imag))
    # Next, make the max response grating
    (n, m) = kern.shape
    (cn, cm) = [0.5 * (q - 1) for q in [n, m]]
    (costh, sinth) = (np.cos(theta), np.sin(theta))
    mtx = (2 * np.pi * cpp) * np.asarray(
        [[costh * (col - cm) + sinth * (row - cn) for col in range(m)]
         for row in range(n)])
    re = kern.real / np.sum(np.abs(kern.real * np.cos(mtx)))
    im = kern.imag / np.sum(np.abs(kern.imag * np.sin(mtx)))
    return np.asarray(re + 1j * im)
def compute_gabor(img, params):
    if not params["shared_dict"].get("gabor_kernels", False):
        gabor_theta = int(params.get("gabor_theta", 4))
        gabor_sigma = make_tuple(params.get("gabor_sigma", "(1,3)"))
        gabor_frequency = make_tuple(params.get("gabor_frequency", "(0.05, 0.25)"))

        kernels = []
        for theta in range(gabor_theta):
            theta = theta / 4. * np.pi
            for sigma in gabor_sigma:
                for frequency in gabor_frequency:
                    kernel = np.real(
                        gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                    kernels.append(kernel)
        params["shared_dict"]["gabor_kernels"] = kernels

    kernels = params["shared_dict"]["gabor_kernels"]
    imgg = rgb2gray(img)
    feats = np.zeros((imgg.shape[0], imgg.shape[1], len(kernels)), dtype=np.double)
    for k, kernel in enumerate(kernels):
        filtered = ndi.convolve(imgg, kernel, mode='wrap')
        feats[:, :, k] = filtered
    return feats
def gabor_initializer():
    # creates a filter bank of 43x43 gabors with 16 orientations and 6 spatial frequencies.
    from skimage.filters import gabor_kernel
    # import skimage.filters.gabor_kernel as gabor_kernel  # doesn't work either...
    from skimage import io
    import matplotlib.pyplot as plt
    import math
    import numpy as np

    kernels = np.zeros((43, 43, 3, 96))
    bandwidths = (.16, .24, .32, .48, .9, 1.6)  # makes each filter 43x43
    for sfIndex, spatFreq in enumerate((1 / 2., 1 / 3., 1 / 4., 1 / 6., 1 / 11., 1 / 18.)):
        for oriIndex, orientation in enumerate(range(1, 17)):
            newKernel = gabor_kernel(frequency=spatFreq,
                                     theta=math.pi * 2. * (orientation / 16.),
                                     bandwidth=bandwidths[sfIndex])
            newKernel = newKernel.real
            kernels[:, :, :, sfIndex * 16 + oriIndex] = np.zeros((43, 43, 3))
            mismatch = 43 - len(newKernel)
            border = int(np.floor(mismatch / 2.))
            kernels[border:border + len(newKernel), border:border + len(newKernel), 0,
                    sfIndex * 16 + oriIndex] = newKernel
            kernels[border:border + len(newKernel), border:border + len(newKernel), 1,
                    sfIndex * 16 + oriIndex] = newKernel
            kernels[border:border + len(newKernel), border:border + len(newKernel), 2,
                    sfIndex * 16 + oriIndex] = newKernel
    kernels = np.float32(kernels)
    return kernels
def plot_gabor_filters():
    kernels = []
    labels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1.5, 2):
            for frequency in (0.05, 0.20, 0.30, 0.45):
                for stds in (2, 3):
                    for offset in (0, 1, -1):
                        kernel = np.real(
                            gabor_kernel(frequency, theta=theta, sigma_x=sigma,
                                         sigma_y=sigma, n_stds=stds, offset=offset))
                        if kernel.shape[0] > 7:
                            overlap = int((kernel.shape[0] - 7) / 2)
                            length = kernel.shape[0]
                            kernel = kernel[overlap:length - overlap,
                                            overlap:length - overlap]
                        kernels.append(kernel)
                        labels.append(
                            f'Theta {theta:.2}\n sigma {sigma}, std {stds}\n '
                            f'frequency {frequency}, \noffset {offset}')
                        print(f'Theta {theta}, sigma {sigma}, frequency {frequency}, stds {stds}')
        plot_images(kernels, 10, labels, f'Theta_{theta:.2}')
        kernels = []
def _stimulus_contrast_function(k, cpd):
    cache = _stimulus_contrast_cache[k]
    if isinstance(cpd, set):
        return {x: _stimulus_contrast_function(k, x) for x in cpd}
    elif hasattr(cpd, '__iter__'):
        return [_stimulus_contrast_function(k, x) for x in cpd]
    # want to make sure that cpd is a float (only after the collection cases,
    # otherwise float() would fail on a set or list)
    cpd = float(cpd)
    if cpd in cache:
        return cache[cpd]
    else:
        # switch to cycles per pixel
        im = imgs[k]
        cpp = cpd / d2ps[k]
        c = evs[k]
        kerns = [(kn.real, kn.imag)
                 for th in orients
                 for kn in [gabor_kernel(cpp, theta=th)]]
        # The filtered orientations
        filtered_orientations = {
            th: np.sum([ndi.convolve(im, kern_part, mode='constant', cval=c)**2
                        for kern_part in re_im_kern],
                       axis=0)
            for (th, re_im_kern) in zip(orients, kerns)
        }
        # now, collapse them down to a single filtered image
        # filtered = np.sum(filtered_orientations.values(), axis=0)
        # filtered_orientations.setflags(write=False)
        cache[cpd] = filtered_orientations
        return filtered_orientations
def extract_features(images, vector_size=32):
    options = ["ORB", "SIFT", "LBP", "Gabor", "Entropy", "LBP and Entropy"]
    res = ui.prompt("Choose a feature selection algorithm:", options)
    type = options[int(res)]

    data = []
    for img in pb.progressbar(images):  # Process each image
        if type == "ORB":  # Corner features
            alg = cv2.ORB_create()
            descriptor_size = 32
            data.append(describe_keypoints(img, alg, vector_size, descriptor_size))
        elif type == "SIFT":  # Corner features (patented)
            alg = cv2.xfeatures2d.SIFT_create()
            descriptor_size = 128
            data.append(describe_keypoints(img, alg, vector_size, descriptor_size))
        elif type == "LBP":  # Simple texture recognition
            alg = LocalBinaryPatterns(32, 16)
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            data.append(alg.describe(grey))
        elif type == "Gabor":
            # prepare filter bank kernels
            kernels = []
            for theta in range(4):
                theta = theta / 8. * np.pi
                for sigma in (1, 3):
                    for frequency in (0.05, 0.25):
                        kernel = np.real(
                            gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                        kernels.append(kernel)
            shrink = (slice(0, None, 3), slice(0, None, 3))
            img_shrink = img_as_float(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))[shrink]
            feats = compute_feats(img_shrink, kernels).flatten()
            # keep only the counts, as in the other branches
            # (exposure.histogram returns (counts, bin_centers))
            hist = exposure.histogram(img_shrink, nbins=16)[0]
            data.append(np.append(feats, hist))
        elif type == "Entropy":
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            grey = entropy(grey, disk(5))
            hist = exposure.histogram(grey, nbins=16)[0]
            data.append(hist)
        elif type == "LBP and Entropy":
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            alg = LocalBinaryPatterns(32, 16)
            entropy_grey = entropy(grey, disk(5))
            hist = exposure.histogram(entropy_grey, nbins=16)[0]
            data.append(np.append(alg.describe(grey), hist))
        else:
            print("ERROR: Type " + type + " not found (features.extract_features())\n")
            return 1
    return data, type
def convolve_raw(self, frame, frequency=0.5, theta=0):
    kernel = gabor_kernel(frequency, theta=theta)
    # convolve the input frame (the original referenced an undefined `image`)
    real = ndi.convolve(frame, np.real(kernel), mode='wrap')
    imag = ndi.convolve(frame, np.imag(kernel), mode='wrap')
    return real, imag
def kernels(self, params):
    kernels = []
    for frequency in params[0]:
        for theta in params[1]:
            theta = (theta / 360.) * 2. * np.pi
            kernel = gabor_kernel(frequency, theta=theta, bandwidth=5)
            kernels.append(kernel)
    return kernels
def convolve(self, frame, frequency=0.4):
    # do not change original. not actually sure this is needed
    frame = np.array(frame, dtype=frame.dtype)
    theta = 0  # in (0, 1)
    kernel = gabor_kernel(frequency, theta=theta)
    return self.power(frame, kernel)
def compute_kernels():
    kernels = []
    for theta in range(4):  # direction (0, 1, 2, 3)
        theta = theta / 4. * np.pi
        # the original had a missing colon and a stray `sigman` name here
        for sigma in range(1, 3):
            kernel = np.real(gabor_kernel(frequency=0.1, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)
    return kernels
def plot_gabor(images, filename=None):  # , patch_size):
    # images = images.reshape(images.shape[0], patch_size, patch_size)
    results = []
    kernel_params = []
    thetas = [0, 1, 2, 3]
    for theta in thetas:
        theta = theta / 4. * np.pi
        for frequency in (0.1, 0.4):
            kernel = gabor_kernel(frequency, theta=theta)
            params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
            kernel_params.append(params)
            # Save kernel and the power image for each image
            results.append((kernel, [power(img, kernel) for img in images]))

    fig, axes = plt.subplots(nrows=2 * len(thetas) + 1, ncols=11, figsize=(11, 6))
    plt.gray()
    if not filename:
        fig.suptitle('Image responses for (a selection of) Gabor filter kernels', fontsize=16)
    axes[0][0].axis('off')

    # Plot original images
    for label, img, ax in zip([str(i) for i in range(0, len(images))], images, axes[0][1:]):
        ax.imshow(img, cmap=plt.cm.gray, interpolation='nearest')
        ax.set_title(label, fontsize=9)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')

    for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
        # Plot Gabor kernel
        ax = ax_row[0]
        ax.imshow(np.real(kernel), cmap=plt.cm.gray, interpolation='nearest')
        ax.set_ylabel(label, fontsize=7)
        ax.set_xticks([])
        ax.set_yticks([])

        # Plot Gabor responses with the contrast normalized for each filter
        vmin = np.min(powers)
        vmax = np.max(powers)
        for patch, ax in zip(powers, ax_row[1:]):
            ax.imshow(patch, vmin=vmin, vmax=vmax, cmap=plt.cm.gray, interpolation='nearest')
            ax.axis('off')

    if filename:
        save_to = filename + '.gabor.png'
        plt.savefig(save_to)
        return save_to
    else:
        plt.show()
def GaborFunctions(sigma, frequency, angles):
    kernels = []
    for theta in range(angles):
        theta = theta / angles * np.pi
        kernel = np.real(
            gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
        kernels.append(kernel)
    return kernels
def detect_shape(Image):
    """
    args - image
    returns - 0 if triangle, 1 if square, and 2 otherwise
    """
    img = img_as_float(Image)
    features = []
    for theta in (0, np.pi / 4.0, np.pi / 2.0):  # 3 filters implemented
        kernel = gabor_kernel(0.1, theta=theta, offset=0)
        kernel1 = gabor_kernel(0.1, theta, offset=np.pi / 2.0)
        features.append(generate_features(img, kernel, kernel1))
    if check_triangle(features) == 1:
        return 0  # For triangle
    elif check_square(features) == 1:
        return 1  # For Square
    else:
        return 2  # Neither of them
def create_gabor_filters(frequencies, thetas, sigmaX, sigmaY):
    kernels = []
    for frequency in frequencies:
        for theta in thetas:
            # keep it real; pass the sigmas by keyword so they are not
            # interpreted as the bandwidth argument
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigmaX, sigma_y=sigmaY))
            kernels.append(kernel)
    return np.array(kernels)
def generate_kernels(num):
    kernels = []
    for theta in range(num):
        theta = theta / float(num) * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(
                    gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)
    return kernels
def compute_gist_descriptor(img_loc):
    # build average feature map:
    def make_square(img):
        """turns an image into a square
        assumes the image is grayscale
        for sides divisible by 4
        """
        r, c = img.shape
        side4 = (int(min([r, c]) / 4)) * 4
        one_edge = side4 // 2  # integer arithmetic so these values can be used as indices
        img1 = img[((r // 2) - one_edge):((r // 2) + one_edge),
                   ((c // 2) - one_edge):((c // 2) + one_edge)]
        r, c = img1.shape
        return img1[:min([r, c]), :min([r, c])]

    def compute_avg(img):
        img = make_square(img)
        r, c = img.shape
        chunks_row = np.split(np.array(range(r)), 4)
        chunks_col = np.split(np.array(range(c)), 4)
        grid_images = []
        for row in chunks_row:
            for col in chunks_col:
                grid_images.append(np.mean(img[np.min(row):np.max(row),
                                               np.min(col):np.max(col)]))
        return np.array(grid_images).reshape((4, 4))

    def power_single(gs):
        (kernel, powers) = gs
        return powers * 255

    images = cv2.imread(img_loc, cv2.COLOR_GRAY2BGR)
    # shrink makes the image smaller...
    images = make_square(images)
    images = images / 255.0

    # Plot a selection of the filter bank kernels and their responses.
    results = []
    kernel_params = []
    for theta in (0, 1, 2, 3, 4, 5, 6, 7):
        theta = theta / 8. * np.pi
        for frequency in (0.1, 0.2, 0.3, 0.4):
            # for frequency in (0.1, 0.2, 0.4, 0.6, 0.8):
            kernel = gabor_kernel(frequency, theta=theta)
            params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
            kernel_params.append(params)
            # Save kernel and the power image for each image
            results.append((kernel, power(images, kernel)))

    return np.array([compute_avg(power_single(img)) for img in results]).reshape(512,)
def __init__(self, script_path, conf, flag_gpu=False):
    self.labels = [
        "normal", "fresh snow", "melted snow", "compacted snow", "ice", "slippery ice"
    ]
    self.road_class = ""
    self.rect_car = []
    self.pred_car_score = 0
    self.block_size = 20
    # ORB-feature
    self.ft_orb = cv2.ORB_create()
    # cascade for car
    self.cascade_car = cv2.CascadeClassifier(conf["lbp_cascade_car"])
    # SVM model for classification
    self.classifier_road = joblib.load(conf["model_roadcond"])
    self.classifier_car = joblib.load(conf["model_car"])
    # HOG for pedestrian
    self.dt_hog = cv2.HOGDescriptor()
    self.dt_hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # Gabor filter
    self.gabor_kernels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(gabor_kernel(frequency, theta=theta,
                                              sigma_x=sigma, sigma_y=sigma))
                self.gabor_kernels.append(kernel)
    # test path
    self.test_car_dir = os.path.normpath(
        os.path.join(script_path, conf["test_car_dir"]))
    self.test_roadcond_dir = os.path.normpath(
        os.path.join(script_path, conf["test_roadcond_dir"]))
    self.test_roadarea_dir = os.path.normpath(
        os.path.join(script_path, conf["test_roadarea_dir"]))
def gabor_filter(inputs, theta, sigma, frequency):
    from skimage.filters import gabor_kernel
    from scipy import ndimage as ndi

    new_data = []
    kernel = np.real(gabor_kernel(frequency=frequency, theta=theta,
                                  sigma_x=sigma, sigma_y=sigma))
    for i in inputs:
        i = np.reshape(i, (32, 32))
        filtered = ndi.convolve(i, kernel, mode='wrap')
        new_data.append(filtered.reshape(1024))
    return np.array(new_data)
def show_kernels():
    len_of_figure = len(frequencies)
    for j in range(number_of_thetas):
        for i, freq in enumerate(frequencies):
            print(j, i)
            kernel = filters.gabor_kernel(frequency=freq,
                                          theta=np.pi / (number_of_thetas) * j)
            ax = plt.subplot(len_of_figure, number_of_thetas, i * number_of_thetas + j + 1)
            ax.axis('off')
            plt.imshow(np.pad(kernel.real,
                              ((40 - kernel.shape[0]) // 2, (40 - kernel.shape[1]) // 2),
                              'constant'),
                       cmap=cm.Greys_r)
    plt.show()
def get_simple_cell_style_gabor_kernels(frequency=0.25, nOrientations=4):
    gb_reals = []
    gb_ims = []
    for i in range(nOrientations):
        theta = i / 4. * np.pi
        gbs_skimage = gabor_kernel(frequency=frequency, theta=theta, n_stds=3)
        r = np.real(gbs_skimage)
        im = np.imag(gbs_skimage)
        gb_reals.append(r)
        gb_ims.append(im)
    return gb_reals, gb_ims
def get_kernel():
    # prepare filter bank kernels
    kernels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(gabor_kernel(frequency, theta=theta,
                                              sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)
    return kernels
def create_gabor_filters(frequencies, thetas, sigmaX, sigmaY):
    """
    :param frequencies:
    :param thetas: Orientations
    :param sigmaX: Gaussian component sigma
    :param sigmaY: In another direction
    :return: A list of gabor kernels
    """
    kernels = []
    for frequency in frequencies:
        for theta in thetas:
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigmaX, sigma_y=sigmaY))
            kernels.append(kernel)
    return kernels
def main():
    # theta is the clockwise rotation angle
    # frequency is the spatial frequency
    kernel = np.real(gabor_kernel(0.1, theta=30 * np.pi / 180.0))
    img = data.astronaut()  # data.lena() was removed from scikit-image
    img = color.rgb2gray(img)
    img = convolve2d(img, kernel, mode='same')
    print(img)
    axe2 = plt.axes([0, 0, 1, 1])
    axe2.imshow(img, cmap=cm.gray)
    axe = plt.axes([0, 0, 0.2, 0.2])
    axe.imshow(kernel, cmap=cm.gray)
    plt.show()
def single_gist(images):
    images = make_square(images)
    images = images / 255.0
    results = []
    kernel_params = []
    for theta in (0, 1, 2, 3, 4, 5, 6, 7):
        theta = theta / 8. * np.pi
        for frequency in (0.1, 0.2, 0.3, 0.4):
            # for frequency in (0.1, 0.2, 0.4, 0.6, 0.8):
            kernel = gabor_kernel(frequency, theta=theta)
            params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
            kernel_params.append(params)
            # Save kernel and the power image for each image
            results.append((kernel, power(images, kernel)))
    return np.array([compute_avg(power_single(img)) for img in results]).reshape(512,)
def gabor_features(data_in):
    # create gabor kernels
    kernels = []
    for theta in range(8):
        theta = theta / 8.0 * np.pi
        for sigma in (1, 3, 5):
            for frequency in (.10, 0.25, 1.0):
                kernel = np.real(gabor_kernel(frequency, theta=theta,
                                              sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)

    # iterate over data points, and compute the result of applying each gabor kernel
    m, n = data_in.shape
    gabor_features = []
    for i in range(data_in.shape[0]):
        gabor_features.append(compute_feats(data_in[i].reshape(32, 32), kernels))
    return np.vstack(gabor_features)
def GaborKernelBank(n_freq=10, freq_range=(.05, .2), n_theta=6, win_size=(15., 15.)):
    """Function to create the bank of Gabor filters

    Parameters
    ----------
    n_freq : int
        Number of frequencies used to create the different kernels.
    freq_range : tuple of double
        The range of frequencies in which to sample.
    n_theta : int
        Number of angles used to create the different kernels.
    win_size : tuple of double
        Size in pixels of the kernel.

    Returns
    -------
    kernels : list of 2D array
        Array containing the different kernels
    """
    # Set the sigmas from the window size
    s_y = (win_size[0] - 1.) / 6.
    s_x = (win_size[1] - 1.) / 6.

    # Generate the values for the different thetas
    thetas = np.linspace(0, np.pi, int(n_theta))

    # Generate the values for the different frequencies
    freqs = np.linspace(freq_range[0], freq_range[1], int(n_freq))

    kernels = []
    kernels_params = []
    for theta in thetas:
        for freq in freqs:
            kernel = np.real(gabor_kernel(freq, theta=theta, sigma_x=s_x, sigma_y=s_y))
            kernels_params.append((freq, theta, s_y, s_x))
            kernels.append(kernel)

    return (kernels, kernels_params)
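A brief usage sketch, assuming the GaborKernelBank function above is in scope; the parameter values and test image are illustrative, and the (mean, variance) descriptor is just one common way to summarize the responses.

import numpy as np
from scipy import ndimage as ndi
from skimage import data, img_as_float

kernels, kernels_params = GaborKernelBank(n_freq=5, freq_range=(0.05, 0.2),
                                          n_theta=4, win_size=(15., 15.))
image = img_as_float(data.camera())
responses = [ndi.convolve(image, k, mode='wrap') for k in kernels]
feats = np.array([(r.mean(), r.var()) for r in responses])
print(feats.shape)   # (20, 2): one (mean, variance) pair per kernel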
def gabor_feature_real(img, sigmax=8, sigmay=1):
    from skimage.filters import gabor_kernel
    # the following *max* arrays will only store max values, others are discarded
    # for space concern
    max_orientation = np.empty(img.shape, dtype=np.double)
    max_magnitude = np.zeros(img.shape, dtype=np.double)
    max_frequency = np.empty(img.shape, dtype=np.double)
    for theta in range(180):
        print(theta)
        theta = -np.pi / 2.0 + theta * np.pi / 180.0
        for frequency in (0.05, 0.25):
            kernel = gabor_kernel(frequency, theta=theta, sigma_x=sigmax, sigma_y=sigmay)
            max_orientation, max_magnitude, max_frequency = compute_feature_real(
                img, kernel, theta, frequency,
                max_orientation, max_magnitude, max_frequency)
    return max_orientation, max_magnitude, max_frequency
def gabor_create(frequency=1, orientation=0, bandwidth=1.4, aspect_ratio=1,
                 n_stds=3, offset=0, ppd=42):
    """Create a Gabor kernel.

    Wrapper for skimage's gabor_kernel function that allows you to specify
    different parameters.

    Args:
        frequency: Spatial frequency in cycles per degree (pixels per degree
            must be set appropriately).
        orientation: Orientation *orthogonal to the carrier modulation* (that
            is, orientation specifies the "edges" found by the filters, not the
            orientation of the grating) in range 0 (rightward horizontal) to pi
            (left horizontal). Vertical is pi/2. Note this orientation is 90
            degrees different to the skimage gabor_kernel function.
    """
    ppd = float(ppd)

    # convert parameter values from degrees to pixels:
    freq = frequency / ppd  # gabor filter "freq"

    # convert orientation into "edge" coordinates, running counterclockwise:
    orientation = (np.pi / 2. - orientation)

    # compute sigma x and y from bandwidth, including aspect ratio:
    sigma_x = _sigma_prefactor(bandwidth) / freq
    sigma_y = aspect_ratio * (_sigma_prefactor(bandwidth) / freq)

    # make a complex kernel at this frequency, orientation
    return gabor_kernel(freq, theta=orientation, sigma_x=sigma_x, sigma_y=sigma_y,
                        n_stds=n_stds, offset=offset)
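A hedged usage sketch. It assumes gabor_create and its _sigma_prefactor helper (the bandwidth-to-sigma factor the wrapper relies on) are importable; the 2 cycles/degree frequency and 42 pixels/degree viewing resolution are illustrative values only.

import numpy as np

# a vertical-edge filter at 2 cycles per degree, viewed at 42 pixels per degree
kern = gabor_create(frequency=2, orientation=np.pi / 2, bandwidth=1.4, ppd=42)
print(kern.shape, kern.dtype)          # complex-valued kernel
even_part, odd_part = kern.real, kern.imag   # cosine- and sine-phase components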
def slic_data():
    global images_train_hsv
    global images_train_filtered
    global images_train_indices
    global gt_images_train

    for i in range(uu_num_train + uu_num_valid + uu_num_test):
        # print("data %d" % (i + 1))
        img_name = ''
        if i < 10:
            img_name = '0' + str(i)
        else:
            img_name = str(i)

        # Read first 70 images as floats
        # img = io.imread('../data/training/image_2/uu_0000' + img_name + '.png')
        if i < uu_num_train:
            img = io.imread(training_files[i])
            gt_img = img_as_float(io.imread(training_files_gt[i]))
            print("training")
            print(i + 1, training_files[i], training_files_gt[i])
        elif i >= uu_num_train and i < uu_num_train + uu_num_valid:
            img = io.imread(validation_files[i - uu_num_train])
            gt_img = img_as_float(io.imread(validation_files_gt[i - uu_num_train]))
            print("validation")
            print(i + 1, validation_files[i - uu_num_train], validation_files_gt[i - uu_num_train])
        elif i >= uu_num_train + uu_num_valid and i < uu_num_train + uu_num_valid + uu_num_test:
            img = io.imread(testing_files[i - uu_num_train - uu_num_valid])
            gt_img = img_as_float(io.imread(testing_files_gt[i - uu_num_train - uu_num_valid]))
            print("testing")
            print(i + 1, testing_files[i - uu_num_train - uu_num_valid],
                  testing_files_gt[i - uu_num_train - uu_num_valid])
        # gt_img = img_as_float(io.imread('../data/training/gt_image_2/uu_road_0000' + img_name + '.png'))

        img_hsv = color.rgb2hsv(img)
        img_ycbcr = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
        img_ycbcr = img_as_float(img_ycbcr)
        (a, b, c) = img_ycbcr.shape
        img_y = np.zeros((a, b))
        for n in range(a):
            for m in range(b):
                img_y[n][m] = img_ycbcr[n][m][0]

        img_filtered = np.zeros_like(img_y)
        for theta in range(4):
            theta = theta / 4.0 * np.pi
            for sigma in (0.1, 0.3):
                for frequency in (0.2, 0.3, 0.5, 0.6):
                    kernel = np.real(gabor_kernel(frequency=frequency, theta=theta,
                                                  sigma_x=sigma, sigma_y=sigma))
                    val = ndi.convolve(img_y, kernel, mode='wrap')
                    img_filtered = np.maximum(img_filtered, val, img_filtered)
        # img_filtered = (img_filtered - img_filtered.mean()) / img_filtered.std()
        img_filtered /= img_filtered.max()
        img_filtered = img_filtered.flatten()

        # Create superpixels for training images
        image_segment = slic(img, n_segments=numSegments, sigma=5)
        t, train_indices = np.unique(image_segment, return_index=True)
        images_train_indices.append(train_indices)
        # image = np.reshape(img, (1, (img.shape[0] * img.shape[1]), 3))
        # img_hsv = ndi.gaussian_filter(img_hsv, sigma=(1, 1, 1))
        image_hsv = np.reshape(img_hsv, (1, (img_hsv.shape[0] * img_hsv.shape[1]), 3))

        val1 = 0.0
        val2 = 0.0
        val3 = 0.0
        gabor_val = 0.0
        temp = []
        temp_gabor = []
        for k in train_indices[:950]:
            val1 = 0.0
            val2 = 0.0
            val3 = 0.0
            gabor_val = 0.0
            for l in range(10):
                val1 += image_hsv[0][k + l][0] * 1.0
                val2 += image_hsv[0][k + l][1] * 1.0
                val3 += image_hsv[0][k + l][2] * 1.0
                gabor_val += img_filtered[k + l]
            val1 /= 10
            val2 /= 10
            val3 /= 10
            gabor_val /= 10
            temp.append([val1, val2, val3])
            temp_gabor.append(gabor_val)
        images_train_hsv.append(temp)
        # images_train_hsv[j] = temp
        # image_ycbcr = np.reshape(img_ycbcr, (1, (img_ycbcr.shape[0] * img_ycbcr.shape[1]), 3))
        images_train_filtered.append(temp_gabor)
        # images_train_filtered.append([img_filtered[i] for i in train_indices])
        # images_train.append([image[0][i] for i in train_indices])
        # images_train_ycbcr.append([image_ycbcr[0][i] for i in train_indices])
        # images_train_hsv.append([image_hsv[0][i] for i in train_indices])

        # Create gt training image values indexed at train_indices and converted to 1 or 0
        gt_image = np.reshape(gt_img, (1, (gt_img.shape[0] * gt_img.shape[1]), 3))
        gt_image = [1 if gt_image[0][z][2] > 0 else 0 for z in train_indices]
        gt_images_train.append(gt_image)

    images_train_hsv = np.asarray(images_train_hsv)
    images_train_filtered = np.asarray(images_train_filtered)
plt.imshow(image)
plt.title('Original image')

# Generate a group of gabor filters and apply it to the brick image
plt.figure(figsize=(16, 8))
for j, scale in enumerate(2 ** np.arange(J)):
    for l, theta in enumerate(np.arange(L) / float(L) * np.pi):
        sigma = sigma_xi * scale
        xi = xi_psi / scale
        sigma_x = sigma
        sigma_y = sigma / slant
        freq = xi / (np.pi * 2)
        gabor = gabor_kernel(freq, theta=theta, sigma_x=sigma_x, sigma_y=sigma_y)
        im_filtered = np.abs(ndi.convolve(image, gabor, mode='wrap'))
        plt.subplot(J, L, j * L + l + 1)
        plt.imshow(np.real(im_filtered), interpolation='nearest')
        plt.viridis()
plt.suptitle('Gabor (different scales and orientations)')

# Generate a group of morlet filters and apply it to the brick image
plt.figure(figsize=(16, 8))
for j, scale in enumerate(2 ** np.arange(J)):
    for l, theta in enumerate(np.arange(L) / float(L) * np.pi):
        sigma = sigma_xi * scale
    return np.sqrt(
        nd.convolve(image, np.real(kernel), mode="wrap") ** 2 +
        nd.convolve(image, np.imag(kernel), mode="wrap") ** 2)
    # return nd.convolve(image, np.real(kernel), mode='wrap')**2


# Plot a selection of the filter bank kernels and their responses.
results = []
results1 = []
kernel_params = []
# img = cv2.resize(img, (256, 256))
for theta in np.arange(0, 8, 0.5):
    theta = theta / 4.0 * np.pi
    for frequency in (0.1, 0.2):
        kernel = gabor_kernel(frequency, theta=theta)
        params = "theta=%d,\nfrequency=%.2f" % (theta * 180 / np.pi, frequency)
        kernel_params.append(params)
        # Save kernel and the power image for each image
        results1.append(power(img4, kernel))

#%% output
imgResult = results1[16:]
imgMax = np.zeros((256, 256, 16))
for i in range(16):
    imgMax[:, :, i] = imgResult[i]
imgmax = np.reshape(imgMax, (256 * 256, 16))
imgmax = np.reshape(np.amax(imgmax, axis=1), (256, 256))
plt.figure
def slic_data():
    global images_train_filtered
    global images_train_hsv
    global gt_images_train
    global images_train_indices
    global indices1s
    global indices0s
    global images_train_filtered_flat
    global images_train_hsv_flat
    global gt_images_flat

    print("starting slic data")
    for i in range(uu_num_train + uu_num_valid + uu_num_test):
        # print("data %d" % (i + 1))
        img_name = ''
        if i < 10:
            img_name = '0' + str(i)
        else:
            img_name = str(i)

        if i < uu_num_train:
            img = io.imread(training_files[i])
            gt_img = img_as_float(io.imread(training_files_gt[i]))
            # print("training")
            # print(i + 1, training_files[i], training_files_gt[i])
        elif i >= uu_num_train and i < uu_num_train + uu_num_valid:
            img = io.imread(validation_files[i - uu_num_train])
            gt_img = img_as_float(io.imread(validation_files_gt[i - uu_num_train]))
            # print("validation")
            # print(i + 1, validation_files[i - uu_num_train], validation_files_gt[i - uu_num_train])
        elif i >= uu_num_train + uu_num_valid and i < uu_num_train + uu_num_valid + uu_num_test:
            img = io.imread(testing_files[i - uu_num_train - uu_num_valid])
            gt_img = img_as_float(io.imread(testing_files_gt[i - uu_num_train - uu_num_valid]))
            # print("testing")
            # print(i + 1, testing_files[i - uu_num_train - uu_num_valid], testing_files_gt[i - uu_num_train - uu_num_valid])

        img_hsv = color.rgb2hsv(img)
        img_ycbcr = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
        img_ycbcr = img_as_float(img_ycbcr)
        (a, b, c) = img_ycbcr.shape
        img_y = np.zeros((a, b))
        for n in range(a):
            for m in range(b):
                img_y[n][m] = img_ycbcr[n][m][0]

        img_filtered = np.zeros_like(img_y)
        for theta in range(4):
            theta = theta / 4.0 * np.pi
            for sigma in (0.1, 0.3):
                for frequency in (0.2, 0.3, 0.5, 0.6):
                    kernel = np.real(gabor_kernel(frequency=frequency, theta=theta,
                                                  sigma_x=sigma, sigma_y=sigma))
                    val = ndi.convolve(img_y, kernel, mode='wrap')
                    img_filtered = np.maximum(img_filtered, val, img_filtered)
        img_filtered /= img_filtered.max()
        img_filtered = img_filtered.flatten()

        # Create superpixels for training images
        image_segment = slic(img, n_segments=numSegments, sigma=5)
        t, train_indices = np.unique(image_segment, return_index=True)
        images_train_indices.append(train_indices)
        image_hsv = np.reshape(img_hsv, (1, (img_hsv.shape[0] * img_hsv.shape[1]), 3))

        val1 = 0.0
        val2 = 0.0
        val3 = 0.0
        gabor_val = 0.0
        temp = []
        temp_gabor = []
        for k in train_indices[:950]:
            val1 = 0.0
            val2 = 0.0
            val3 = 0.0
            gabor_val = 0.0
            for l in range(20):
                val1 += image_hsv[0][k + l][0] * 1.0
                val2 += image_hsv[0][k + l][1] * 1.0
                val3 += image_hsv[0][k + l][2] * 1.0
                gabor_val += img_filtered[k + l]
            val1 /= 20
            val2 /= 20
            val3 /= 20
            gabor_val /= 20
            temp.append([val1, val2, val3])
            temp_gabor.append(gabor_val)
        images_train_hsv.append(temp)
        images_train_filtered.append(temp_gabor)

        gt_image = np.reshape(gt_img, (1, (gt_img.shape[0] * gt_img.shape[1]), 3))
        gt_image = [1 if gt_image[0][z][2] > 0 else 0 for z in train_indices[:950]]
        gt_images_train.append(gt_image)

    gt_images_train = np.asarray(gt_images_train)
    print(gt_images_train.shape)
    gt_images_flat = np.reshape(gt_images_train,
                                (1, (gt_images_train.shape[0] * gt_images_train.shape[1])))
    print(gt_images_flat.shape)
    indices1s = [index for index, value in enumerate(gt_images_flat[0]) if value == 1]
    indices0s = [index for index, value in enumerate(gt_images_flat[0]) if value == 0]
    print(len(indices1s))
    print(len(indices0s))
    images_train_hsv = np.asarray(images_train_hsv)
    images_train_hsv_flat = np.reshape(
        images_train_hsv,
        (1, (images_train_hsv.shape[0] * images_train_hsv.shape[1]), 3))
    images_train_filtered = np.asarray(images_train_filtered)
    images_train_filtered_flat = np.reshape(
        images_train_filtered,
        (1, images_train_filtered.shape[0] * images_train_filtered.shape[1]))
def generate_kernel(frequency, theta, sigma): return np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
# gabor filters
num_freq = 4  # 6
num_angl = 6
num_filt = num_freq * num_angl
filterval = np.zeros((imWid, imHei, num_filt))
if fullalg == 0:
    fig2 = plt.figure(figsize=(60, 27))  # 20, 50
angs = [2, 3, 5]
for j in range(num_freq):
    for i in range(num_angl):
        # 0.1+0.15
        # 0.05+0.075
        # ang = angs[i]
        g = gabor_kernel(0.1 + 0.15 * j, theta=(i / 6.0) * np.pi)
        # g = gabor_kernel(1/(2.0**j), theta=(i * 0.25) * np.pi)
        filteredr = np.sqrt(
            nd.convolve(image, np.real(g), mode="wrap") ** 2 +
            nd.convolve(image, np.imag(g), mode="wrap") ** 2)
        filterval[:, :, (j * num_angl + i)] = filteredr
        # if j == 4:
        #     filterval[:, :, 2*(j*num_angl+i)+1] = filteredr
        if fullalg == 0:
            b = fig2.add_subplot(num_freq, num_angl, j * num_angl + i + 1)
            imgplot2 = plt.imshow(filterval[:, :, (j * num_angl + i)], cmap=cm.gray)
            # a.set_title(1/(2.0**j))
def get_gabor(freq, ori):
    gk = gabor_kernel(frequency=(np.pi) / (2 * (np.sqrt(2))**freq),
                      theta=(ori * np.pi) / 8,
                      bandwidth=1,
                      sigma_x=2 * np.pi,
                      sigma_y=2 * np.pi,
                      n_stds=3,
                      offset=0)
    gk /= (1.5 * gk.sum())
    return gk
def get_gabor(img, frequency, theta):
    kernel = np.real(gabor_kernel(frequency, theta=theta * np.pi / 180.0))
    result = convolve2d(img, kernel, mode='same')
    return result
    min_i = None
    for i in range(ref_feats.shape[0]):
        error = np.sum((feats - ref_feats[i, :])**2)
        if error < min_error:
            min_error = error
            min_i = i
    return min_i


# prepare filter bank kernels
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)

shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)

# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
image_y = img_as_float(image_y)
image_y = (image_y - image_y.mean()) / image_y.std()
image_h = img_as_float(image_h)
image_h = (image_h - image_h.mean()) / image_h.std()
image_hsv_smooth = ndi.gaussian_filter(image_hsv, sigma=(1, 1, 1))

kernels = []
filtered = []
accum = np.zeros_like(image_y)
for theta in range(4):
    theta = theta / 4.0 * np.pi
    for sigma in (0.5, 1, 2):
        for frequency in (0.1, 0.4):
            kernel = gabor_kernel(frequency=frequency, theta=theta,
                                  sigma_x=sigma, sigma_y=sigma)
            fimg = ndi.convolve(image_y, kernel, mode='constant', cval=0)
            accum = np.minimum(accum, fimg, accum)
            # filtered.append(ndi.convolve(image_y, kernel, mode='wrap'))
            # kernels.append(kernel)
accum = (accum - accum.mean()) / accum.std()

fig = plt.figure()
ax = fig.add_subplot(3, 1, 1)
ax.imshow(image)
ax = fig.add_subplot(3, 1, 2)
ax.imshow(image_ycbcr)
ax = fig.add_subplot(3, 1, 3)
ax.imshow(image_hsv)
plt.show()
def filter_bank_morlet2d(N, J=4, L=8, sigma_phi=0.8, sigma_xi=0.8):
    # This function computes the set of morlet filters at the maximum size (NxN) and also
    # tests the 'quality of the filters' by checking the littlewood-paley sum
    # Output: dictionary with the filters (in the Fourier domain), and the littlewood-paley image
    # TODO:
    #  - allow boundary values that are not periodic
    #  - introduce subsampling (now all filters are NxN)

    max_scale = 2 ** (float(J - 1))
    sigma = sigma_phi * max_scale
    freq = 0.

    filter_phi = np.ndarray((1, N, N), dtype='complex')
    littlewood_paley = np.zeros((N, N), dtype='single')

    # Low pass
    filter_phi[0, :, :] = np.fft.fft2(np.fft.fftshift(
        zero_pad_filter(gabor_kernel(freq, theta=0., sigma_x=sigma, sigma_y=sigma), N)))

    ## Band pass filters:
    ## psi: Create band-pass filters
    # constant values for psi
    xi_psi = 3. / 4 * np.pi
    slant = 4. / L

    filters_psi = []
    for j, scale in enumerate(2. ** np.arange(J)):
        angles = np.zeros((L, N, N), dtype='complex')
        for l, theta in enumerate(np.pi * np.arange(L) / float(L)):
            sigma = sigma_xi * scale
            xi = xi_psi / scale
            sigma_x = sigma
            sigma_y = sigma / slant
            freq = xi / (np.pi * 2)

            psi = morlet_kernel(freq, theta=theta, sigma_x=sigma_x, sigma_y=sigma_y, n_stds=12)

            # needs a small shift for odd sizes
            if (psi.shape[0] % 2 > 0):
                if (psi.shape[1] % 2 > 0):
                    Psi = zero_pad_filter(psi[:-1, :-1], N)
                else:
                    Psi = zero_pad_filter(psi[:-1, :], N)
            else:
                if (psi.shape[1] % 2 > 0):
                    Psi = zero_pad_filter(psi[:, :-1], N)
                else:
                    Psi = zero_pad_filter(psi, N)

            angles[l, :, :] = np.fft.fft2(np.fft.fftshift(0.5 * Psi))

        littlewood_paley += np.sum(np.abs(angles) ** 2, axis=0)
        filters_psi.append(angles)

    lwp_max = littlewood_paley.max()
    for filt in filters_psi:
        filt /= np.sqrt(lwp_max / 2)

    Filters = dict(phi=filter_phi, psi=filters_psi)
    return Filters, littlewood_paley