Beispiel #1
0
def make_gabor_filters(n_freq, n_orient, base_freq=1,
        freq_factor=np.sqrt(2), offset=0):
    """
    Generate Gabor filter bank.

    n_freq: number of different frequencies
    n_orient: number of different orientations
    base_freq: base frequency
    freq_factor: the multiplicative factor used to derive frequencies, the
        frequencies used will be base_freq, base_freq/freq_factor,
        base_freq/freq_factor^2, ...
    offset: phase offset, almost always set to 0

    Return: kernels, a list of kernel matrices. Different frequencies
        correspond to different scales, so the kernel matrices will be of
        different sizes. The length of this list will be n_freq x n_orient x 2
        (imaginary part first, then real part, for each freq/orient pair).
    """
    kernels = []

    for i_freq in range(n_freq):
        # Frequencies decrease geometrically: base_freq / freq_factor**i.
        freq = 1.0 * base_freq / freq_factor ** i_freq
        for i_orient in range(n_orient):
            # Orientations evenly cover [0, pi).
            theta = np.pi * i_orient / n_orient

            kernel = gabor_kernel(freq, theta, offset=offset)
            # Zero-mean each component, then normalize to unit peak magnitude.
            ki = kernel.imag - kernel.imag.mean()
            kr = kernel.real - kernel.real.mean()
            kernels.append(ki / np.abs(ki).max())
            kernels.append(kr / np.abs(kr).max())

    return kernels
Beispiel #2
0
def make_gabor_filters(n_freq,
                       n_orient,
                       base_freq=1,
                       freq_factor=np.sqrt(2),
                       offset=0):
    """Build a bank of normalized Gabor kernels.

    n_freq: how many distinct frequencies to generate
    n_orient: how many distinct orientations to generate
    base_freq: the highest frequency in the bank
    freq_factor: successive frequencies are divided by this factor, giving
        base_freq, base_freq/freq_factor, base_freq/freq_factor**2, ...
    offset: phase offset passed through to gabor_kernel (usually 0)

    Returns a list of 2D kernel arrays of length n_freq * n_orient * 2;
    for every (frequency, orientation) pair the imaginary part is appended
    first, then the real part, each zero-meaned and scaled to unit peak.
    """
    bank = []
    for f_idx in range(n_freq):
        # Geometric frequency ladder starting at base_freq.
        frequency = 1.0 * base_freq / freq_factor ** f_idx
        for o_idx in range(n_orient):
            # Orientations spread uniformly over the half circle.
            angle = np.pi * o_idx / n_orient
            k = gabor_kernel(frequency, angle, offset=offset)
            # Imaginary component first, then real, matching the documented order.
            for part in (k.imag, k.real):
                centered = part - part.sum() / part.size
                bank.append(centered / np.abs(centered).max())
    return bank
Beispiel #3
0
def get_gabor_kernels(num_theta=4, sigmas=(1, 3), freq=(0.05, 0.25)):
    """Build a bank of real-valued Gabor kernels.

    num_theta: number of orientations, evenly spaced over [0, pi)
    sigmas: iterable of isotropic standard deviations (sigma_x == sigma_y)
    freq: iterable of spatial frequencies

    Returns a list of len(sigmas) * len(freq) * num_theta real 2D arrays.

    Note: the defaults were changed from lists to tuples to avoid the
    mutable-default-argument pitfall; callers only iterate them, so this
    is fully backward compatible.
    """
    gaborkernels = []
    for i in range(num_theta):
        # Evenly spaced orientation in radians.
        theta = i / float(num_theta) * np.pi
        for sigma in sigmas:
            for fr in freq:
                kernel = np.real(gabor_kernel(fr, theta, sigma_x=sigma, sigma_y=sigma))
                gaborkernels.append(kernel)
    return gaborkernels
Beispiel #4
0
def getGaborKernels(n_theta=4, sigmas=(1, 3), frequencies=(0.05, 0.25)):
    """Build a bank of real-valued Gabor kernels.

    n_theta: number of orientations, evenly spaced over [0, pi)
    sigmas: iterable of isotropic standard deviations (sigma_x == sigma_y)
    frequencies: iterable of spatial frequencies

    Returns a list of len(sigmas) * len(frequencies) * n_theta real 2D arrays.

    Note: the defaults were changed from lists to tuples to avoid the
    mutable-default-argument pitfall; callers only iterate them, so this
    is fully backward compatible.
    """
    gaborKernels = []
    for t in range(n_theta):
        # Evenly spaced orientation in radians.
        theta = t / float(n_theta) * np.pi
        for sigma in sigmas:
            for frequency in frequencies:
                kernel = np.real(gabor_kernel(frequency, theta, sigma_x=sigma, sigma_y=sigma))
                gaborKernels.append(kernel)
    return gaborKernels
Beispiel #5
0
def update(val):
    """Slider callback: redraw the Gabor kernel preview from current slider values.

    val: new slider value (unused; all four parameters are re-read from the
    module-level sliders each call).
    """
    freq = slider_frequencias.val
    angle = slider_thetas.val
    sx = slider_sigmasx.val
    sy = slider_sigmasy.val
    # Only the real part of the complex kernel is displayed.
    kernel = np.real(
        gabor_kernel(freq, theta=angle, sigma_x=sx, sigma_y=sy))
    ax.imshow(kernel, interpolation='bicubic')
    draw()
Beispiel #6
0
def gabor():
    """Read rows of 'train_small.csv', filter each 48x48 image with a Gabor
    bank, and write all filter responses per row to 'gabor_feats.csv'.

    Each input row is (label, space-separated pixel string, tp); each output
    row is "label,<space-separated %.4f responses>".

    NOTE(review): start_row_no and end_row_no are module-level globals not
    defined in this block -- confirm they are set before gabor() is called.
    """
    # Filter bank: 9 geometrically spaced frequencies x 8 orientations.
    kernels = []
    sigma = 6.0
    for f in range(9):
        frequency = 1 / (2 * pow(math.sqrt(2), f))
        for t in range(0, 8):
            theta = (np.pi * t) / 8.0
            kernels.append(np.real(gabor_kernel(frequency, theta=theta,
                                                sigma_x=sigma, sigma_y=sigma)))

    # 'with' guarantees both files are closed even if a row fails to parse.
    # (The original's unused grid_img / commented-out display code and the
    # unused 'label' int were dropped.)
    with open('train_small.csv', 'r') as fileHandle, \
            open('gabor_feats.csv', 'w') as gab_file:
        reader = csv.reader(fileHandle)
        row_no = 1
        for row in reader:
            # Process only rows in [start_row_no, end_row_no].
            if row_no < start_row_no:
                row_no = row_no + 1
                continue
            if row_no > end_row_no:
                break

            labelStr, featureStr, tp = row
            # List comprehension instead of map() so the result is a concrete
            # list under both Python 2 and 3 before np.array sees it.
            features = [float(x) for x in featureStr.split(' ')]
            trn_2D = np.reshape(np.array(features), (48, 48))

            gab_images = compute_feats(trn_2D, kernels)
            gab_file.write(labelStr + ',')
            # Join instead of repeated string concatenation (avoids O(n^2)).
            pieces = []
            for img in gab_images:
                for pixel in img.ravel():
                    pieces.append(" %.4f" % pixel)
            gab_file.write("".join(pieces).lstrip() + '\n')
            row_no = row_no + 1

    print("Finished extracting gabor features")
Beispiel #7
0
    def make_gabor_field(self,
                         X,
                         zscore=True,
                         log_amplitude=True,
                         thetas=range(4),
                         sigmas=(1, 3),
                         frequencies=(0.05, 0.25)):
        """Given a spectrogram, prepare 2D patches and Gabor filter bank kernels
        inputs:
           X - spectrogram data (frequency x time)
           zscore - whether to zscore the ensemble of 2D patches [True]
           log_amplitude - whether to apply log(1+X) scaling of spectrogram data [True]
           thetas - list of 2D Gabor filter orientations in units of pi/4. [range(4)]
           sigmas - list of 2D Gabor filter standard deviations in oriented direction [(1,3)]
           frequencies - list of 2D Gabor filter frequencies [(0.05,0.25)]
        outputs:
           self.data - 2D patches of input spectrogram
           self.D.components_ - Gabor dictionary of thetas x sigmas x frequencies atoms
        """
        self._extract_data_patches(X, zscore, log_amplitude)
        self.n_components = len(thetas) * len(sigmas) * len(frequencies)
        self.thetas = thetas
        self.sigmas = sigmas
        self.frequencies = frequencies
        a, b = self.patch_size
        self.kernels = []
        for theta in thetas:
            theta = theta / 4. * np.pi
            for sigma in sigmas:
                for frequency in frequencies:
                    kernel = np.real(
                        gabor_kernel(frequency,
                                     theta=theta,
                                     sigma_x=sigma,
                                     sigma_y=sigma))
                    c, d = kernel.shape
                    # Floor division keeps slice bounds ints under Python 3
                    # ('/' would yield floats and raise TypeError); identical
                    # to the original '/' under Python 2 integer semantics.
                    if c <= a:
                        # Kernel fits inside a patch: center it in zero padding.
                        # NOTE(review): only c <= a is checked; a kernel with
                        # c <= a but d > b would raise -- confirm kernels are
                        # near-square for the parameter ranges used.
                        z = np.zeros(self.patch_size)
                        z[(a // 2 - c // 2):(a // 2 - c // 2 + c),
                          (b // 2 - d // 2):(b // 2 - d // 2 + d)] = kernel
                    else:
                        # Kernel larger than a patch: crop its centered window.
                        z = kernel[(c // 2 - a // 2):(c // 2 - a // 2 + a),
                                   (d // 2 - b // 2):(d // 2 - b // 2 + b)]
                    self.kernels.append(z.flatten())

        class Bunch:
            # Minimal attribute container (sklearn-style Bunch).
            def __init__(self, **kwds):
                self.__dict__.update(kwds)

        self.D = Bunch(components_=np.vstack(self.kernels))
def gabor_kernel_features(path):
    print "Processing image: ", path.split("/")[-1]
    galaxy_image = io.imread(path, as_grey=True)
    galaxy_image = exposure.rescale_intensity(galaxy_image, out_range=(0,255))    # Improving contrast
    galaxy_image = rotateImage(galaxy_image)
    kernels = []
    for theta in [0, 1, 2, 3]:
        theta = theta / 4. * np.pi
        for frequency in (0.05, 0.3, 0.5, 0.7):
            kernel = gabor_kernel(frequency, theta=theta)
            kernels.append(kernel)
    galaxy_image = (galaxy_image-galaxy_image.mean())/galaxy_image.std()
    feature_vector = compute_gabor_feats(galaxy_image, kernels)
    feature_vector = _add_galaxy_id(path, feature_vector)
    return feature_vector
Beispiel #9
0
def make_filter_bank(frequencies, thetas, real=True):
    """Prepare a filter bank of Gabor kernels.

    frequencies: iterable of spatial frequencies
    thetas: iterable of orientations (radians)
    real: if True, keep only the real part of each kernel

    Returns (kernels, kernel_freqs) where kernel_freqs[i] is the frequency
    used to build kernels[i].
    """
    # TODO: set MTF of each filter at (u, v) to 0
    kernels = []
    kernel_freqs = []
    for frequency in frequencies:
        # NOTE(review): the original called _compute_sigmas(frequency) here but
        # never used the result (kernel size is driven by bandwidth=1 instead);
        # the dead call was removed -- confirm _compute_sigmas has no side effects.
        for theta in thetas:
            kernels.append(gabor_kernel(frequency, theta=theta, bandwidth=1))
            kernel_freqs.append(frequency)
    if real:
        kernels = [np.real(k) for k in kernels]
    return kernels, np.array(kernel_freqs)
Beispiel #10
0
def generate_data(ndim, nsamples, nfeatures):
    """Generate data by drawing samples that are a sparse combination of gabor features

    ndim: side length of each (square) feature image
    nsamples: number of samples to draw
    nfeatures: number of Gabor atoms in the dictionary

    Returns (X, features): X is (nsamples, ndim*ndim), features is
    (nfeatures, ndim*ndim).
    """
    # Build the dictionary: random orientation, width and frequency per atom.
    # (RNG draws happen in the same order as before: theta, sigma, freq.)
    atoms = []
    for _ in range(nfeatures):
        angle = np.pi * np.random.rand()
        width = 2 * np.random.rand() + 1
        frequency = 0.2 * np.random.rand() + 0.05
        kern = np.real(gabor_kernel(frequency, theta=angle, sigma_x=width, sigma_y=width))
        atoms.append(resize(kern, (ndim, ndim)).ravel())
    features = np.vstack(atoms)

    # Laplacian-distributed coefficients give sparse mixtures of the atoms.
    X = np.random.laplace(size=(nsamples, nfeatures)).dot(features)

    return X, features
def gabor():
	# prepare filter bank kernels
	kernels = []
	for theta in range(4):
	    theta = theta / 4. * np.pi
	    for sigma in (5, 10):
		# TODO: check effect of frequency on kernel size
	        #for frequency in (0.05, 0.25):
	        for frequency in (0.1, 0.2):
	            kernel = np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
	            kernels.append(kernel)

	fileHandle = open('../Datasets/train.csv', 'r')
	reader = csv.reader(fileHandle)

	#gabor_features = list()
	for row in reader:
		labelStr, featureStr, tp = row
		label = int(labelStr)
		features = map(lambda x: float(x), featureStr.split(' '))
		
		trn_2D = np.reshape(np.array(features), (48, 48))
		
		gab_images =  compute_feats(trn_2D, kernels)
		avg_gab_image = np.zeros_like(gab_images[0])
		
		for ittrImg in range(0, len(gab_images)):
			avg_gab_image = avg_gab_image + gab_images[ittrImg]		
		
		avg_gab_image = avg_gab_image / len(gab_images)
		avg_gab_array = avg_gab_image.ravel()

		#gabor_features.append(avg_gab_array)
		#print len(gabor_features)

		with open("gabor_feats.csv", "a") as f:
			writer = csv.writer(f)
			writer.writerow(avg_gab_array)

	fileHandle.close()

	print "Finished extracting gabor features"
Beispiel #12
0
def creat_Gabor_Kernels(norientation, sigma, frequency, gamma):
    """Create real-valued Gabor kernels at evenly spaced orientations.

    Parameters
    ----------
    norientation: integer
        number of orientations, evenly spaced over [0, pi)
    sigma: float
        scale of the kernel along the oriented (x) axis
    frequency: float
        wavelength/frequency of the kernel
    gamma: float
        spatial aspect ratio; the cross axis uses sigma_y = sigma / gamma

    Returns
    -------
    kernels: list of 2D real-valued arrays, one per orientation
    """
    kernels = []
    for orientation in range(norientation):
        # Spread orientations uniformly over the half circle.
        theta = orientation / float(norientation) * np.pi
        kernel = np.real(gabor_kernel(frequency, theta=theta,
                                      sigma_x=sigma, sigma_y=sigma / float(gamma)))
        kernels.append(kernel)

    return kernels
 def make_gabor_field(self, X, zscore=True, log_amplitude=True, thetas=range(4),
                      sigmas=(1, 3), frequencies=(0.05, 0.25)):
     """Given a spectrogram, prepare 2D patches and Gabor filter bank kernels
     inputs:
        X - spectrogram data (frequency x time)
        zscore - whether to zscore the ensemble of 2D patches [True]
        log_amplitude - whether to apply log(1+X) scaling of spectrogram data [True]
        thetas - list of 2D Gabor filter orientations in units of pi/4. [range(4)]
        sigmas - list of 2D Gabor filter standard deviations in oriented direction [(1,3)]
        frequencies - list of 2D Gabor filter frequencies [(0.05,0.25)]
     outputs:
        self.data - 2D patches of input spectrogram
        self.D.components_ - Gabor dictionary of thetas x sigmas x frequencies atoms
     """
     self._extract_data_patches(X, zscore, log_amplitude)
     self.n_components = len(thetas) * len(sigmas) * len(frequencies)
     self.thetas = thetas
     self.sigmas = sigmas
     self.frequencies = frequencies
     a, b = self.patch_size
     self.kernels = []
     for theta in thetas:
         theta = theta / 4. * np.pi
         for sigma in sigmas:
             for frequency in frequencies:
                 kernel = np.real(gabor_kernel(frequency, theta=theta,
                                               sigma_x=sigma, sigma_y=sigma))
                 c, d = kernel.shape
                 # Floor division keeps slice bounds ints under Python 3
                 # ('/' would yield floats and raise TypeError); identical to
                 # the original '/' under Python 2 integer semantics.
                 if c <= a:
                     # Kernel fits inside a patch: center it in zero padding.
                     z = np.zeros(self.patch_size)
                     z[(a // 2 - c // 2):(a // 2 - c // 2 + c),
                       (b // 2 - d // 2):(b // 2 - d // 2 + d)] = kernel
                 else:
                     # Kernel larger than a patch: crop its centered window.
                     z = kernel[(c // 2 - a // 2):(c // 2 - a // 2 + a),
                                (d // 2 - b // 2):(d // 2 - b // 2 + b)]
                 self.kernels.append(z.flatten())

     class Bunch:
         # Minimal attribute container (sklearn-style Bunch).
         def __init__(self, **kwds):
             self.__dict__.update(kwds)

     self.D = Bunch(components_=np.vstack(self.kernels))
def generateFeature(args):
    """Extract blocked Gabor features for smile / non-smile train & test splits.

    args keys used: 'splitPkl' (pickle with the four file-name lists),
    'ysmileDir' / 'nsmileDir' (image directories), and
    'block_number_x' / 'block_number_y' (passed through to extractFeature).

    Returns (x_data_train, y_data_train, x_data_test, y_data_test); the y
    lists hold +1.0 for smile images and -1.0 for non-smile images.
    """
    y_name_train, y_name_test, n_name_train, n_name_test = load_pkl(args['splitPkl'])
    print(len(y_name_train))
    print(len(y_name_test))
    print(len(n_name_train))
    print(len(n_name_test))

    x_data_train = list()
    y_data_train = list()
    x_data_test = list()
    y_data_test = list()
    q = queue.Queue()      # feature vectors produced by extractFeature workers
    q_job = queue.Queue()  # pending file names

    # Shared bank: 4 orientations x 2 sigmas x 2 frequencies = 16 real kernels.
    kernels = []
    for theta in range(4):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for frequency in (0.05, 0.25):
                kernel = np.real(gabor_kernel(frequency, theta=theta,
                                              sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)

    def _extract_split(names, img_dir, label, x_out, y_out):
        """Run extractFeature over every file in *names*, appending features
        to x_out and the constant *label* to y_out (one per file).

        NOTE(review): one daemon thread is spawned per file (unbounded), and
        q_job.join() only returns if extractFeature calls q_job.task_done()
        per item -- confirm against extractFeature's implementation.
        """
        for name in names:
            q_job.put(name)
        for i, name in enumerate(names):
            print(i, len(names), name)
            t = threading.Thread(
                target=extractFeature,
                args=(q, q_job, img_dir + '/' + name,
                      args['block_number_x'], args['block_number_y'], kernels))
            t.daemon = True
            t.start()
            y_out.append(label)
        q_job.join()
        print("qsize: " + str(q.qsize()))
        while q.qsize() > 0:
            x_out.append(q.get())

    with Timer("Extracting features"):
        # The four previously copy-pasted stanzas, now one helper call each.
        _extract_split(y_name_train, args['ysmileDir'], 1.0, x_data_train, y_data_train)
        _extract_split(n_name_train, args['nsmileDir'], -1.0, x_data_train, y_data_train)
        _extract_split(y_name_test, args['ysmileDir'], 1.0, x_data_test, y_data_test)
        _extract_split(n_name_test, args['nsmileDir'], -1.0, x_data_test, y_data_test)

    return x_data_train, y_data_train, x_data_test, y_data_test
Beispiel #15
0
import tiffLib

from scipy import ndimage as nd
import scipy
from skimage.filter import gabor_kernel
import numpy as np
import time
from numpy.fft import fft, ifft, fft2, ifft2, fftshift

# Benchmark: spatial-domain vs FFT-based convolution with one Gabor kernel.
dataPath = 'C:/Tomosynthesis/localtest/'
fileName = 'test-crop.tif'
outputPath = 'C:/Tomosynthesis/localtest/res/'
# NOTE(review): ImageIO is not among this file's visible imports -- confirm
# where imReader comes from.
im = ImageIO.imReader(dataPath,fileName, 'tif',2)


# Real part of a low-frequency Gabor kernel; sigma_y = 20/0.9 gives a slightly
# anisotropic kernel (aspect ratio 0.9).
kernel = np.real(gabor_kernel(0.0185, 0, 20, 20/float(0.9)))
print kernel.shape

# Timing 1: direct spatial-domain convolution.
start = time.clock()
temp_response = nd.convolve(im.data[0], kernel, mode='nearest')
elapsed = (time.clock() - start)
print elapsed

# Timing 2: FFT-based convolution (edge-pad, multiply spectra, crop center).
start = time.clock()
data = np.lib.pad(im.data[0], ((0,kernel.shape[0]),(0,kernel.shape[1])),'edge')
temp_response_2 = np.fft.irfft2(np.fft.rfft2(data) * np.fft.rfft2(kernel,data.shape))
# NOTE(review): the '/' below relies on Python 2 integer division for valid
# slice bounds; under Python 3 these would be floats -- confirm interpreter.
temp_response_2 = temp_response_2[kernel.shape[0]/2:data.shape[0] - kernel.shape[0]/2,kernel.shape[1]/2:data.shape[1] - kernel.shape[1]/2]
elapsed = (time.clock() - start)
print elapsed
print data.shape
def power(image, kernel):
    """Return the response of the z-scored *image* to the real part of *kernel*.

    The image is standardized first so responses are comparable across images;
    'wrap' boundary mode tiles the image at the edges.
    """
    normed = (image - image.mean()) / image.std()
    # Only the real part of the (complex) Gabor kernel is used; the commented
    # alternative below would combine real and imaginary energy instead.
    return nd.convolve(normed, np.real(kernel), mode="wrap")
    # return np.sqrt(nd.convolve(image, np.real(kernel), mode='wrap')**2 +
    #               nd.convolve(image, np.imag(kernel), mode='wrap')**2)


# Plot a selection of the filter bank kernels and their responses.
# NOTE(review): 'test' (the source image) and 'plt' are defined elsewhere in
# the original module -- confirm before running this snippet standalone.
results = []
kernel_params = []
for theta in range(4):
    # Four orientations at multiples of 45 degrees, fixed frequency 0.1.
    theta = theta / 4.0 * np.pi
    frequency = 0.1
    kernel = gabor_kernel(frequency, theta=theta)
    # print(kernel)
    params = "theta=%d,\nfrequency=%.2f" % (theta * 180 / np.pi, frequency)
    kernel_params.append(params)
    # Save kernel and the power image for each image
    results.append((kernel, power(test, kernel)))

fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(5, 4))
plt.gray()

# fig.suptitle('Convolutional Layer Using Gabor', fontsize=11)

axes[0][0].axis("off")

# Plot Source image
axes[1][0].imshow(test)
Beispiel #17
0
        error = np.sum((feats - ref_feats[i, :])**2)
        if error < min_error:
            min_error = error
            min_i = i
    return min_i


# prepare filter bank kernels
# 4 orientations x 2 sigmas x 2 frequencies = 16 real-valued Gabor kernels.
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(
                gabor_kernel(frequency,
                             theta=theta,
                             sigma_x=sigma,
                             sigma_y=sigma))
            kernels.append(kernel)

# Downsample each demo texture by a factor of 3 in both axes.
shrink = (slice(0, None, 3), slice(0, None, 3))
# NOTE(review): data.load was removed from recent scikit-image releases
# (use data.brick()/skimage.io.imread instead) -- confirm the pinned version.
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)

# prepare reference features
# One (mean, variance) pair per kernel per reference texture.
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
Beispiel #18
0
 def _gabor_filter(self, img, f, t):
     """Filter *img* with a complex Gabor kernel of frequency *f* and
     orientation *t*, returning the complex-valued response."""
     gk = gabor_kernel(f, t)
     # cv2.filter2D handles only real kernels, so filter the real and
     # imaginary parts separately and recombine into a complex image.
     real_part = cv2.filter2D(img, -1, gk.real)
     imag_part = cv2.filter2D(img, -1, gk.imag)
     return real_part + 1j * imag_part
Beispiel #19
0
__author__ = 'junwangcas'
import numpy as np

from scipy import ndimage as nd

from skimage import data
from skimage.util import img_as_float
from skimage.filter import gabor_kernel

# NOTE(review): data.load was removed from recent scikit-image releases --
# confirm the pinned version supports it.
brick = img_as_float(data.load('brick.png'))

# Real Gabor kernel oriented at 90 degrees (theta = pi/2), isotropic sigma 5.
kernel = np.real(gabor_kernel(0.15, theta = 0.5 * np.pi,sigma_x=5, sigma_y=5))

# 'reflect' padding avoids wrap-around artifacts at the image borders.
filtered = nd.convolve(brick, kernel, mode='reflect')

# Simple texture statistics of the filtered response.
mean = filtered.mean()
variance = filtered.var()
Beispiel #20
0
from sklearn.decomposition import PCA

# Load faces dataset
# The pickle bundles per-image arrays/metadata plus the label vocabularies.
file_pkl = open("faces4_data.pkl", "rb")
img,img_name,img_identity,img_pose,img_expression,img_eye,\
        identity,pose,expression,eye = pickle.load(file_pkl)
file_pkl.close()

# Extract Gabor features
# Step 1: Prepare Gabor filter bank kernels
# 4 orientations x 2 sigmas x 2 frequencies = 16 real-valued kernels.
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1., 2.):
        for frequency in (0.5, 1.0):
            kernel = np.real(gabor_kernel(frequency,theta=theta,sigma_x=sigma,\
                    sigma_y=sigma))
            kernels.append(kernel)

# Compute Gabor features
# One (mean, variance) pair per kernel -> 16 * 2 = 32 features per image.
feat_gabor = np.zeros((img.shape[0], 16 * 2))
for i in range(img.shape[0]):
    # Min-max normalize each image in place before filtering.
    img[i] = (img[i] - img[i].min()) / (img[i].max() - img[i].min())
    feat_gabor[i, :] = compute_feats(img[i], kernels).reshape(1, 32)
    #print i

# PCA on Gabor features
#pca = PCA(n_components=4)
#feat_gabor = pca.fit_transform(feat_gabor)
#print "Variance Ratio: ",sum(pca.explained_variance_ratio_)

#feat_gabor = scale(feat_gabor)
Beispiel #21
0
def power(image, kernel):
    """Convolve a z-scored copy of *image* with the real part of *kernel*.

    Standardizing first makes responses comparable across images; 'wrap'
    boundary mode tiles the image at the edges.
    """
    zscored = (image - image.mean()) / image.std()
    # Only the real component of the Gabor kernel is applied here.
    response = nd.convolve(zscored, np.real(kernel), mode='wrap')
    return response
    #return np.sqrt(nd.convolve(image, np.real(kernel), mode='wrap')**2 +
    #               nd.convolve(image, np.imag(kernel), mode='wrap')**2)


# Plot a selection of the filter bank kernels and their responses.
# NOTE(review): 'test' (the source image) and 'plt' are defined elsewhere in
# the original module -- confirm before running this snippet standalone.
results = []
kernel_params = []
for theta in range(4):
    # Four orientations at multiples of 45 degrees, fixed frequency 0.1.
    theta = theta / 4. * np.pi
    frequency = 0.1
    kernel = gabor_kernel(frequency, theta=theta)
    # print(kernel)
    params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
    kernel_params.append(params)
    # Save kernel and the power image for each image
    results.append((kernel, power(test, kernel)))

fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(5, 4))
plt.gray()

# fig.suptitle('Convolutional Layer Using Gabor', fontsize=11)

axes[0][0].axis('off')

# Plot Source image
axes[1][0].imshow(test)
import cv2
import numpy as np
import timeit
import image_functions
from skimage.filter import gabor_kernel
from scipy import ndimage as nd

# Module-level Gabor bank shared by the feature functions below:
# 4 orientations x 2 sigmas x 2 frequencies = 16 real-valued kernels.
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)

def gabor_features(filename):
    """Mean/variance Gabor responses of the green channel of image *filename*.

    Uses the module-level `kernels` bank; returns a flat 1D vector of
    len(kernels) * 2 values (mean then variance per kernel).
    """
    green = cv2.imread(filename).astype(np.float32)[:,:,1]
    stats = np.zeros((len(kernels), 2), dtype=np.double)
    for idx, kern in enumerate(kernels):
        response = nd.convolve(green, kern, mode='wrap')
        stats[idx, 0] = response.mean()
        stats[idx, 1] = response.var()
    # Flatten the (n_kernels, 2) table into a single feature vector.
    return np.concatenate(stats)

def color_features(filename):
    # Load the image as float; I has shape (h, w, 3).
    I = cv2.imread(filename).astype(np.float32)
    h, w, dim = I.shape
    # Per-pixel gray value, broadcast back to all three channels.
    G = np.mean(I, axis = 2)
    G2 = np.dstack((G,G,G))
    # Color residual (image minus its gray version), downsampled to 8x8.
    # NOTE(review): the function ends here with no return statement -- the
    # snippet appears truncated; confirm against the original source.
    C = cv2.resize(np.subtract(I, G2), (8, 8))
Beispiel #23
0
from pylab import *
from matplotlib.widgets import Slider, Button, RadioButtons

# Interactive Gabor kernel viewer: four sliders drive frequency, orientation
# and the two sigmas; the kernel preview is redrawn on each change.
# NOTE(review): 'axisbg' was renamed 'facecolor' in newer Matplotlib --
# confirm the pinned version.
ax = subplot(111, axisbg=(0.44, 0.44, 0.44))
subplots_adjust(bottom=0.35)  # leave room below the plot for the sliders
gray()

axis([0, 30, 0, 30])
# Initial slider values.
theta_init = np.pi / 2
freq_init = 0.25
sigmax_init = 3
sigmay_init = 3

# Initial preview: real part of the Gabor kernel at the default parameters.
nucleo = np.real(
    gabor_kernel(freq_init,
                 theta=theta_init,
                 sigma_x=sigmax_init,
                 sigma_y=sigmay_init))
ax.imshow(nucleo, interpolation='bicubic')

# One horizontal axes strip per slider, stacked below the preview.
axcolor = 'lightgoldenrodyellow'
frequencias = axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
thetas = axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
sigmasx = axes([0.25, 0.20, 0.65, 0.03], axisbg=axcolor)
sigmasy = axes([0.25, 0.25, 0.65, 0.03], axisbg=axcolor)

slider_frequencias = Slider(frequencias, 'Freq', 0.05, 0.50, valinit=freq_init)
slider_thetas = Slider(thetas, 'Theta', 0, np.pi, valinit=theta_init)
slider_sigmasx = Slider(sigmasx, 'Sigma X', 1, 6, valinit=sigmax_init)
slider_sigmasy = Slider(sigmasy, 'Sigma Y', 1, 6, valinit=sigmay_init)