Example #1
 def __init__(self):
     
     '''Gabor Filter'''
     self.k = 99 # odd
     self.ksize  = (self.k,self.k)
     self.sigma = 20
     self.lambd = 5
     self.gamma = 1
     self.GaborKernel_0   = cv2.getGaborKernel(ksize = self.ksize, sigma = self.sigma,theta = 0, lambd = self.lambd, gamma = self.gamma)
     self.GaborKernel_45  = cv2.getGaborKernel(self.ksize, self.sigma, 45, self.lambd,  self.gamma)
     self.GaborKernel_90  = cv2.getGaborKernel(self.ksize, self.sigma, 90, self.lambd,  self.gamma)
     self.GaborKernel_135  = cv2.getGaborKernel(self.ksize, self.sigma, 135, self.lambd,  self.gamma)       
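Note that cv2.getGaborKernel interprets theta in radians, so the literal 45, 90 and 135 above are not degrees. A minimal sketch of the same four-orientation bank with an explicit degree-to-radian conversion (illustrative only, not part of the original example):

import numpy as np
import cv2

ksize, sigma, lambd, gamma = (99, 99), 20, 5, 1
# one kernel per orientation, converting degrees to radians explicitly
kernels = {deg: cv2.getGaborKernel(ksize, sigma, np.deg2rad(deg), lambd, gamma)
           for deg in (0, 45, 90, 135)}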
Example #2
 def __init__(self):
     # how many DoG responses to sum
     self.scale = 6
     
     # gabor kernel parameters
     self.ksize  = (5,5)
     self.sigma = 5
     self.lambd = 5
     self.gamma = 1
     self.GaborKernel_0   = cv2.getGaborKernel(ksize = self.ksize, sigma = self.sigma,theta = 0, lambd = self.lambd, gamma = self.gamma)
     self.GaborKernel_45  = cv2.getGaborKernel(self.ksize, self.sigma, 45, self.lambd,  self.gamma)
     self.GaborKernel_90  = cv2.getGaborKernel(self.ksize, self.sigma, 90, self.lambd,  self.gamma)
     self.GaborKernel_135  = cv2.getGaborKernel(self.ksize, self.sigma, 135, self.lambd,  self.gamma)  
Example #3
def filter(u=5,v=8):

  try: # read from a file
    # raise IOError
    if not os.path.exists(newpath):
      os.makedirs(newpath)
      raise IOError
    else:
      kernels = np.load('kernels/k.npy')
  except IOError:
    print "ioerror, creating new kernel"
    ksize = 31
    kernels = np.zeros(shape = (ksize,ksize))
    sigma = pi # sd of gaussian envelope or bandwidth
    gamma = 1 # spatial aspect ratio
    #fmax = 0.25

    #for i in range(1,u+1):
    # cv2.startWindowThread()
    # cv2.namedWindow("image")
    for theta in range(0,181,25): # eight orientations, in degrees
      for lambd in [7,8,9]: # three wavelengths of the sinusoidal factor
        #lambd = fmax/pow(sqrt(2),i-1)
        kernel = cv2.getGaborKernel((ksize,ksize), sigma, theta*pi/180, lambd , gamma, psi = 0)
        kernel /= 1.5*kernel.sum()
        # distributivity of convolution: filtering once with the summed kernel equals summing the individual responses (see the sketch after this example)
        kernels += kernel
    # cv2.destroyAllWindows() 
    # the 8x3 = 24 kernels above are summed into a single kernel before saving
    np.save('kernels/k',kernels)

  return kernels
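The distributivity comment above relies on convolution being linear: filtering once with the summed kernel gives the same result as filtering with each kernel and summing the responses. A minimal check of that identity, using a synthetic image and two arbitrary kernels (illustrative only):

import numpy as np
import cv2

img = np.random.rand(64, 64).astype(np.float32)  # synthetic test image
k1 = cv2.getGaborKernel((31, 31), np.pi, 0.0, 7, 1, psi=0)
k2 = cv2.getGaborKernel((31, 31), np.pi, np.pi / 2, 9, 1, psi=0)

# filter2D is linear, so one pass with (k1 + k2) matches the sum of two passes
lhs = cv2.filter2D(img, cv2.CV_32F, k1 + k2)
rhs = cv2.filter2D(img, cv2.CV_32F, k1) + cv2.filter2D(img, cv2.CV_32F, k2)
assert np.allclose(lhs, rhs, atol=1e-3)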
Example #4
    def select(self, X, A, S):
        P, N = X.shape
        p = int(sqrt(P))
        
        # "Intensity" is just the input
        I = self._normalize(abs(X))

        # Apply gabor filters to get the orientation channel
        if self.kernels is None:
            # only available on OpenCV 3+...
            if hasattr(cv, 'getGaborKernel'):
                self.kernels = [cv.getGaborKernel((p, p), p / 2, pi * angle / 4, p, 1) for angle in [0, 45, 90, 135]]
            else:
                import pickle
                with open("./algorithms/gabor-kernels-%d.pkl" % p, 'r') as fin:
                    self.kernels = pickle.load(fin)
        
        Xs = zeros((p, p * N))
        for n in range(N):
            Xs[:, p * n:(p * n + p)] = X[:, n].reshape((p, p))

        Os = self._normalize(reduce(add, [self._normalize(cv.filter2D(Xs, cv.CV_32F, kernel)) for kernel in self.kernels]))
   
        O = zeros((P, N))
        for n in range(N):
            O[:, n] = Os[:, p * n:(p * n + p)].reshape((P))
        
        G = .5 * (I + O)
        return self._select_by_sum(G)
Example #5
def build_filters(ksize, sigma, a, b, c):
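    # a, b and c are passed positionally to getGaborKernel below, i.e. as lambd, gamma and psi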
    filters = []
    for theta in np.arange(0, np.pi, np.pi / 16):
        kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, a, b, c, ktype=cv2.CV_32F)
        kern /= 1.5*kern.sum()
        filters.append(kern)
    return filters
Example #6
def train_different_gabor_filter(img_size, file_path):
    filters = []
    count = 1
    # plt.figure(1)
    for lamd in np.arange(3, 13, 2):
        for thea in np.arange(0, np.pi, np.pi / 8):
            kern = cv2.getGaborKernel(
                (img_size, img_size), sigma=4, theta=thea, lambd=lamd, gamma=0.5, psi=0, ktype=cv2.CV_32F
            )
            kern /= 1.5 * kern.sum()
            filters.append(kern)
            print(kern)
            # plt.subplot(5, 8, count)
            # plt.plot(kern)
            # ax = fig.add_subplot()
            cv2.imwrite("./gabor_filter/gabor_{}.png".format(count), kern)
            count += 1
    # plt.show()
    # plt.figure(2)
    img = cv2.imread(file_path, 0)
    count = 1
    for kern in filters:
        fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
        # plt.subplot(5, 8, count)
        # plt.plot(fimg)
        cv2.imwrite("./gabor_filter/face_{}.png".format(count), fimg)
        count += 1
Example #7
def buildGaborWavelets(directions, scales, ksize):
	filters = []
	for theta in np.arange(0, np.pi, np.pi / 6):
		for lambd in range(1,scales+1):
			kern = cv2.getGaborKernel((ksize, ksize), lambd, theta, 10.0, 0.5)
			filters.append(kern)
	return filters
Example #8
    def convolution(self, img, direction, sigma):
        """
            入力画像を太陽方向に基づいた複数のカーネルで畳みこむ
            param: psi -> 位相
        """
        ## Kernel acquisition
        kernels = list(map(
            lambda x: cv2.getGaborKernel(
                ksize=(x, x), sigma=sigma, theta=direction, lambd=x, gamma=25.0 / x, psi=np.pi * 1 / 2
            ),
            range(5, 25, 2),
        ))

        ## Normalize each kernel to the range -1..1
        for i, kernel in enumerate(kernels):
            kernels[i] = 1.0 * kernels[i] / np.amax(kernels[i])

        ## Convolve, normalize each response by its kernel area, then rescale the stack to 0..255
        responses = list(map(lambda x: cv2.filter2D(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cv2.CV_64F, x), kernels))
        responses = [responses[x] / (kernels[x].shape[0] ** 2) for x in range(len(kernels))]
        responses = cv2.normalize(np.array(responses), None, 0, 255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)

        ### display ###
        # for kernel in kernels:
        # cv2.imshow('kernel',abs(kernel))
        # cv2.waitKey(-1)
        # for res in responses:
        # cv2.imshow('responses',res)
        # cv2.waitKey(-1)

        return responses
Example #9
 def __createGaborKernels(self, labdas, thetas):
     self.__kernels = [None] * len(labdas) * len(thetas)
     k = 0
     for i in range(len(labdas)):
         for j in range(len(thetas)):
             self.__kernels[k] = cv.getGaborKernel((kernel_size,kernel_size),sigma,thetas[j],labdas[i],gamma,psi)
             k += 1
Example #10
def build_filters():
    filters = []
    ksize = 31
    for theta in np.arange(0, np.pi, np.pi / 16):
        kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
        kern /= 1.5*kern.sum()
        filters.append(kern)
    return filters
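A bank like this is typically applied by convolving the image with each kernel and keeping the per-pixel maximum response, as the gabor_wavelet example further down does. A minimal usage sketch (the input path is hypothetical; numpy and cv2 imports are assumed):

img = cv2.imread('input.jpg', 0)        # hypothetical grayscale input
filters = build_filters()
accum = np.zeros_like(img)
for kern in filters:
    fimg = cv2.filter2D(img, -1, kern)  # response at the same depth as the input
    np.maximum(accum, fimg, accum)      # keep the strongest response per pixel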
Example #11
    def generate_energy_map(self, image, filterType, half_width):
        if filterType == "g":
            kernel = cv2.getGaborKernel((half_width * 2 + 1, half_width * 2 + 1))
            gradient = cv2.filter2D(image, cv2.CV_32F, kernel, 2, 0, 3, 1)
        elif filterType == "s":
            gradient = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=half_width * 2 + 1)

        return numpy.multiply(gradient, gradient)
Example #12
def build_gabor(ksize, num, lmbda):
    filters = []
    ksize = ksize
    for theta in numpy.arange(0, numpy.pi, numpy.pi / num):
        kern = cv2.getGaborKernel((ksize, ksize), 1.0, theta, lmbda, 0.5, 0,  ktype=cv2.CV_32F)
        kern /= 1.5*kern.sum()
        filters.append(kern)
    return filters
Example #13
def build_filters_improved(t, ksize, lamb, sigm):
    filters = []
    tab = t
    for theta in tab:
        kern = cv2.getGaborKernel((3, 3), sigm, theta, lamb, 4.5, 150, ktype=cv2.CV_32F)
        kern /= 1 * kern.sum()
        filters.append(kern)
    return filters
Example #14
def build_filters(sigma, gamma, ksize, thetaRange, lmdaRange, thetaDivider, lmdaDivider):
    filters = []
    for theta in np.arange(0, thetaRange, thetaRange / thetaDivider):
        for lmda in np.arange(1, lmdaRange, lmdaDivider):
            kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, lmda, gamma, ktype=cv2.CV_64F)
            cv2.multiply(kern, 1 * kern, kern)
            filters.append(kern)
    return filters
Example #15
def decomposeImage(image, theta, window,ksize):
    windows = []
    orient = theta
    median = []
    row_count = 0
    for x in range(0, image.shape[0], window):
        for y in range(0,  image.shape[1], window):
            w = copyArray(image, theta, x, y, window)
            windows.append(w)
            # orient.append(o)
            row_count += 1

    print(row_count)

    for cr in range(0,row_count):
        # sum = 0
        # for i in range(0, orient[cr].shape[0]):
        #     for j in range(0, orient[cr].shape[1]):
        #         sum += orient[cr]

        len = 1.5
        # median.append(sum / orient[cr].shape[0] ** 2)
        x1 = 4 - len / 2 * math.cos(np.average(orient[cr])) * 10
        x1 = np.around(x1)
        x1 = x1.astype(int)
        y1 = 4 - len / 2 * math.sin(np.average(orient[cr])) * 10
        y1 = np.around(y1)
        y1 = y1.astype(int)

        x2 = 4 + math.cos(np.average(orient[cr])) * 10
        x2 = np.around(x2)
        x2 = x2.astype(int)
        y2 = 4 + math.sin(np.average(orient[cr])) * 10
        y2 = np.around(y2)
        y2 = y2.astype(int)

        point1 = (x1, y1)
        point2 = (x2, y2)

        # windows[cr] = cv2.line(windows[cr], point1, point2, (255,255, 255))
    # print windows
    # print str(len(freqs))
    # print str(len(freqs[0]))
    # print str(len(theta))
    # print str(len(theta[0]))
    count = 0
    for i in range(0, row_count):
            # print str(i)
            # print len(freqs[i])
        kern = cv2.getGaborKernel((7,7),4,np.average(orient[i]),35,22)
        windows[i] = cv2.filter2D(src=windows[i], ddepth=cv2.CV_32F, kernel=kern)
        count +=1


    final_image = reconstructImage(image, windows, window)
    return final_image
Example #16
def buildFilters(sigma, gamma):
    filters = []
    ksize = 17
    # Picked parameters from: Coding Facial Expressions with Gabor Wavelets by Lyons et al.
    for theta in np.arange(0, np.pi, np.pi / 17):
        for lambd in [np.pi / 2, np.pi / 4, np.pi / 8, np.pi / 16]:
            for psi in [0, np.pi / 2]:
                kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, lambd, gamma, psi, ktype=cv2.CV_32F)
                filters.append(kern)
    return filters
Example #17
def build_filter(fsize = 11, orientation = 8, scale = 4):
    filters = []
    lambd = 1
    gamma = 0.25
    sigma = np.sqrt(3)
    for theta in np.arange(0, np.pi, np.pi/(orientation*scale)):
        kern = cv2.getGaborKernel((fsize, fsize), sigma, theta, lambd, gamma, 0, ktype=cv2.CV_32F)
        kern /= 1.5*kern.sum()
        filters.append(kern)
    return filters
Example #18
def predict_smile(clf, face):
    X = face.reshape(64*64)
    X_exp = X
    # apply Gabor filter here as well
    for theta in [0,  np.pi/3, np.pi]:
        kern = cv2.getGaborKernel((64,64), 4.0, theta, 10, 0.5, 0, ktype=cv2.CV_32F)
        kern /= 1.5 * kern.sum()
        gf1 = cv2.filter2D(X.reshape((64, 64)), cv2.CV_8UC3, kern).reshape(64 * 64)
        X_exp = np.hstack((X_exp, gf1))
    return clf.predict(X_exp)
Example #19
def build_filter(img_size):
    filters = []
    for lamd in np.arange(np.pi, np.pi * 4, 3 * np.pi / 5):
        for thea in np.arange(0, np.pi, np.pi / 8):
            kern = cv2.getGaborKernel(
                (img_size, img_size), sigma=4, theta=thea, lambd=lamd, gamma=10, psi=0.5, ktype=cv2.CV_32F
            )
            kern /= 1.5 * kern.sum()
            filters.append(kern)
    return filters
Example #20
def gabor_filters(ksize, sigma = 4.0, lmbda = 10.0, n = 16):
    '''
    Create a bank of Gabor filters spanning 180 degrees
    '''
    filters = []
    for theta in np.arange(0, np.pi, np.pi / n):
        kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, lmbda, 0.5, 0, ktype=cv2.CV_64F)
        kern /= 1.5*kern.sum()
        filters.append(kern)
    return filters
Example #21
def build_gabor_filters():
    filters = []
    ksize = 31
    for theta in (np.pi * np.array(range(3)) / 3).tolist():
        for lambd in [0.1, 0.2, 0.4]:
            kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, lambd, 0.5, 0, ktype=cv2.CV_32F)
            kern /= 1.5 * kern.sum()
            filters.append(kern)

    return filters
Example #22
def build_filters(nlambda, ntheta):
    filters = []
    ksize = KSIZE
    lambd = LAMBDA
    print (np.arange(LAMBDAS["beg"], LAMBDAS["end"], (LAMBDAS["end"] - LAMBDAS["beg"]) / float(nlambda)))
    for lambd in np.arange(LAMBDAS["beg"], LAMBDAS["end"], (LAMBDAS["end"] - LAMBDAS["beg"]) / float(nlambda)):
        for theta in np.arange(THETAS["beg"], THETAS["end"], (THETAS["end"] - THETAS["beg"]) / float(ntheta)):
            kern = cv2.getGaborKernel((ksize, ksize), SIGMA, theta, lambd, 0.5, 0, ktype=cv2.CV_32F)
            kern /= 1.5 * kern.sum()
            filters.append(kern)
    return filters
Example #23
def build_filters(t):
    filters = []
    ksize = 5
    lamb = 5.0
    sigm = 2.0
    tab = t
    for theta in tab:
        kern = cv2.getGaborKernel((ksize, ksize), sigm, theta, lamb, 0.5, 0, ktype=cv2.CV_32F)
        kern /= 1.5 * kern.sum()
        filters.append(kern)
    return filters
Example #24
def build_filters():
    filters = []
    ksize = 127
    for theta in np.arange(0, np.pi, np.pi / 16):
        for lambd in np.arange(4, 16, 2):
            for  psi in np.arange(0, np.pi/4, (np.pi/4) / 4):
                kern = cv2.getGaborKernel((ksize, ksize), 0.33*lambd, theta, lambd, 0.5, psi, ktype=cv2.CV_32F)
                kern /= 1.5*kern.sum()
                # cv2.imshow('kern', kern / np.max(kern) * 255)
                # cv2.waitKey(0)
                filters.append(kern)
    return filters
Example #25
def discoverHorizontalLines(image):

    bkelim = eliminateBackground(image)
    #Generate the Gabor, isolate _horizontal_ white lines
    #This function expects the result of the background eliminator function
    gaborKernel = cv2.getGaborKernel((101,101),1,np.pi*93/180.0,13,15)
    #image_to_search = bkelim[:,:,1]
    image_to_search = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    #cv2.imshow("GRAY", image_to_search)
    horizontalLines = cv2.filter2D(image_to_search, cv2.CV_32F, gaborKernel)
    
    return horizontalLines
Example #26
def build_filters():
    filters = []
    sigma = 4
    gamma = 1.0
    ksize = 33
    lamba = 10
    ps = (90-180)*np.pi/180.0
    for theta in np.arange(0, np.pi, np.pi / 8):
        kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamba, gamma, ps)
        #kern = kern/2 + 0.5
        filters.append(kernel)
    return filters
Example #27
def build_filters():
	""" returns a list of kernels in several orientations
	"""
	filters = []
	ksize = 31
	for theta in np.arange(0, np.pi, np.pi / 32):
		params = {'ksize':(ksize, ksize), 'sigma':1.0, 'theta':theta, 'lambd':15.0,
		'gamma':0.02, 'psi':0, 'ktype':cv2.CV_32F}
		kern = cv2.getGaborKernel(**params)
		kern /= 1.5*kern.sum()
		filters.append((kern,params))
	return filters
Example #28
def get_gabor_filters(ksizes, num_orientations):
    """
    Creates a dictionary of gabor filters of various sizes and orientations
    @params ksizes list[int] sizes of the square box filters
    @params num_orientations int number of equally spaced orientations between 0 and 180 degrees
    @return dict keyed by (ksize, orientation index) -> Gabor kernel
    """
    filters = dict()
    for size in ksizes:
        for i in range(num_orientations):
            theta = np.pi/num_orientations * i
            filters[(size, i)] = cv2.getGaborKernel((size,size), 4, theta, 10.0, 0.5, 0)
    return filters 
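A minimal sketch of how the returned dictionary might be indexed (the sizes and the input image are hypothetical):

import numpy as np
import cv2

image = np.zeros((64, 64), dtype=np.float32)       # hypothetical grayscale input
filters = get_gabor_filters(ksizes=[9, 15], num_orientations=4)
kernel = filters[(9, 2)]                           # 9x9 kernel at theta = 2 * pi/4
response = cv2.filter2D(image, cv2.CV_32F, kernel)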
Example #29
    def _fill_with_gabor_filters(self, n_filters, shape, stddev):
        """
        Fills weights and biases with Gabor filters. Only 96 filters
            are implemented now, others are filled with white noise.

        Args:
            n_filters(int): number of filters
            shape(tuple): shape of each filter
            stddev(float): standard deviation of filtering kernels
        """
        import cv2

        def normalize_image(a):
            a -= a.min()
            mx = a.max()
            if mx:
                a *= 255.0 / mx
            return a  # return the normalized array so callers can use the result

        # Gabor  filters
        orientations = [0, pi / 4, pi / 2, 3 * pi / 4]  # tilt of filters
        phase_shifts = [0, pi]  # pi phase shift inverts signal

        size = min(shape)

        kernels_count = 0
        n_chans = self.weights.mem.size // (self.kx * self.ky * self.n_kernels)
        for wavelen_ratio in range(4):  # how many waves should fit in the kernel
            for dev_ratio in range(1, 2 * wavelen_ratio + 1):
                for ori in orientations:
                    for phase in phase_shifts:
                        kernel_chan = cv2.getGaborKernel(
                            ksize=shape, sigma=size / dev_ratio / 2,
                            theta=ori, lambd=size / wavelen_ratio,
                            gamma=1, psi=phase)

                        kernel_chan = normalize_image(kernel_chan) * stddev
                        kernel = numpy.zeros(shape=[n_chans, self.kx, self.ky],
                                             dtype=numpy.float64)
                        for chan in range(n_chans):
                            kernel[chan, :] = kernel_chan
                        kernel = kernel.swapaxes(0, 2)
                        self.weights.mem[
                            kernels_count * kernel.size:
                            (kernels_count + 1) * kernel.size] = kernel.ravel()

                        kernels_count += 1
                        if kernels_count == n_filters:
                            return

        # White noise (if more than 96 filters are required)
        self.rand.fill_normal_real(self.weights.mem[kernels_count:], 0, stddev)
Example #30
def gabor_wavelet(image):
    filters = []
    ksize = 7
    for theta in np.arange(0, np.pi, np.pi/16):
        kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
        kern /= 1.5*kern.sum()
        filters.append(kern)

    accum = np.zeros_like(image)
    for kern in filters:
        fimg = cv2.filter2D(image, cv2.CV_8UC3, kern)
        np.maximum(accum, fimg, accum)

    return accum
Example #31
gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)

# Gabor filtering
kernel_size = 9
sigma = 3
theta = [30, 60, 90, 120, 150, 180]
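# NOTE: getGaborKernel expects theta in radians; these degree values are passed to it unconverted below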
lambd = 4
gamma = 0.04
psi = 0
combined_image = np.zeros_like(np.float64(gray_image))

for ii in range(6):
    gobor_kernel = cv2.getGaborKernel((kernel_size, kernel_size),
                                      sigma,
                                      theta[ii],
                                      lambd,
                                      gamma,
                                      psi,
                                      ktype=cv2.CV_64F)
    im_gobor_filtered = cv2.filter2D(gray_image, cv2.CV_64F, gobor_kernel)
    image_index = "images/" + ` ii ` + ".jpg"
    cv2.imwrite(image_index, im_gobor_filtered)
    combined_image += im_gobor_filtered

print(np.min(combined_image))
print(np.max(combined_image))
combined_image = np.uint8(
    (combined_image - np.min(combined_image)) /
    (np.max(combined_image) - np.min(combined_image)) * 255)
#combined_image = np.abs(combined_image)
#combined_image = np.uint8((combined_image)/(np.max(combined_image))*255)
Example #32
import matplotlib.pyplot as plt
import math

origin_img = cv2.imread('lion.jpg', 0).astype(np.float32) # gray scale
origin_img = origin_img / 255

# Gabor filter kernel
kernel = cv2.getGaborKernel((21,21), 5, 1, 10, 1, 0, cv2.CV_32F)
kernel /= math.sqrt((kernel * kernel).sum())

filtered = cv2.filter2D(origin_img, -1, kernel)

plt.figure(figsize=(8,3))
plt.subplot(131)
plt.axis('off')
Example #33
def identify_gripper(image):
    global image_name
    global count_index
    global first_pix_loc1_x

    print("Applying filter B for Gripper")
    debug = True

    try:
        crop1_x1 = 100 + 0
        crop1_x2 = 1200 - 0
        crop1_y1 = 220 + 0 - 20 - 50
        crop1_y2 = 520 + 50 + 20

        mid_y = int(round((crop1_x1 - crop1_x2) / 2))
        counter_y = 0

        # image = cv2.imread(str(path) + str(image_name))
        col_img = image.copy()
        # image=original_image.copy()
        gray = image[crop1_y1:crop1_y2, crop1_x1:crop1_x2]

        if debug:
            showimage("crop", gray)

        bef_gray = gray.copy()

        # #-----Converting image to LAB Color model-----------------------------------
        # lab= cv2.cvtColor(gray, cv2.COLOR_BGR2LAB)
        # # cv2.imshow("lab",lab)

        # #-----Splitting the LAB image to different channels-------------------------
        # l, a, b = cv2.split(lab)
        # # cv2.imshow('l_channel', l)
        # # cv2.imshow('a_channel', a)
        # # cv2.imshow('b_channel', b)

        # #-----Applying CLAHE to L-channel-------------------------------------------
        # clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
        # cl = clahe.apply(l)
        # # cv2.imshow('CLAHE output', cl)

        # #-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
        # limg = cv2.merge((cl,a,b))
        # # cv2.imshow('limg', limg)

        # #-----Converting image from LAB Color model to RGB model--------------------
        # final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
        # # cv2.imshow('final', final)

        # # gray = final

        # if debug:
        # 	cv2.imwrite("outputs/"+str(image_name)+"_A1_"+"crop.jpg", image)
        # 	showimage('ImageWindow',image)
        # 	cv2.waitKey(0)

        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)  # grayscale

        clahe = cv2.createCLAHE(clipLimit=2.8, tileGridSize=(5, 5))
        gray = clahe.apply(gray)

        # cv2.imwrite("image_clahe_output.jpg",cl1)

        if debug:
            showimage("clahe", gray)

        g_bk = gray.copy()

        # if debug:
        # 	cv2.imwrite("outputs/"+str(image_name)+"_A2_"+"gray.jpg", gray)
        # 	showimage('gray image', image)
        # 	cv2.waitKey(0)

        gray = cv2.medianBlur(gray, 89)

        if debug:
            cv2.imwrite("outputs/" + str(image_name) + "_A3_" + "blur.jpg",
                        gray)
            showimage('median blur', gray)
            cv2.waitKey(0)

        _, g9 = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)

        # showimage("thresh",g9)

        global count_index
        count_index += 1

        g10 = dynamic_crop(g9, g_bk)

        if debug:
            showimage('dynamic crop', g10)
            cv2.waitKey(0)

        # g12 = cv2.equalizeHist(g10)

        # if debug:
        # 	showimage("histogram",g12)

        # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        # g12 = clahe.apply(g10)

        # # cv2.imwrite("image_clahe_output.jpg",cl1)

        # if debug:
        # 	showimage("clahe",g12)

        g12 = cv2.medianBlur(g10, 7)

        if debug:
            showimage("medianBlur", g12)

        g_kernel2 = cv2.getGaborKernel(
            (7, 7), 1.55, np.pi, 5.45, 0.2, 0, ktype=cv2.CV_32F
        )  #(ksize, sigma, theta, lambda, gamma, psi, ktype)
        g13 = cv2.filter2D(g12, cv2.CV_8UC3, g_kernel2)

        if debug:
            showimage('gabor filter', g13)
            cv2.waitKey(0)

        g13 = cv2.GaussianBlur(g13, (7, 7), 0)

        # showimage("GaussianBlur",g13)

        # pos = np.where(g13>80)

        # g13[pos]=255

        # showimage("clahe mod",g13)

        _, g14 = cv2.threshold(g13, 160, 255, cv2.THRESH_BINARY)

        if debug:
            showimage("gabor thresh", g14)

        # g10 = g10[110:170, 800:750+150]

        # if debug:
        # 	showimage('ImageWindow', g10)
        # 	cv2.waitKey(0)

        # g12 =  cv2.medianBlur(g12, 7)

        # if debug:
        # 	showimage('medianBlur', g12)
        # 	cv2.waitKey(0)

        # edges = cv2.Canny(g12,100,255)

        # if debug:
        # 	showimage('ImageWindow', edges)
        # 	cv2.waitKey(0)

        # g_kernel2 = cv2.getGaborKernel((7, 7), 1.7, np.pi/3.4, 4.1, 0.5, 0, ktype=cv2.CV_32F)

        # g13 = cv2.GaussianBlur(g13,(5,5),0)

        # # g14 =  cv2.medianBlur(g14, 7)

        n_black_pix = np.sum(g14 < 100)
        print('\t\tNumber of black pixels for gripper:', n_black_pix)
        print('\t\tfirst_pix_loc1_x:', first_pix_loc1_x)

        if n_black_pix > 200:
            print("gripper present", n_black_pix)

        # # if n_black_pix>5:# or first_pix_loc1_x<10:
        # if n_black_pix>5 and first_pix_loc1_x>10 and first_pix_loc1_x<200:

        # 	cv2.rectangle(img = col_img, pt1 = (920-20, 260-20), pt2 = (1100+20, 450+20), color = (0, 0, 255), thickness = 2)

        # 	font                   = cv2.FONT_HERSHEY_SIMPLEX
        # 	bottomLeftCornerOfText = (920-20-300,260-20+50)
        # 	fontScale              = 1
        # 	fontColor              = (0,0,255)
        # 	lineType               = 2

        # 	cv2.putText(col_img,'NOT OK - Gripper',
        # 	    bottomLeftCornerOfText,
        # 	    font,
        # 	    fontScale,
        # 	    fontColor,
        # 	    lineType)

        # 	# showimage('ImageWindow', g14)
        # 	showimage('ImageWindow2', col_img)
        # 	# showimage('ImageWindow3', g9)

        # if debug:
        # 	showimage('ImageWindow', g14)
        # 	cv2.waitKey(0)

        return ((first_pix_loc1_x, n_black_pix, col_img))

        # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
        # g15 = cv2.morphologyEx(g14, cv2.MORPH_OPEN, kernel)

        # if debug:
        # 	showimage('ImageWindow', g15)
        # 	cv2.waitKey(0)

        return

    except Exception as e:
        print("Exception", e)

    return
Example #34
def get_filtered_map(image):
    first_layer = cv2.getGaborKernel((3, 3), sigma1, 0, lambda1, gamma, 0)
    second_layer = cv2.getGaborKernel((9, 9), sigma2, 0, lambda2, gamma, 0)
    filtered = cv2.filter2D(image, -1, first_layer)
    filtered = cv2.filter2D(filtered, -1, second_layer)
    return filtered
Example #35
from ransac_eyelids import ransac_line, ransac_parabola
from image_utils import stack_imgs_horizontal, stack_imgs_vertical
from draw_utils import draw_points
from time import time
from math import pi

__gabor_params = {
    'ksize': (5, 5),
    'sigma': 3,
    'theta': pi / 4,
    'lambd': pi * 2,
    'gamma': 2,
    'psi': pi / 2,
    'ktype': cv2.CV_32F
}
__gabor_kern_diag = cv2.getGaborKernel(**__gabor_params)

__winname = "Eyelid Detection"
__debug_imgs_upper = {}
__debug_imgs_lower = {}

__min_thresh = 50


def filter_limbus_pts(u_eyelid, l_eyelid, pts):

    # --- Filters to return only pts between eyelids ---
    #    [[ x1   y1]
    #     [ x2   y2]
    #         ...
    #     [ xn   yn]]
Example #36
	def texture_seg(self, src_gray):
		""" Genetate texture feature images. 

			Input:
				The gray scaled image.

		"""

		# Store the number of features contained.
		num_texture_feature = 0

		# cv2.getGaborKernel(ksize, sigma, theta, lambda, gamma, psi, ktype)
		# For example, 
		# 	g_kernel = cv2.getGaborKernel(\
		# 			   (21, 21), 8.0, np.pi/4, 10.0, 0.5, 0, ktype=cv2.CV_32F)
		# ksize - size of gabor filter (n, n)
		# sigma - standard deviation of the gaussian function
		# theta - orientation of the normal to the parallel stripes
		# 		  for lawn, should be 0.0 rad.
		# lambda - wavelength of the sinusoidal factor
		# gamma - spatial aspect ratio
		# psi - phase offset
		# ktype - type and range of values that each pixel in the gabor kernel can hold
		# g_kernel = cv2.getGaborKernel((15, 15), 7.0, 3.14159/12*0, 2.5, 1.0, 0, ktype=cv2.CV_32F)
		ksize = 15
		sigma = 4.0
		theta_max = np.pi
		theta_seq = np.pi/6
		theta_num = int(np.floor(theta_max/theta_seq)+1)
		wavelength_max = ksize/2
		wavelength_min = 2.5
		wavelength_num = 6
		wavelength_seq = (wavelength_max - wavelength_min)/wavelength_num
		gamma = 1.0
		psi = 0.0

		# Stored for plotting the Gabor filters in the spatial domain.
		gabor_kernels = []
		theta_arr = []
		wavelength_arr = []

		# Iterate over both wavelength and theta.
		for j in range(theta_num):
			theta = j*theta_seq
			theta_arr.append( np.degrees(theta) )
			for k in range(wavelength_num):
				wavelength = wavelength_min + k*wavelength_seq
				if (j == 0):
					wavelength_arr.append(wavelength)				
				g_kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, wavelength, gamma, psi, ktype=cv2.CV_32F)
				filtered_img = cv2.filter2D(src_gray, cv2.CV_8UC3, g_kernel)
				
				# Stored for plotting the Gabor filters in the spatial domain.
				gabor_kernels.append(g_kernel)

				# gray_bound_low, gray_bound_up = self.color_sample(filtered_img, self.select_precentage)
				# gray_mask, self.gray_threshold = self.color_mask(self.src, gray_bound_low, gray_bound_up)

				# filtered_img = cv2.cvtColor(filtered_img, cv2.COLOR_BGR2GRAY)

				logger.debug('Theta = {}, wavelength = {}.'.format(theta, wavelength))

				if (self.texture_feature_valid(filtered_img) == 0):
					if (self.feature_mat_empty):
						filtered_img = self.feature_normalize(filtered_img, self.feature_range_texture)
						self.feature_mat = filtered_img.reshape((-1, 1))
						self.feature_mat_empty = False
						num_texture_feature = 1
					else:
						filtered_img = self.feature_normalize(filtered_img, self.feature_range_texture)
						self.feature_append(self.feature_mat, filtered_img, 1)
						logger.debug('\tThe feature matrix now has the shape {}.'.format(self.feature_mat.shape))
					num_texture_feature += 1

				# cv2.imshow('Filtered Image', filtered_img)
				# cv2.imshow('Filtered Image Thresholded', gray_mask)
				# cv2.waitKey(0)

		# To plot the Gabor filters.
		if(self.show_gabor_filters):
			# plt.figure(self.fig_num)
			# self.fig_num += 1
			i = 0

			# fig, axs = plt.subplots(ncols=wavelength_num, nrows=theta_num, gridspec_kw={'hspace': 0.0, 'wspace': 0.0})
			fig, axs = plt.subplots( nrows=theta_num, ncols=wavelength_num, figsize=(6, 6) )
			plt.setp(axs.flat, xticks=[], yticks=[])

			print(theta_arr)
			print(wavelength_arr)
			# plt.setp(axes.flat, xticks=[], yticks=[])
			for ax in axs.flat:
				k = gabor_kernels[i]
				h, w = k.shape[:2]
				k = cv2.resize(k, (3*w, 3*h), interpolation=cv2.INTER_CUBIC)
				# ax.axis('off')
				ax.imshow(k, cmap='gray', norm=NoNorm())
				i += 1

			for ax, wl in zip(axs[-1], wavelength_arr):
			    ax.set_xlabel('{0}'.format(wl))
			for ax, angle in zip(axs[:, 0], theta_arr):
			    ax.set_ylabel( '{0}'.format(angle) )

			# for ax, ve in zip(axes[0], [0.1, 1, 10]):
			#     ax.set_title('{0}'.format(ve), size=18)
			# for ax, mode in zip(axes[:, 0], ['Hillshade', 'hsv', 'overlay', 'soft']):
			#     ax.set_ylabel(mode, size=18)
			plt.suptitle('Gabor filters in the spatial domain') # or plt.suptitle('Main title')
			
			fig.text(0.5, 0.04, 'Wavelength (pixel)', ha='center')
			fig.text(0.04, 0.5, 'Angle (degree)', va='center', rotation='vertical')
			# fig.tight_layout()

			plt.show()

		return num_texture_feature
Example #37
import cv2
import numpy as np

data_id = 21
image = 255 - cv2.imread(
    'D:/Datasets/DRIVE/training/images/%d_training.tif' % data_id)[:, :, 1]

kernel = cv2.getGaborKernel((3, 3), 3, np.pi / 4, 5, .5)
filtered = cv2.filter2D(image, -1, kernel)

cv2.imshow('Gabor', filtered)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #38
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))

        cv2.line(img_color, (x1, y1), (x2, y2), (0, 0, 255), 2)
        all_theta.append(theta)
cv2.imshow('img line ', img_color)
cv2.waitKey(0)
filter = []
ksize = 31
for the in all_theta:
    kern = cv2.getGaborKernel((ksize, ksize),
                              4.0,
                              the,
                              10.0,
                              0.5,
                              0,
                              ktype=cv2.CV_32F)
    kern /= 1.5 * kern.sum()
    filter.append(kern)
fimg = process(img, filter)
cv2.imshow("ori: ", img)
cv2.imshow("garbor:", fimg)
th3 = np.zeros_like(fimg)
_, th3 = cv2.threshold(fimg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# kernel=np.ones((1,100),np.uint8)
# th3=cv2.erode(th3,kernel,iterations=1)
cv2.imshow('threshold: ', np.uint8(th3))
# ero=cv2.erode(th3,kernel,iterations=1)
# cv2.imshow("erode: ",ero)
Example #39
    try:
        img_fn = sys.argv[1]
    except:
        img_fn = 'test.jpg'

    img = cv2.imread(img_fn)
    if img is None:
        print('Failed to load image file:', img_fn)
        sys.exit(1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    g_kernel = cv2.getGaborKernel((21, 21),
                                  8.0,
                                  np.pi / 8,
                                  10.0,
                                  0.5,
                                  0,
                                  ktype=cv2.CV_32F)

    filtered_img = cv2.filter2D(img, cv2.CV_8UC3, g_kernel)

    cv2.imshow('image', img)
    cv2.imshow('filtered image', filtered_img)

    h, w = g_kernel.shape[:2]
    g_kernel = cv2.resize(g_kernel, (3 * w, 3 * h),
                          interpolation=cv2.INTER_CUBIC)
    cv2.imshow('gabor kernel (resized)', g_kernel)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #40
def feature_extraction(img):
    df = pd.DataFrame()

    #All features generated must match the way features are generated for TRAINING.
    #Feature1 is our original image pixels
    img2 = img.reshape(-1)
    df['Original Image'] = img2

    #Generate Gabor features
    num = 1
    kernels = []
    for theta in range(2):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for lamda in np.arange(0, np.pi, np.pi / 4):
                for gamma in (0.05, 0.5):
                    #               print(theta, sigma, , lamda, frequency)

                    gabor_label = 'Gabor' + str(num)
                    #                    print(gabor_label)
                    ksize = 9
                    kernel = cv2.getGaborKernel((ksize, ksize),
                                                sigma,
                                                theta,
                                                lamda,
                                                gamma,
                                                0,
                                                ktype=cv2.CV_32F)
                    kernels.append(kernel)
                    #Now filter image and add values to new column
                    fimg = cv2.filter2D(img2, cv2.CV_8UC3, kernel)
                    filtered_img = fimg.reshape(-1)
                    df[gabor_label] = filtered_img  #Modify this to add new column for each gabor
                    num += 1
########################################
#Generate OTHER FEATURES and add them to the data frame
#Feature 3 is canny edge
    edges = cv2.Canny(img, 100, 200)  #Image, min and max values
    edges1 = edges.reshape(-1)
    df['Canny Edge'] = edges1  #Add column to original dataframe

    from skimage.filters import roberts, sobel, scharr, prewitt

    #Feature 4 is Roberts edge
    edge_roberts = roberts(img)
    edge_roberts1 = edge_roberts.reshape(-1)
    df['Roberts'] = edge_roberts1

    #Feature 5 is Sobel
    edge_sobel = sobel(img)
    edge_sobel1 = edge_sobel.reshape(-1)
    df['Sobel'] = edge_sobel1

    #Feature 6 is Scharr
    edge_scharr = scharr(img)
    edge_scharr1 = edge_scharr.reshape(-1)
    df['Scharr'] = edge_scharr1

    #Feature 7 is Prewitt
    edge_prewitt = prewitt(img)
    edge_prewitt1 = edge_prewitt.reshape(-1)
    df['Prewitt'] = edge_prewitt1

    #Feature 8 is Gaussian with sigma=3
    from scipy import ndimage as nd
    gaussian_img = nd.gaussian_filter(img, sigma=3)
    gaussian_img1 = gaussian_img.reshape(-1)
    df['Gaussian s3'] = gaussian_img1

    #Feature 9 is Gaussian with sigma=7
    gaussian_img2 = nd.gaussian_filter(img, sigma=7)
    gaussian_img3 = gaussian_img2.reshape(-1)
    df['Gaussian s7'] = gaussian_img3

    #Feature 10 is Median with sigma=3
    median_img = nd.median_filter(img, size=3)
    median_img1 = median_img.reshape(-1)
    df['Median s3'] = median_img1

    #Feature 11 is Variance with size=3
    variance_img = nd.generic_filter(img, np.var, size=3)
    variance_img1 = variance_img.reshape(-1)
    df['Variance s3'] = variance_img1  #Add column to original dataframe

    return df
Example #41
import cv2
import numpy as np


# Grayscale
def BGR2GRAY(img):
    # Grayscale
    gray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]
    return gray


gabor_0 = cv2.getGaborKernel((11, 11), 1.5, np.radians(0), 3, 1.2, 0,
                             cv2.CV_64F)
gabor_45 = cv2.getGaborKernel((11, 11), 1.5, np.radians(45), 3, 1.2, 0,
                              cv2.CV_64F)
gabor_90 = cv2.getGaborKernel((11, 11), 1.5, np.radians(90), 3, 1.2, 0,
                              cv2.CV_64F)
gabor_135 = cv2.getGaborKernel((11, 11), 1.5, np.radians(135), 3, 1.2, 0,
                               cv2.CV_64F)

img = cv2.imread("./image_71_80/imori.jpg").astype(np.float)

H, W, C = img.shape
img_gray = BGR2GRAY(img)
img_gray = np.pad(img_gray, (11 // 2, 11 // 2), 'edge')

out0 = np.zeros((H, W), dtype=np.float32)
out45 = np.zeros((H, W), dtype=np.float32)
out90 = np.zeros((H, W), dtype=np.float32)
out135 = np.zeros((H, W), dtype=np.float32)
Example #42
	
	return output

i = 1

while i > 0:
	'''
	# image argument
	ap = argparse.ArgumentParser()
	ap.add_argument("-i", "--image", required=True,
		help="path to the input image")
	args = vars(ap.parse_args())
	'''

	# kernels
	gabor = cv2.getGaborKernel((21,21), 5, 1, 10, 1, 0, cv2.CV_32F)
	mauricio = cv2.getGaborKernel((21,21), 15, 1, 10, 1, 0, cv2.CV_32F)
	marcelo = cv2.getGaborKernel((21,21), 25, 2, 20, 1, 0, cv2.CV_32F)
	tamires = cv2.getGaborKernel((21,21), 35, 2, 15, 1, 0, cv2.CV_32F)
	ricardo = cv2.getGaborKernel((21,21), 8, 1, 4, 3, 0, cv2.CV_32F)
	gilmar = cv2.getGaborKernel((21,21), 3, 3, 6, 3, 0, cv2.CV_32F)

	# build the kernel bank, a list of kernels that we will
	# apply using the `convolve` function and OpenCV's filter2D function

	kernelBank = (
        ("maricio", mauricio),
        ("ricardo", ricardo),
        ("tamires", tamires),
        ("marcelo", marcelo),
        ("gilmar", gilmar)        
Example #43
def finger_function():
    g_kernel = cv.getGaborKernel((21, 21),
                                 8.0,
                                 np.pi / 4,
                                 10.0,
                                 0.5,
                                 0,
                                 ktype=cv.CV_32F)
    # Load the data
    X_data = []
    files = glob.glob('-/Projekt/newDB/Fingerprints/fp_1/*.tif')
    for myFile in files:
        print(myFile)
        image = cv.imread(myFile, 0)
        crop_img = image[120:220, 75:175]
        filtered_img = cv.filter2D(crop_img, cv.CV_8UC3, g_kernel)
        X_data.append(filtered_img)
        #print('X_data shape:', np.array(X_data).shape)

    #print(X_data)

    Xt_data = []
    files = glob.glob('-Projekt/newDBT/Fingerprints/fp_1/*.tif')
    for myFile in files:
        print(myFile)
        image = cv.imread(myFile, 0)
        crop_img = image[120:220, 75:175]
        filtered_img = cv.filter2D(crop_img, cv.CV_8UC3, g_kernel)
        Xt_data.append(filtered_img)
        #print('X_data shape:', np.array(Xt_data).shape)

    #print(Xt_data)

    Xs_data = []
    files = glob.glob('-Projekt/checkphoto/*.tif')
    for myFile in files:
        print(myFile)
        image = cv.imread(myFile, 0)
        crop_img = image[120:220, 75:175]
        filtered_img = cv.filter2D(crop_img, cv.CV_8UC3, g_kernel)
        Xs_data.append(filtered_img)
        #print('X_data shape:', np.array(Xs_data).shape)

    #print(Xs_data)

    Xp_data = []
    files = glob.glob('-Projekt/newDBP/Fingerprints/*.tif')
    for myFile in files:
        print(myFile)
        image = cv.imread(myFile, 0)
        crop_img = image[120:220, 75:175]
        filtered_img = cv.filter2D(crop_img, cv.CV_8UC3, g_kernel)
        Xp_data.append(filtered_img)
        #print('X_data shape:', np.array(Xp_data).shape)

    #print(Xp_data)
    # Adjust the array formats
    X_data = np.array(X_data, dtype=np.float32)
    Xt_data = np.array(Xt_data, dtype=np.float32)
    Xs_data = np.array(Xs_data, dtype=np.float32)
    Xp_data = np.array(Xp_data, dtype=np.float32)

    X_data = X_data.astype('float32') / 255.
    Xt_data = Xt_data.astype('float32') / 255.
    Xs_data = Xs_data.astype('float32') / 255.
    Xp_data = Xp_data.astype('float32') / 255.

    X_data = np.reshape(X_data, (len(X_data), 100, 100, 1))
    Xt_data = np.reshape(Xt_data, (len(Xt_data), 100, 100, 1))
    Xs_data = np.reshape(Xs_data, (len(Xs_data), 100, 100, 1))
    Xp_data = np.reshape(Xp_data, (len(Xp_data), 100, 100, 1))

    my_file = Path('-/Projekt/my_model.h5')
    if my_file.exists():
        autoencoder = load_model('my_model.h5')
        print("MODEL WCZYTANY")
    else:
        # Build the model
        input_img = Input(shape=(100, 100, 1))
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        encoded = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(16, (3, 3), activation='relu')(x)
        x = UpSampling2D((2, 2))(x)
        decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

        autoencoder = Model(input_img, decoded)
        autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

        autoencoder = Model(input_img, decoded)
        autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
        # train the model
        autoencoder.fit(X_data,
                        X_data,
                        epochs=10,
                        batch_size=250,
                        shuffle=True,
                        validation_data=(Xt_data, Xt_data),
                        callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])

    # run the data through the autoencoder
    #encoded_imgs = autoencoder.predict(Xt_data)
    encoded_s = autoencoder.predict(Xs_data)
    encoded_p = autoencoder.predict(Xp_data)
    print("ok")
    from scipy import signal
    k = 0
    s = 1
    m = 10
    for j in range(s):
        for l in range(m):

            img = encoded_s[j].reshape(100, 100)
            img2 = encoded_p[l].reshape(100, 100)
            cor = signal.correlate2d(img, img, mode="valid")
            cor1 = signal.correlate2d(img2, img2, mode="valid")
            if cor == cor1:
                k = k + 1
            else:
                k = k
    return k

    # Save the model
    autoencoder.save('my_model.h5')
Example #44
 def __init__(self, sigma, theta, lambd, gamma, psi):
     sz = sigma2sz(sigma)
     kernel = cv2.getGaborKernel((sz, sz), sigma, theta, lambd, gamma, psi)
     super().__init__(_Filter2D, kernel)
Example #45
from matplotlib import pyplot as plt
import cv2
import csv
import numpy as np
from random import randrange, uniform
import pandas as pd

IMAGEPATH = "C:\\Python27\\YOLOandGabor\\Data\\Source_Images\\Training_Images_Kaggle\\Resize_Images"
CSVPATH = "C:\\Python27\\YOLOandGabor\\Data\\Source_Images\\Training_Images_Kaggle\\Resize_Images\\Annotation-export-resize.csv"
target_csv = 'C:\\Python27\\YOLOandGabor\\Data\\Source_Images\\Training_Images_Kaggle\\Filtered_Image\\Annotation-export-filtered.csv'
target = 'C:\\Python27\\YOLOandGabor\\Data\\Source_Images\\Training_Images_Kaggle\\Filtered_Image'

num = 7
kernels = []
for i in range(num * num):
    kernels.append(cv2.getGaborKernel((100, 100), 4, randrange(0,180, 45), randrange(10, 20), uniform(.2, .9), 0, ktype=cv2.CV_32F))

# Credits: https://stackoverflow.com/a/42579291/8791891
def convolution2d(image, kernel, bias):
    m, n = kernel.shape
    if (m == n):
        y, x = image.shape
        y = y - m + 1
        x = x - m + 1
        new_image = np.zeros((y, x))
        for i in range(y):
            for j in range(x):
                new_image[i][j] = np.sum(image[i:i+m, j:j+m]*kernel) + bias
    return new_image
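A minimal sketch of calling the manual implementation above on a synthetic patch (illustrative only; the output is the "valid" region, so it is smaller than the input):

img = np.zeros((120, 120), dtype=np.float32)
img[40:80, 40:80] = 1.0                  # bright square on a dark background
response = convolution2d(img, kernels[0], bias=0)
print(response.shape)                    # (21, 21) for a 100x100 kernel on a 120x120 image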

fig, ax = plt.subplots(num, num, sharex=True, sharey=True, figsize=[16, 16])
Example #46
 def __conv_gabor(self, src, theta):
     # Gabor is a linear filter used for edge extraction.
     # Its frequency and orientation representations are similar to those of the human visual system.
     # It offers good orientation and scale selectivity, is insensitive to illumination changes, and is therefore well suited to texture analysis.
     kernel = cv.getGaborKernel((8, 8), 4, theta, 8, 1)
     return cv.filter2D(src, cv.CV_32F, kernel)
Example #47
    res = cv2.filter2D(img, cv2.CV_8UC4, filters)  # 2D filtering; the kernel is the filter template
    return res


def getMatchNum(matches, ratio):
    '''Return the number of matched feature points and the match mask'''
    matchesMask = [[0, 0] for i in range(len(matches))]
    matchNum = 0
    for i, (m, n) in enumerate(matches):
        if m.distance < ratio * n.distance:  # keep matches whose distance ratio is below ratio
            matchesMask[i] = [1, 0]
            matchNum += 1
    return (matchNum, matchesMask)


filters = cv2.getGaborKernel((11, 11), 11, 2.670353755551324, 11, 1.94,
                             -1.5707963267948966)

# Read the image files
imgfiles = []
for root, dirs, files in os.walk("./img"):
    imgfiles = files

# Initialize SIFT and FLANN
# Create the SIFT feature extractor
sift = cv2.xfeatures2d.SIFT_create()
# Create the FLANN matcher
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
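getMatchNum implements the standard ratio test over knnMatch output; a minimal sketch of feeding it with the SIFT and FLANN objects created above (the image paths are hypothetical):

img1 = cv2.imread('./img/query.jpg', 0)    # hypothetical query image
img2 = cv2.imread('./img/sample.jpg', 0)   # hypothetical candidate image
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
matches = flann.knnMatch(des1, des2, k=2)  # two nearest neighbours per descriptor
matchNum, matchesMask = getMatchNum(matches, 0.7)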
Example #48
# ## Gabor Kernel at different theta

# In[5]:

print(complexcell(rec, plot=False))  #square image given as input
print(complexcell(tri, plot=True))  #triangle image given as input
plt.savefig('GaborResponse.png', bbox_inches='tight')

# ### Sample response for rectangle.

# In[6]:

gabor_kernel = cv2.getGaborKernel(ksize=(11, 11),
                                  sigma=2,
                                  theta=np.pi / 2,
                                  lambd=4,
                                  gamma=2,
                                  psi=0)
rec = color.rgb2gray(rec)
filtered = ndi.convolve(rec, gabor_kernel, mode='wrap')
filtered[filtered < 0.85] = 0
fig, ax = plt.subplots(1, 2, figsize=(8, 5))
ax[0].imshow(filtered, cmap='gray')
ax[0].set_title("At 90 degree")
gabor_kernel = cv2.getGaborKernel(ksize=(11, 11),
                                  sigma=2,
                                  theta=0,
                                  lambd=4,
                                  gamma=2,
                                  psi=0)
rec = color.rgb2gray(rec)
Example #49
import matplotlib.pyplot as plt
from oct2py import octave as oc

cascPath = "haar/haarcascade_frontalface_alt.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

sdgaussian = 3.14
theta = 0
spatialasp = 1

kernels = np.zeros((31, 31))
for theta in range(0, 181, 25):  # eight orientations, in degrees
    for lambd in [2, 3, 4]:  # three wavelengths of the sinusoidal factor
        kernels += cv2.getGaborKernel((31, 31),
                                      sdgaussian,
                                      theta * 3.14 / 180,
                                      lambd,
                                      spatialasp,
                                      psi=0)
'''
for i in range(18):
	plt1 = plt.gca()
	plt1.axes.get_xaxis().set_ticks([])
	plt1.axes.get_yaxis().set_ticks([])
	plt.subplot(6,3,i+1)
	plt.imshow(kernels[i],'gray')

plt.show()
'''
image = "image.tiff"
image = cv2.imread(image, 0)
image = cv2.equalizeHist(image)
Example #50
 def encode_features(self,image):
     g_kernel = cv2.getGaborKernel((27, 27), 8.0, np.pi/4, 10.0, 0.5, 0, ktype=cv2.CV_32F)
     filtered_img = cv2.filter2D(image, cv2.CV_8UC3, g_kernel)
     h, w = g_kernel.shape[:2]
     g_kernel = cv2.resize(g_kernel, (3*w, 3*h), interpolation=cv2.INTER_CUBIC)
     return filtered_img
Example #51
import cv2
import numpy as np
img = cv2.imread(
    '/home/grim/learning_image_classification/code/cracks/327.jpg', 0)
cv2.imshow("normal", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

import cv2
import numpy as np
img = cv2.imread(
    '/home/grim/learning_image_classification/code/cracks/327.jpg', 0)
g_kernel = cv2.getGaborKernel((30, 30),
                              5.6,
                              np.pi / 4,
                              19,
                              -20,
                              0,
                              ktype=cv2.CV_32F)
filtered_img = cv2.filter2D(img, -1, g_kernel)
cv2.imshow("filtered image", filtered_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #52
# filter window: if it is low the filter will be small but more numerous windows
# will be used, and if it is higher the window will be bigger
lambd = 6.0

nr, nc, nl = img.shape

#lets show 9 cases changing theta and sigma
for sig in sigmas:
    stack_toshow = np.zeros((1, nc * 3, 3), np.uint8)

    for ang in thetas:
        #creating the filter
        gabor = cv2.getGaborKernel((30, 30),
                                   sig,
                                   ang,
                                   lambd,
                                   0.5,
                                   0,
                                   ktype=cv2.CV_32F)

        #creating an image to show the filter visualy
        gabor_toshow = cv2.resize(gabor, (nc, nr),
                                  interpolation=cv2.INTER_AREA)
        gabor_toshow = np.around(
            np.abs(gabor_toshow) * 255 / (np.max(gabor_toshow))).astype(
                np.uint8)
        gabor_toshow = cv2.merge([gabor_toshow, gabor_toshow, gabor_toshow])

        ##applying the filter
        img_filt = cv2.filter2D(img, cv2.CV_8UC3, gabor)
Example #53
]  #Create empty list to hold all kernels that we will generate in a loop
for theta in range(
        2):  #Define number of thetas. Here only 2 theta values 0 and 1/4 . pi
    theta = theta / 4. * np.pi
    for sigma in (1, 3):  #Sigma with values of 1 and 3
        for lamda in np.arange(0, np.pi, np.pi / 4):  #Range of wavelengths
            for gamma in (0.05, 0.5):  #Gamma values of 0.05 and 0.5

                gabor_label = 'Gabor' + str(
                    num)  #Label Gabor columns as Gabor1, Gabor2, etc.
                #                print(gabor_label)
                ksize = 9
                kernel = cv2.getGaborKernel((ksize, ksize),
                                            sigma,
                                            theta,
                                            lamda,
                                            gamma,
                                            0,
                                            ktype=cv2.CV_32F)
                kernels.append(kernel)
                #Now filter the image and add values to a new column
                fimg = cv2.filter2D(img2, cv2.CV_8UC3, kernel)
                filtered_img = fimg.reshape(-1)
                df[gabor_label] = filtered_img  #Labels columns as Gabor1, Gabor2, etc.
                print(gabor_label, ': theta=', theta, ': sigma=', sigma,
                      ': lamda=', lamda, ': gamma=', gamma)
                num += 1  #Increment for gabor column label

print(df.head())

df.to_csv("Gabor.csv")
Example #54
def create_mask_feature_extraction(img, labeled_img=None):
    df = pd.DataFrame()

    i = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = i[:, :, 0]
    img2 = img2.reshape((-1))

    df['Original Image'] = img2

    num = 1  #To count numbers up in order to give Gabor features a label in the data frame
    kernels = []
    for theta in range(2):  #Define number of thetas
        theta = theta / 4. * np.pi
        for sigma in (1, 3):  #Sigma with 1 and 3
            for lamda in np.arange(0, np.pi, np.pi / 4):  #Range of wavelengths
                for gamma in (0.05, 0.5):  #Gamma values of 0.05 and 0.5
                    gabor_label = 'Gabor' + str(
                        num)  #Label Gabor columns as Gabor1, Gabor2, etc.
                    ksize = 9
                    kernel = cv2.getGaborKernel((ksize, ksize),
                                                sigma,
                                                theta,
                                                lamda,
                                                gamma,
                                                0,
                                                ktype=cv2.CV_32F)
                    kernels.append(kernel)
                    #Now filter the image and add values to a new column
                    fimg = cv2.filter2D(img2, cv2.CV_8UC3, kernel)
                    filtered_img = fimg.reshape(-1)
                    df[gabor_label] = filtered_img
                    num += 1  #Increment for gabor column label

    #CANNY EDGE
    edges = cv2.Canny(img, 100, 200)  #Image, min and max values
    edges1 = edges.reshape(-1)
    df['Canny Edge'] = edges1  #Add column to original dataframe

    #ROBERTS EDGE
    edge_roberts = roberts(img)
    edge_roberts1 = edge_roberts.reshape(-1)
    df['Roberts'] = edge_roberts1

    #SOBEL
    edge_sobel = sobel(img)
    edge_sobel1 = edge_sobel.reshape(-1)
    df['Sobel'] = edge_sobel1

    #SCHARR
    edge_scharr = scharr(img)
    edge_scharr1 = edge_scharr.reshape(-1)
    df['Scharr'] = edge_scharr1

    #PREWITT
    edge_prewitt = prewitt(img)
    edge_prewitt1 = edge_prewitt.reshape(-1)
    df['Prewitt'] = edge_prewitt1

    #GAUSSIAN with sigma=3
    from scipy import ndimage as nd
    gaussian_img = nd.gaussian_filter(img, sigma=3)
    gaussian_img1 = gaussian_img.reshape(-1)
    df['Gaussian s3'] = gaussian_img1

    #GAUSSIAN with sigma=7
    gaussian_img2 = nd.gaussian_filter(img, sigma=7)
    gaussian_img3 = gaussian_img2.reshape(-1)
    df['Gaussian s7'] = gaussian_img3

    #MEDIAN with sigma=3
    median_img = nd.median_filter(img, size=3)
    median_img1 = median_img.reshape(-1)
    df['Median s3'] = median_img1

    #VARIANCE with size=3
    variance_img = nd.generic_filter(img, np.var, size=3)
    variance_img1 = variance_img.reshape(-1)
    df['Variance s3'] = variance_img1  #Add column to original dataframe

    if labeled_img is not None:
        labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_BGR2GRAY)
        labeled_img1 = labeled_img.reshape(-1)
        df['Labels'] = labeled_img1

    return df
Example #55
from bindsnet.learning import PostPre, WeightDependentPostPre
from bindsnet.evaluation import assign_labels, all_activity
from sklearn.metrics import classification_report

from dataset import Dataset

SUBJECTS = ['soccer_ball', 'butterfly', 'revolver', 'garfield']
# SUBJECTS = ['soccer_ball', 'revolver']

GAMMA = 0.5
FILTER_TYPES = 4
THETA = [math.pi * i / FILTER_TYPES for i in range(FILTER_TYPES)]
FILTER_SIZES = [5, 11, 19]
IMAGE_SIZE = 35
KERNELS = [
    cv2.getGaborKernel((size, size), size / 3, theta, size / 2, GAMMA)
    for theta in THETA for size in FILTER_SIZES
]
# KERNELS = [cv2.getGaussianKernel(size, size / 9) - cv2.getGaussianKernel(size, size / 4.5)
#            for size in FILTER_SIZES]
# bind kernel as a default argument so each lambda keeps its own kernel
# (a bare closure here would make every filter use the last kernel in KERNELS)
FILTERS = [lambda x, kernel=kernel: cv2.filter2D(x, -1, kernel) for kernel in KERNELS]

FEATURES = range(12)
RUN_TIME = 40
TRAINED_NETWORK_PATH = 'trained_network.pt'
EPOCHS = 2


def get_s1_name(size):
    return 'S1_%d' % size
Example #56
 def __init__(self, size, orientation, frequency):
     self.gaborFilter = cv2.getGaborKernel(
         (size, size), 3, orientation * np.pi / 180, frequency, 4, 0,
         cv2.CV_32F)
Example #57
    label_malignant.append(1)  # label

target_y = np.concatenate([label_benign, label_malignant], axis=0)  # put the labels together
# ======##=====##========####=============##===========####======##=============####=
##### Gabor features
gabor_list1 = []
gabor_list2 = []
for i in np.arange(367):
    image = mpimg.imread('/Users/huafenguo/Desktop/良性/40X/' + str(i) + '.png')

    kern = cv2.getGaborKernel((31, 31),
                              3.85,
                              np.pi / 4,
                              8.0,
                              1.0,
                              0,
                              ktype=cv2.CV_32F)
    # ddepth=-1 keeps the float depth returned by mpimg.imread; an 8-bit
    # destination is not supported for float input
    image = cv2.filter2D(image, -1, kern)

    gabor_list1.append(image.astype('float64'))

for i in np.arange(358):
    image = mpimg.imread('/Users/huafenguo/Desktop/恶性/40X/' + str(i) + '.png')

    kern = cv2.getGaborKernel((31, 31),
                              3.85,
                              np.pi / 4,
                              8.0,
                              1.0,
                              0,
                              ktype=cv2.CV_32F)
    image = cv2.filter2D(image, -1, kern)

    gabor_list2.append(image.astype('float64'))
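
# Sketch of a downstream step this fragment implies but does not show (an
# assumption): flatten the filtered images into feature vectors so the rows of
# X_gabor line up with target_y, assuming the benign and malignant labels were
# built in the same order as the two loops above and all images share one size.
X_gabor = np.array([im.reshape(-1) for im in gabor_list1 + gabor_list2])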
Example #58
0
# imports inferred from the function body (an assumption about the original
# module; in particular gaussian_filter is taken here from scipy.ndimage)
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from skimage import io
from skimage.color import rgb2gray
from skimage.measure import regionprops


def test_gabor_cv2(im,
                   size=9,
                   stdv=1,
                   angle=None,
                   wave_length=3,
                   eccen=1,
                   phase_off=0,
                   plot=True,
                   smooth=True,
                   interp='none'):
    """ 
    Process image with gabor filter bank of specified orientation or derived from
    image positive values bounding box
    
    This is the open cv based one
    
    Parameters
    ----------
    
    inRas: string
                  input raster

    size: int
           size of in gabor kernel in pixels (ksize)
        
    stdv: int
           stdv / of of gabor kernel (sigma/stdv)
           
        
    angles: int
           number of angles  in gabor kernel (theta)

    wave_length: int
           width of stripe in gabor kernel (lambda/wavelength)
           optional best to leave none and hence same as size
        
    phase_off: int
           the phase offset of the kernel      
           
    eccen: int
          the elipticity of the kernel when = 1 the gaussian envelope is circular (gamma)

    """

    # ksize - size of gabor filter (n, n)
    # sigma - standard deviation of the gaussian function
    # theta - orientation of the normal to the parallel stripes
    # lambda - wavelength of the sinusoidal factor wave_length
    # gamma - spatial aspect ratio
    # psi - phase offset
    # ktype - type and range of values that each pixel in the gabor kernel can hold

    def deginrad(degree):
        radiant = 2 * np.pi / 360 * degree
        return radiant

    if hasattr(im, 'shape'):
        img = im
    else:

        img = rgb2gray(io.imread(im))

    if smooth:

        img = gaussian_filter(img, 1)

    #TODO add a polygon argument to make it easier....
    if angle is None:
        # here we use the orientation to get the line of crops assuming the user has
        # cropped it well
        bw = img > 0
        props = regionprops(bw * 1)
        orient = props[0]['Orientation']
        angle = 90 - np.degrees(orient)

    if wave_length is None:
        wave_length = 3

#    if width2 == None:
#        width2 = width
#
    theta = deginrad(
        angle)  # unit circle: left: -90 deg, right: 90 deg, straight: 0 deg
    g_kernel = cv2.getGaborKernel((size, size),
                                  stdv,
                                  theta,
                                  wave_length,
                                  eccen,
                                  phase_off,
                                  ktype=cv2.CV_32F)
    filtered_img = cv2.filter2D(img, cv2.CV_8UC3, g_kernel)

    theta2 = deginrad(angle + 90)
    g_kernel2 = cv2.getGaborKernel((size, size),
                                   stdv,
                                   theta2,
                                   wave_length,
                                   eccen,
                                   phase_off,
                                   ktype=cv2.CV_32F)
    filtered_img2 = cv2.filter2D(img, cv2.CV_8UC3, g_kernel2)

    if plot:
        fig = plt.figure()
        fig.add_subplot(1, 4, 1)
        plt.imshow(img)
        fig.add_subplot(1, 4, 2)
        plt.imshow(filtered_img)
        fig.add_subplot(1, 4, 3)
        plt.imshow(filtered_img2)
        fig.add_subplot(1, 4, 4)
        plt.imshow(g_kernel, interpolation=interp)

    #h, w = g_kernel.shape[:2]
    #g_kernel = cv2.resize(g_kernel, (3*w, 3*h), interpolation=cv2.INTER_CUBIC)
    #cv2.imshow('gabor kernel (resized)', g_kernel)


#    filtered_img[img==0]=0
#    filtered_img2[img==0]=0

    return filtered_img, filtered_img2
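
# Usage sketch (an assumption, not part of the original example): run the
# function on a cropped field image and keep the two orthogonal responses.
# 'crop_field.tif' is a placeholder path.
along, across = test_gabor_cv2('crop_field.tif', size=15, stdv=2,
                               wave_length=5, plot=False)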
Example #59
0
frequencies = numpy.geomspace(min_sf, max_sf, num=n_sfs)
orientations = numpy.arange(n_oris) * (pi / n_oris)
ori_freq_idx = list(itertools.product(range(n_oris), range(n_sfs)))

gain = numpy.full([n_oris, n_sfs, size, size], numpy.nan)
for o, f in tqdm.tqdm(ori_freq_idx, desc='kernel gain'):
    wavelength = size / frequencies[f]
    gaussian_std = wavelength * bandwidth_constant
    kside = 1 + 2 * int(ceil(kernel_extent * gaussian_std))
    kernel_params = dict(ksize=(kside, kside),
                         sigma=gaussian_std,
                         theta=orientations[o],
                         lambd=wavelength,
                         gamma=gamma,
                         ktype=cv2.CV_32F)
    kernel_real = cv2.getGaborKernel(psi=0, **kernel_params)
    kernel_imag = cv2.getGaborKernel(psi=pi / 2, **kernel_params)
    filt_real = cv2.filter2D(image, -1, kernel_real)
    filt_imag = cv2.filter2D(image, -1, kernel_imag)
    gain[o, f, :, :] = numpy.abs(filt_real + 1j * filt_imag)

features = []
for o, f in tqdm.tqdm(ori_freq_idx, desc='local selection'):
    wavelength = size / frequencies[f]
    gaussian_std = wavelength * bandwidth_constant

    ## a) pixels above threshold for this kernel
    kernel_pix_ranks = rankdata(gain[o, f, :, :]).reshape([size, size])
    pix_rank_cutoff = (1 - pix_fraction) * (size**2)
    kernel_top_pix = kernel_pix_ranks > pix_rank_cutoff
    best_ori = gain[:, f, :, :].argmax(axis=0)
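
# Self-contained sketch of the quadrature-pair energy computed above, with
# assumed parameter values (the original snippet defines size, n_oris, n_sfs,
# bandwidth_constant, etc. elsewhere): the psi=0 and psi=pi/2 responses are
# combined into a phase-invariant magnitude.
import cv2
import numpy
from math import pi, ceil

demo_size = 64
demo_image = numpy.random.rand(demo_size, demo_size).astype(numpy.float32)  # placeholder image
wl, std, ori = 8.0, 4.0, pi / 4
kside_demo = 1 + 2 * int(ceil(3 * std))
params = dict(ksize=(kside_demo, kside_demo), sigma=std, theta=ori,
              lambd=wl, gamma=1.0, ktype=cv2.CV_32F)
real = cv2.filter2D(demo_image, -1, cv2.getGaborKernel(psi=0, **params))
imag = cv2.filter2D(demo_image, -1, cv2.getGaborKernel(psi=pi / 2, **params))
energy_demo = numpy.abs(real + 1j * imag)   # same shape as demo_image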
Example #60
0
def extract_gabor_features(
        filename,
        resize=255,
        window_sizes=[64, 32, 16, 8, 4],
        batch_size=100,
        gabor_sigma=1.0,
        gabor_lambda=0.25,
        gabor_gamma=0.02,
        gabor_psi=0,
        gabor_thetas=[np.pi / 4, np.pi / 2, 3 * np.pi / 4, np.pi]):
    """
    Briefly, calculates the means and standard deviations of a random sampling of
    windows convolved with several Gabor filters.

    Windows of sizes `window_sizes` are centered on a random sampling of pixels of
    the image located at `filename` resized to `resize`x`resize` pixels. A quarter
    of the area of the image is sampled.

    The final outputs are the means and standard deviations of the
    convolutions of the windows with `n` Gabor filters, where `n` is the
    number of elements in `gabor_thetas`. The Gabor filters are generated
    according to the `gabor_`-prefixed arguments.

    The means and standard deviations are saved into an HDF5 file with the filename
    "gabor_{image_name}_s{sigma}_l{lambda}_g{gamma}_p{psi}_{time}.h5" where

    {image_name} is the filename of the image
    {sigma} is the sigma argument of the gabor function
    {lambda} is the lambda argument of the gabor function
    {gamma} is the gamma argument of the gabor function
    {psi} is the psi argument of the gabor function
    {time} is the unix time stamp

    If a compliant GPU is detected, convolutions are computed on it; otherwise
    computation falls back to the CPU.

    Convolutions are computed in batches of size `batch_size`. In the event of a
    memory error, this number should be reduced.

    :param filename: The path to the image whose gabor energies are to be calculated.
    :param resize: Image at `filename` is resized to `resize`x`resize` pixels
                    prior to computation when `resize` > 1; a value between 0
                    and 1 is instead treated as a scale factor. Image size is
                    positively correlated with computation time.
    :param window_sizes: The sizes of the sliding windows to be extracted at each
                            pixel.
    :param batch_size:  The number of windows to convolve at a time. Reduce this
                            number in the event of memory errors; increase this
                            number in the event of slow computation time.
    :param gabor_sigma: The standard deviation of the gabor function's gaussian envelope.
    :param gabor_lambda: The wavelength of the gabor function's sinusoidal factor.
    :param gabor_gamma: The spatial aspect ratio of the gabor function.
    :param gabor_psi: The phase offset of the gabor function.
    :param gabor_thetas: The orientation of the filter, in radians.
    :return: The name of the H5 file that the results are saved to.
    """
    image_name = os.path.basename(filename)

    hd5_filename = "gabor_{}_s{}_l{}_g{}_p{}_{}.h5".format(
        image_name, gabor_sigma, gabor_lambda, gabor_gamma, gabor_psi,
        int(time.time()))

    gabor_feature_tables = get_HDF5_table(hd5_filename, window_sizes,
                                          len(gabor_thetas))

    im = Image.open(filename).convert('L')

    im = np.array(im)

    if resize:
        if resize > 1:
            im = skimage.transform.resize(im, (resize, resize))
        else:
            im = skimage.transform.resize(
                im, (im.shape[0] * resize, im.shape[1] * resize))

    num_windows = int(math.pow(im.shape[0], 2) // 4)

    batches = get_windows_batches(num_windows, window_sizes, im, batch_size)

    total_batches = round(len(next(batches)) / 100)
    total_time = 0
    batch_counter = 0

    image_tensor = T.tensor4()
    filter_tensor = T.tensor4()

    for i, batch in enumerate(batches):
        start = time.time()

        coord, batch = batch

        win_shape = batch[0, 0, :, :].shape
        num_batches = len(batch)

        batch = batch.astype(np.float32)

        kerns = []

        for gabor_theta in gabor_thetas:
            kern = cv2.getGaborKernel(win_shape,
                                      gabor_sigma,
                                      gabor_theta,
                                      gabor_lambda,
                                      gabor_gamma,
                                      gabor_psi,
                                      ktype=cv2.CV_32F)
            kerns.append(kern)

        means = []
        stds = []
        for kern in kerns:
            kern_shape = np.array(kern).shape

            kern_4d = np.array(kern).reshape(1, 1, kern_shape[0],
                                             kern_shape[1]).astype(np.float32)

            theano_convolve2d = theano.function(
                [image_tensor, filter_tensor],
                T.nnet.conv2d(image_tensor,
                              filter_tensor,
                              input_shape=(num_batches, 1, win_shape[0],
                                           win_shape[1]),
                              filter_shape=(num_batches, 1, kern_shape[0],
                                            kern_shape[1]),
                              border_mode=(win_shape[0], win_shape[1])))

            theano_convolved = theano_convolve2d(batch, kern_4d)

            output_shape = theano_convolved.shape

            conv_flat = theano_convolved.reshape(
                num_batches, output_shape[2] * output_shape[3])

            conv_mean = conv_flat.mean(axis=1)
            means.append(conv_mean)

            conv_std = conv_flat.std(axis=1)
            stds.append(conv_std)

        end = time.time() - start

        total_time += end

        batch_counter += 1

        avg_time = total_time / (batch_counter)

        print("{:5}/{:5} batches complete [ETA {:3.2f}m], ~{:3.2f}s per batch".
              format(batch_counter, total_batches,
                     (avg_time * (total_batches - batch_counter)) / 60,
                     avg_time))

        # h5 start
        zipped_means = zip(*means)
        zipped_stds = zip(*stds)

        feature_table = gabor_feature_tables[win_shape[0]]
        feature_table_row = feature_table.row

        for i, mean in enumerate(zipped_means):
            feature_table_row['win_size'] = win_shape[0]
            feature_table_row['coord'] = coord[i]
            feature_table_row['means'] = mean
            feature_table_row['stds'] = next(zipped_stds)
            feature_table_row.append()

        feature_table.flush()
        # h5 end

    return hd5_filename
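
# Usage sketch (an assumption, not part of the original example): compute the
# Gabor window statistics for one image and inspect the resulting PyTables
# file. 'tissue.png' is a placeholder path.
import tables

h5_name = extract_gabor_features('tissue.png', resize=255,
                                 window_sizes=[32, 16], batch_size=50)
with tables.open_file(h5_name, mode='r') as h5:
    for table in h5.walk_nodes('/', classname='Table'):
        print(table.name, table.nrows)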