Example #1
    def fresnelConvolutionTransform(self,d) :
        # make intensity distribution
        i2 = Intensity2D(self.nx,self.startx,self.endx,
                         self.ny,self.starty,self.endy,
                         self.wl)       

        # FT on initial distribution
        u1ft = pl.fft2(self.i)

        # 2d convolution kernel
        k = 2*pl.pi/i2.wl
        
        # make spatial frequency matrix
        maxsfx = 2*pl.pi/self.dx
        maxsfy = 2*pl.pi/self.dy
        
        dsfx = 2*maxsfx/(self.nx)
        dsfy = 2*maxsfy/(self.ny)
        
        self.sfx = pl.arange(-maxsfx/2,maxsfx/2+1e-15,dsfx/2)
        self.sfy = pl.arange(-maxsfy/2,maxsfy/2+1e-15,dsfy/2)

        [self.sfxgrid, self.sfygrid] = pl.fftshift(pl.meshgrid(self.sfx,self.sfy))
                
        # make convolution kernel 
        kern = pl.exp(1j*d*(self.sfxgrid**2+self.sfygrid**2)/(2*k))
        
        # apply convolution kernel and invert
        i2.i = pl.ifft2(kern*u1ft) 

        return i2
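
The Fresnel examples in this collection depend on the surrounding Intensity2D class. As a reference point, here is a minimal self-contained sketch of the same transfer-function propagation in plain numpy; fresnel_propagate, the grid size, and the optical parameters are illustrative, not part of the original code:

import numpy as np

def fresnel_propagate(u0, wl, dx, d):
    # paraxial (Fresnel) propagation of a sampled complex field u0 over distance d
    ny, nx = u0.shape
    k = 2 * np.pi / wl
    # angular spatial frequencies, in numpy's native FFT ordering
    sfx, sfy = np.meshgrid(2 * np.pi * np.fft.fftfreq(nx, d=dx),
                           2 * np.pi * np.fft.fftfreq(ny, d=dx))
    kern = np.exp(1j * d * (sfx**2 + sfy**2) / (2 * k))  # same kernel as above
    return np.fft.ifft2(kern * np.fft.fft2(u0))

# square aperture lit by a unit plane wave, propagated 0.5 m at 633 nm
u0 = np.zeros((256, 256), dtype=complex)
u0[112:144, 112:144] = 1.0
u1 = fresnel_propagate(u0, wl=633e-9, dx=10e-6, d=0.5)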
Example #2
def dense_gauss_kernel(sigma, x, y=None):
    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        yf = xf
        yy = xx

    xyf = pylab.multiply(xf, pylab.conj(yf))

    xyf_ifft = pylab.ifft2(xyf)
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    return k
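
A quick usage sketch for dense_gauss_kernel, assuming pylab is imported as the function expects (patch size, shift, and sigma are illustrative). Because the correlation is computed through the FFT and re-centred with roll, the argmax of the response encodes the circular shift between the two patches relative to the window centre:

import numpy as np

patch = np.random.rand(64, 64)
window = np.outer(np.hanning(64), np.hanning(64))
x = patch * window                    # cosine-windowed patch
y = np.roll(x, (3, 5), axis=(0, 1))   # same content, circularly shifted

k = dense_gauss_kernel(0.2, x, y)     # 64x64 response map
print(np.unravel_index(k.argmax(), k.shape))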
Example #3
    def fresnelConvolutionTransform(self, d):
        # make intensity distribution
        i2 = Intensity2D(self.nx, self.startx, self.endx, self.ny, self.starty,
                         self.endy, self.wl)

        # FT on initial distribution
        u1ft = pl.fft2(self.i)

        # 2d convolution kernel
        k = 2 * pl.pi / i2.wl

        # make spatial frequency matrix
        maxsfx = 2 * pl.pi / self.dx
        maxsfy = 2 * pl.pi / self.dy

        dsfx = 2 * maxsfx / (self.nx)
        dsfy = 2 * maxsfy / (self.ny)

        self.sfx = pl.arange(-maxsfx / 2, maxsfx / 2 + 1e-15, dsfx / 2)
        self.sfy = pl.arange(-maxsfy / 2, maxsfy / 2 + 1e-15, dsfy / 2)

        [self.sfxgrid,
         self.sfygrid] = pl.fftshift(pl.meshgrid(self.sfx, self.sfy))

        # make convolution kernel
        kern = pl.exp(1j * d * (self.sfxgrid**2 + self.sfygrid**2) / (2 * k))

        # apply convolution kernel and invert
        i2.i = pl.ifft2(kern * u1ft)

        return i2
Example #4
def circular_blur(f, radius):
    n = max(f.shape)
    t = np.concatenate((np.arange(0, n / 2 + 1), np.arange(-n / 2, -1)))
    [Y, X] = np.meshgrid(t, t)
    k = (X**2 + Y**2) <= radius**2
    k = k / np.sum(k)
    return np.real(pylab.ifft2(pylab.fft2(f) * pylab.fft2(k)))
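
Usage sketch for circular_blur (image size and radius illustrative). Since the convolution is done with FFTs it wraps around the image borders, and the normalized disk kernel preserves the image mean:

import numpy as np

f = np.random.rand(128, 128)   # must be square: the kernel grid is n x n
g = circular_blur(f, radius=5)
assert abs(g.mean() - f.mean()) < 1e-8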
Example #5
    def update(self, img):
        img_now = ops.read_image(img)
        if img_now.ndim == 3:
            img_now = ops.rgb2gray(img_now)
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        # print(x)
        k = ops.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = response
        row, col = pylab.unravel_index(r.argmax(), r.shape)

        self.pos = self.pos - pylab.floor(self.sz / 2) + [row, col]
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        k = ops.dense_gauss_kernel(self.sigma, x)

        new_alphaf = pylab.divide(self.yf,
                                  (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

        box_new = np.array(
            [self.pos[1] - self.sz[1] / 2 + 1,
             self.pos[0] - self.sz[0] / 2 + 1,
             self.sz[1], self.sz[0]],
            dtype=np.float32)
        return box_new
Example #6
def split_step_fourier(state, V, Wx, Wy, dt):
    """
    This function evolves the state by a time step using the split step Fourier method
    """
    stateNew = sp.exp(-1j * dt * V) * state
    stateNew = pl.fft2(stateNew)
    stateNew = pl.exp(-1j * dt * (Wx ** 2 + Wy ** 2)) * stateNew

    return pl.ifft2(stateNew)
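
A minimal driver for split_step_fourier, evolving a free 2-D Gaussian wave packet. It assumes the aliases the snippet itself uses (import scipy as sp, import pylab as pl; sp.exp was an alias for np.exp in older SciPy). Grid size, box length, and dt are illustrative:

import numpy as np

n, L = 128, 20.0
xs = np.linspace(-L / 2, L / 2, n, endpoint=False)
X, Y = np.meshgrid(xs, xs)
state = np.exp(-(X**2 + Y**2)).astype(complex)   # initial wave packet

V = np.zeros((n, n))                              # free particle: no potential
w = 2 * np.pi * np.fft.fftfreq(n, d=L / n)        # FFT-ordered wavenumbers
Wx, Wy = np.meshgrid(w, w)

for _ in range(100):
    state = split_step_fourier(state, V, Wx, Wy, dt=1e-3)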
Example #7
def BlurredLaplacian(f, radius):
    k = np.zeros(f.shape)
    k[0, 0] = -4
    k[0, -1] = 1
    k[-1, 0] = 1
    k[0, 1] = 1
    k[1, 0] = 1
    k = gaussian_blur(k, r)
    return np.real(pylab.ifft2(pylab.fft2(f) * pylab.fft2(k)))
Example #8
def correlation_transform(f, radius):
    n = max(f.shape)
    t = np.concatenate((np.arange(0, n / 2 + 1), np.arange(-n / 2, -1)))
    [Y, X] = np.meshgrid(t, t)
    k = (X**2 + Y**2) <= radius**2
    C = np.sum(k)
    k = k / np.sum(k)
    #imageplot(k)
    #plt.show()
    return np.real(pylab.ifft2(pylab.fft2(f) * pylab.fft2(k))) - f / C
Example #9
def SpectralSynthesisFM2D(max_level, sigma, H, seed=0, normalise=True, bounds=[0,1]):
  """
  ________________________________________________________________________
  Args:
      max_level : Maximum number of recursions( N = 2^max_level)
      sigma     : Initial standard deviation
      H         : Roughness constant, varies from 0.0 to 1.0
      seed      : seed value for random number generator
      normalise : normalizes the data using bound
      bounds    : used for normalization of the grid data
  Result:     
      Output is given in the form of an array(grid) which holds surface
      elevation data for a square region.  
  _________________________________________________________________________
  """	

  N = 2**max_level 
  A = numpy.zeros((N,N), dtype = complex)
  random.seed(seed) #seed the random number generator
  PI = 3.141592
  for i in range(0, N // 2):
    for j in range(0, N // 2):
      phase = 2*PI*random.random()#/random.randrange(1,Arand)
      if i != 0 or j != 0:
        rad = pow((i*i + j*j),(-(H+1)/2) )*random.gauss(0.0, sigma)
      else:
        rad = 0.0
      
      A[i][j] = rad*math.cos(phase) + 1j*rad*math.sin(phase)  # imaginary unit 1j, not the loop index j
      
      if i ==0: 
        i0 = 0
      else:
        i0 = N - i
      
      if j==0:
        j0 = 0
      else:
        j0 = N - j
    
      A[i0][j0] = rad*math.cos(phase) - 1j*rad*math.sin(phase)  # complex conjugate, for Hermitian symmetry
  
  for i in range(1, N // 2):
    for j in range(1, N // 2):
      phase = 2*PI*random.random()#/random.randrange(1,Arand)
      rad = pow((i*i + j*j),(-(H+1)/2) )*random.gauss(0.0, sigma)
      A[i][N-j] = rad * math.cos(phase) + 1j*rad*math.sin(phase)
      A[N-i][j] = rad * math.cos(phase) - 1j*rad*math.sin(phase)
  
  Grid = numpy.real(pylab.ifft2(A))
  if(normalise):
        Grid += numpy.amin(Grid)*-1 + bounds[0]
        Grid = (Grid/numpy.amax(Grid)) * bounds[1]
  return Grid
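
Usage sketch, assuming numpy, math, random, and pylab are imported as the function expects: a 256x256 fractional-Brownian surface (parameters illustrative; smaller H gives a rougher surface):

terrain = SpectralSynthesisFM2D(max_level=8, sigma=1.0, H=0.8, seed=42)
print(terrain.shape, terrain.min(), terrain.max())  # (256, 256), values normalised into [0, 1]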
Example #10
def correlation_transform(f,radius):
    # use it to compute an approximate measure of local correlation
    n = max(f.shape);
    t = np.concatenate( (np.arange(0,n/2+1), np.arange(-n/2,-1)) )
    [Y,X] = np.meshgrid(t,t)
    k = (X**2+Y**2)<=radius**2
    C = np.sum(k)
    k = k/np.sum(k)
    #imageplot(k)
    #plt.show()
    return np.real( pylab.ifft2(pylab.fft2(f) * pylab.fft2(k)) ) - f/C
Example #11
    def update_ret_response(self, new_img):
        '''
        :param new_img: new frame; should be normalized so that tracker_status can estimate the rect_snr
        :return:
        '''
        self.canvas = new_img.copy()
        self.trackNo += 1

        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(new_img, self.pos, self.window_sz,
                               self.cos_window)
        # calculate response of the classifier at all locations
        k = self.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        row, col = pylab.unravel_index(response.argmax(), response.shape)
        # the ROI rect's top-left point plus [row, col]
        self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

        # the pos is not assigned to self.pos immediately; we need to check the psr first.
        # if it is above the threshold (default is 5), self.pos = pos.
        pos = np.array([self.tly, self.tlx]) + np.array([row, col])

        # Noting, for pos(cy,cx)! for cv2.rect rect(x,y,w,h)!
        rect = pylab.array([
            pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2,
            self.target_sz[1], self.target_sz[0]
        ])
        rect = rect.astype(int)

        self.psr, self.trkStatus = self.tracker_status(col, row, response,
                                                       rect, new_img)
        self.pos = pos
        #only update when tracker_status's psr is high
        if (self.psr > 10):
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, self.pos, self.window_sz,
                                   self.cos_window)
            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(
                self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z
        ok = 1
        return ok, rect, self.psr, response
Example #12
def BlurredLaplacian(f, r):
    n = max(f.shape)
    t = np.concatenate((np.arange(0, n / 2 + 1), np.arange(-n / 2, -1)))
    [Y, X] = np.meshgrid(t, t)
    k = np.zeros(Y.shape)
    k[0, 0] = -4
    k[0, -1] = 1
    k[-1, 0] = 1
    k[0, 1] = 1
    k[1, 0] = 1
    k = gaussian_blur(k, r)
    k = k / np.sum(np.abs(k))
    return np.real(pylab.ifft2(pylab.fft2(f) * pylab.fft2(k)))
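
Usage sketch for BlurredLaplacian, which relies on the gaussian_blur helper defined further down in this collection; being FFT-based, the filter also responds at the wrap-around seam of the image borders (sizes and r illustrative):

import numpy as np

f = np.zeros((128, 128))
f[:, 64:] = 1.0                      # vertical step edge
edges = BlurredLaplacian(f, r=2.0)   # strong response along the edge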
Example #13
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape)/2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
Example #14
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
Example #15
def dense_gauss_kernel(sigma, x, y=None):
    """
    Compute the response map of cosine-windowed image patches with a Gaussian kernel.
    Evaluates a Gaussian kernel with bandwidth sigma for all displacements between
    two MxN image patches X and Y, which must be periodic (i.e. pre-processed with
    a cosine window). The result is an MxN response map. If X = Y, omit y when
    calling to speed up the computation.
    This corresponds to Eq. (16) in the paper and to function k = dgk(x1, x2, sigma)
    in Algorithm 1.
    :param sigma: Gaussian kernel bandwidth
    :param x: cosine-windowed image patch
    :param y: None, or the template image patch
    :return: response map
    """
    # Fourier transform of patch x
    xf = pylab.fft2(x)  # x in Fourier domain
    # flatten patch x
    x_flat = x.flatten()
    # squared L2 norm of x
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case: x and y differ, so compute y's Fourier transform and squared norm
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x: reuse the values computed above
        yf = xf
        yy = xx

    # cross-correlation in the Fourier domain, element-wise product
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # back to the spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    # circularly shift the result along the row and column axes
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # evaluate the Gaussian kernel response map
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy

    return pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))
Example #16
    def angularSpectrum(self, d):
        # make intensity distribution
        i2 = Intensity2D(self.nx, self.startx, self.endx, self.ny, self.starty,
                         self.endy, self.wl)

        # Angular spectrum (FT of input wavefield)
        a = pl.fft2(pl.fftshift(self.i))

        print(a)

        # 2d convolution kernel
        k = 2 * pl.pi / self.wl

        print(k)

        # make spatial frequency matrix
        maxsfx = 2 * pl.pi / self.dx
        maxsfy = 2 * pl.pi / self.dy

        print(maxsfx, maxsfy)

        dsfx = 2 * maxsfx / (self.nx)
        dsfy = 2 * maxsfy / (self.ny)

        self.sfx = pl.arange(-maxsfx / 2, maxsfx / 2 + 1e-15, dsfx / 2)
        self.sfy = pl.arange(-maxsfy / 2, maxsfy / 2 + 1e-15, dsfy / 2)

        print(self.sfx)
        print(self.sfy)

        [self.sfxgrid, self.sfygrid] = pl.meshgrid(self.sfx, self.sfy)

        # angular spectrum propagation kernel
        aspk = pl.fftshift(
            pl.exp(1j * d * pl.sqrt(k**2 -
                                    (self.sfxgrid**2 + self.sfygrid**2))))

        print "Angular spectrum propagation kernel"
        print aspk

        # apply angular spectrum propagation kernel and inverse Fourier transform
        i2.i = pl.fftshift(pl.ifft2(aspk * a))

        print(i2.i)
        return i2
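
The same angular-spectrum step as a self-contained numpy sketch (angular_spectrum_propagate is a hypothetical helper, not part of the original code). One detail worth noting: for spatial frequencies beyond k the square root turns imaginary, so casting its argument to complex makes those evanescent components decay instead of producing NaNs:

import numpy as np

def angular_spectrum_propagate(u0, wl, dx, d):
    # exact (non-paraxial) free-space propagation of a sampled field u0
    ny, nx = u0.shape
    k = 2 * np.pi / wl
    kx, ky = np.meshgrid(2 * np.pi * np.fft.fftfreq(nx, d=dx),
                         2 * np.pi * np.fft.fftfreq(ny, d=dx))
    kz = np.sqrt((k**2 - kx**2 - ky**2).astype(complex))
    return np.fft.ifft2(np.exp(1j * d * kz) * np.fft.fft2(u0))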
Example #17
def gaussian_blur(f, sigma):
    """ gaussian_blur - gaussian blurs an image
    %
    %   M = perform_blurring(M, sigma, options);
    %
    %   M is the original data
    %   sigma is the std of the Gaussian blur (in pixels)
    %
    %   Copyright (c) 2007 Gabriel Peyre
    """
    if sigma <= 0:
        return
    n = max(f.shape)
    t = np.concatenate((np.arange(0, n / 2 + 1), np.arange(-n / 2, -1)))
    [Y, X] = np.meshgrid(t, t)
    h = np.exp(-(X**2 + Y**2) / (2.0 * float(sigma)**2))
    h = h / np.sum(h)
    return np.real(pylab.ifft2(pylab.fft2(f) * pylab.fft2(h)))
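
Usage sketch (size and sigma illustrative): the blur is periodic because it is applied through the FFT, and the normalized kernel preserves the image mean:

import numpy as np

img = np.random.rand(128, 128)
smooth = gaussian_blur(img, sigma=3.0)
assert smooth.shape == img.shape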
Example #18
    def angularSpectrum(self,d) :
        # make intensity distribution
        i2 = Intensity2D(self.nx,self.startx,self.endx,
                         self.ny,self.starty,self.endy,
                         self.wl)       

        # Angular spectrum (FT of input wavefield)
        a = pl.fft2(pl.fftshift(self.i))
        
        print(a)

        # 2d convolution kernel
        k = 2*pl.pi/self.wl

        print(k)

        # make spatial frequency matrix
        maxsfx = 2*pl.pi/self.dx
        maxsfy = 2*pl.pi/self.dy

        print(maxsfx, maxsfy)
        
        dsfx = 2*maxsfx/(self.nx)
        dsfy = 2*maxsfy/(self.ny)
        
        self.sfx = pl.arange(-maxsfx/2,maxsfx/2+1e-15,dsfx/2)
        self.sfy = pl.arange(-maxsfy/2,maxsfy/2+1e-15,dsfy/2)

        print(self.sfx)
        print(self.sfy)

        [self.sfxgrid, self.sfygrid] = pl.meshgrid(self.sfx,self.sfy)

        # angular spectrum propagation kernel 
        aspk = pl.fftshift(pl.exp(1j*d*pl.sqrt(k**2 -(self.sfxgrid**2 + self.sfygrid**2))))

        print "Angular spectrum propagation kernel"
        print aspk

        # apply angular spectrum propagation kernel and inverse Fourier transform
        i2.i = pl.fftshift(pl.ifft2(aspk*a))
        
        print(i2.i)
        return i2
Example #19
def gaussian_blur(f, sigma):

    """ gaussian_blur - gaussian blurs an image
    %
    %   M = perform_blurring(M, sigma, options);
    %
    %   M is the original data
    %   sigma is the std of the Gaussian blur (in pixels)
    %
    %   Copyright (c) 2007 Gabriel Peyre
    """
    if sigma<=0:
        return;
    n = max(f.shape);
    t = np.concatenate( (np.arange(0,n/2+1), np.arange(-n/2,-1)) )
    [Y,X] = np.meshgrid(t,t)
    h = np.exp( -(X**2+Y**2)/(2.0*float(sigma)**2) )
    h = h/np.sum(h)
    return np.real( pylab.ifft2(pylab.fft2(f) * pylab.fft2(h)) )
Example #20
def SpectralSynthesisFM2D(max_level, sigma, seed, H, normalise, lbound, ubound):

    N = 2 ** max_level
    A = numpy.zeros((N, N), dtype=complex)
    random.seed(seed)  # seed the random number generator
    PI = 3.141592
    for i in range(0, N // 2):
        for j in range(0, N // 2):
            phase = 2 * PI * random.random()  # /random.randrange(1,Arand)
            if i != 0 or j != 0:
                rad = pow((i * i + j * j), (-(H + 1) / 2)) * random.gauss(0.0, sigma)
            else:
                rad = 0.0

            A[i][j] = rad * math.cos(phase) + 1j * rad * math.sin(phase)  # 1j, not the loop index j

            if i == 0:
                i0 = 0
            else:
                i0 = N - i

            if j == 0:
                j0 = 0
            else:
                j0 = N - j

            A[i0][j0] = rad * math.cos(phase) - 1j * rad * math.sin(phase)

    for i in range(1, N // 2):
        for j in range(1, N // 2):
            phase = 2 * PI * random.random()  # /random.randrange(1,Arand)
            rad = pow((i * i + j * j), (-(H + 1) / 2)) * random.gauss(0.0, sigma)
            A[i][N - j] = rad * math.cos(phase) + 1j * rad * math.sin(phase)
            A[N - i][j] = rad * math.cos(phase) - 1j * rad * math.sin(phase)

    Grid = numpy.real(pylab.ifft2(A))
    if normalise:
        Grid += numpy.amin(Grid) * -1 + lbound
        Grid = (Grid / numpy.amax(Grid)) * ubound
    return Grid
Example #21
    def find(self, image):
        if self.should_resize_image:
            self.image = scipy.misc.imresize(image, 0.5)
            self.image = self.image / 255.0  # hack around scipy
        else:
            self.image = image

        # get subwindow at current estimated target position,
        # to train the classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # calculate response of the classifier at all locations
        k = dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = self.response
        self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
        self.pos = self.pos - pylab.floor(self.sz / 2) + [self.row, self.col]

        return self.pos
Example #22
    def find(self, image):
        if len(image.shape) == 3 and image.shape[2] > 1:
            image = rgb2gray(image)
        self.image = image
        if self.should_resize_image:
            self.image = scipy.misc.imresize(self.image, 0.5)
            self.image = self.image / 255.0

        # get subwindow at current estimated target position,
        # to train the classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # calculate response of the classifier at all locations
        k = dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = self.response
        self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
        self.pos = self.pos - pylab.floor(self.sz/2) + [self.row, self.col]

        return self.pos
Example #23
    def update(self, new_img):
        self.canvas   = new_img.copy()
        self.trackNo +=1

        res_max = 0.
        for scale_rate in self.scale_ratios:
            template_size = scale_rate * self.window_sz_new
            # get subwindow at current estimated target position, to train the classifier
            x = self.get_subwindow(new_img, self.pos_list[-1], template_size)
            # calculate response of the classifier at all locations
            k = self.dense_gauss_kernel(self.sigma, x, self.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(self.alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            if res_max < np.max(r):
               res_row = int(row*scale_rate)
               res_col = int(col*scale_rate)
               self.window_sz_new = template_size
               self.target_sz = self.target_sz*scale_rate
               res_ave, res_max, self.psr = self.response_win_ave_max(response, col, row, winsize=12)
               self.scale_rate = scale_rate


        # the ROI rect's top-left point plus [row, col]
        pos = self.pos_list[-1] - pylab.floor(self.window_sz_new / 2) + [res_row, res_col]

        rect = pylab.array([pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        rect = rect.astype(int)
        #print (self.target_sz, self.psr, self.scale_rate)
        if debug:
            if self.trackNo == 1:
                #pylab.ion()  # interactive mode on
                self.fig, self.axes = pylab.subplots(ncols=3)
                self.fig.show()
                # We need to draw the canvas before we start animating...
                self.fig.canvas.draw()

                k_img = self.axes[0].imshow(k,animated=True)
                x_img = self.axes[1].imshow(x,animated=True)
                r_img = self.axes[2].imshow(response,animated=True)

                self.subimgs = [k_img, x_img, r_img]
                # Let's capture the background of the figure
                self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]

                # tracking_rectangle = pylab.Rectangle((0, 0), 0, 0)
                # tracking_rectangle.set_color((1, 0, 0, 0.5))
                # tracking_figure_axes.add_patch(tracking_rectangle)
                #
                # gt_point = pylab.Circle((0, 0), radius=5)
                # gt_point.set_color((0, 0, 1, 0.5))
                # tracking_figure_axes.add_patch(gt_point)
                # tracking_figure_title = tracking_figure.suptitle("")
                pylab.show(block=False)
                #self.fig.show()
            else:
                self.subimgs[0].set_data(k)
                self.subimgs[1].set_data(x)
                self.subimgs[2].set_data(response)
                items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
                for j, (subimg, ax, background) in items:
                    self.fig.canvas.restore_region(background)
                    ax.draw_artist(subimg)
                    self.fig.canvas.blit(ax.bbox)
                pylab.show(block=False)

        if self.psr > 10:
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, pos, self.window_sz_new)

            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z


        self.roi_list.append(self.get_imageROI(new_img, rect))
        self.pos_list.append(pos)
        self.rect_list.append(rect)
        ok = 1
        return ok, rect, self.psr
def perform_convolution(x, h, bound="sym"):
    """
        perform_convolution - compute convolution with centered filter.
        y = perform_convolution(x,h,bound);
        The filter 'h' is centred at 0 for odd
        length of the filter, and at 1/2 otherwise.
        This works either for 1D or 2D convolution.
        For 2D the matrix have to be square.
        'bound' is either 'per' (periodic extension)
        or 'sym' (symmetric extension).
        Copyright (c) 2004 Gabriel Peyre
    """

    if bound not in ["sym", "per"]:
        raise Exception('bound should be sym or per')

    if np.ndim(x) == 3 and np.shape(x)[2] < 4:
        #for color images
        y = x
        for i in range(np.shape(x)[2]):
            y[:, :, i] = perform_convolution(x[:, :, i], h, bound)
        return y

    if np.ndim(x) == 3 and np.shape(x)[2] >= 4:
        raise Exception(
            'Not yet implemented for 3D array, use smooth3 instead.')

    n = np.shape(x)
    p = np.shape(h)

    nd = np.ndim(x)

    if nd == 1:
        n = len(x)
        p = len(h)

    if bound == 'sym':

        #################################
        # symmetric boundary conditions #
        d1 = np.asarray(p) // 2  # padding before
        d2 = p - d1 - 1  # padding after

        if nd == 1:
            ################################# 1D #################################
            nx = len(x)
            xx = np.vstack((x[:d1][::-1], x, x[nx - d2:][::-1]))  # mirror-pad by d1 before, d2 after
            y = signal.convolve(xx, h)
            y = y[p:nx - p - 1]

        elif nd == 2:
            ################################# 2D #################################
            #double symmetry
            nx, ny = np.shape(x)
            xx = x
            # mirror-pad by d1 before and d2 after, along both axes
            xx = np.vstack(
                (xx[:d1[0], :][::-1, :], xx, xx[nx - d2[0]:, :][::-1, :]))
            xx = np.hstack(
                (xx[:, :d1[1]][:, ::-1], xx, xx[:, ny - d2[1]:][:, ::-1]))
            y = signal.convolve2d(xx, h, mode="same")
            y = y[(2 * d1[0]):(2 * d1[0] + n[0] + 1),
                  (2 * d1[1]):(2 * d1[1] + n[1] + 1)]

    else:

        ################################
        # periodic boundary conditions #

        if p > n:
            raise Exception('h filter should be shorter than x.')
        n = np.asarray(n)
        p = np.asarray(p)
        d = np.floor((p - 1) / 2.)
        if nd == 1:
            h = np.vstack((h[int(d):], np.vstack((np.zeros(n - p), h[:int(d)]))))
            y = np.real(pyl.ifft(pyl.fft(x) * pyl.fft(h)))
        else:
            h = np.vstack((h[int(d[0]):, :],
                           np.vstack((np.zeros([n[0] - p[0],
                                                p[1]]), h[:int(d[0]), :]))))
            h = np.hstack(
                (h[:, int(d[1]):],
                 np.hstack((np.zeros([n[0], n[1] - p[1]]), h[:, :int(d[1])]))))
            y = np.real(pyl.ifft2(pyl.fft2(x) * pyl.fft2(h)))
    return y
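
Usage sketch for perform_convolution (filter and sizes illustrative); it assumes numpy as np, scipy's signal module, and pylab as pyl are imported, matching the names the function body uses:

import numpy as np

x = np.random.rand(64, 64)
h = np.ones((5, 5)) / 25.0                       # centred 5x5 averaging filter
y_per = perform_convolution(x, h, bound="per")   # periodic extension
y_sym = perform_convolution(x, h, bound="sym")   # symmetric extension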
Example #25
def track(descriptor):
    global options
    desc_channel_count = descriptor.initialize(options.use_gpu)

    roi = loader.track_bounding_box_from_first_frame()
    roi = [
        roi[0] + roi[2] / 2, roi[1] + roi[3] / 2, roi[2], roi[3],
        roi[2] * (1 + kcf_params.padding), roi[3] * (1 + kcf_params.padding)
    ]

    output_sigma = pylab.sqrt(
        pylab.prod([roi[3], roi[2]])) * kcf_params.output_sigma_factor

    avg_count = 0

    global cos_window
    cos_window = None
    template = [None for i in range(desc_channel_count)]
    alpha_f = [None for i in range(desc_channel_count)]
    response = [None for i in range(desc_channel_count)]
    yf = None

    track_time = 0
    full_track_time = time.time()
    while loader.has_next_frame():
        im = loader.next_frame()

        if (loader.frame_number() % 10) == 0:
            print("Processing frame {}".format(loader.frame_number()))

        start_time = time.time()

        is_first_frame = loader.frame_number() == 0

        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)
        dmv = None

        if is_first_frame:
            grid_y = pylab.arange(subwindow.shape[1]) - pylab.floor(
                subwindow.shape[1] / 2)
            grid_x = pylab.arange(subwindow.shape[2]) - pylab.floor(
                subwindow.shape[2] / 2)

            rs, cs = pylab.meshgrid(grid_x, grid_y)
            y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
            yf = pylab.fft2(y)
        else:

            for i in range(0, subwindow.shape[0]):
                channel = subwindow[i, :, :]

                # calculate response of the classifier at all locations
                k = dense_gauss_kernel(kcf_params.sigma, channel, template[i])
                kf = pylab.fft2(k)
                alphaf_kf = pylab.multiply(alpha_f[i], kf)
                response[i] = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

                # argmax = response[i].argmax()
                #
                # if response[i].item(argmax) != 0:
                #     tmp = pylab.unravel_index(argmax, response[i].shape)
                #     if value < response[i][tmp[0],tmp[1]]:
                #         avg_x = tmp[1]
                #         avg_y = tmp[0]
                #         avg_count = 1
                #         value = response[i][tmp[0],tmp[1]]
                #         chosen_i = i

            anchor = torch.tensor(channels[:, channels.shape[1] // 2,
                                           channels.shape[2] // 2]).unsqueeze(0)
            points = torch.tensor(response).view(channels.shape[0], -1).t()

            dmv = distance_matrix_vector(anchor,
                                         points).view(channels.shape[1],
                                                      channels.shape[2])

            argmax = np.array(dmv).argmax()
            tmp = pylab.unravel_index(argmax, subwindow.shape[1:])
            moved_by = [
                float(tmp[0]) - float(subwindow.shape[1]) / 2,
                float(tmp[1]) - float(subwindow.shape[2]) / 2
            ]
            roi = descriptor.update_roi(roi, moved_by)

        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)

        for i in range(0, subwindow.shape[0]):

            channel = subwindow[i, :, :]

            k = dense_gauss_kernel(kcf_params.sigma, channel)
            new_alpha_f = pylab.divide(
                yf, (pylab.fft2(k) + kcf_params.lambda_value))  # Eq. 7
            new_template = channel

            if is_first_frame:
                alpha_f[i] = new_alpha_f
                template[i] = new_template
            else:
                f = kcf_params.interpolation_factor
                alpha_f[i] = (1 - f) * alpha_f[i] + f * new_alpha_f
                template[i] = (1 - f) * template[i] + f * new_template

        track_time += time.time() - start_time

        results.log_tracked(im, roi, False, template[0], dmv)
    # end of "for each image in video"

    results.log_meta("speed.frames_tracked", loader.frame_number())
    results.log_meta("speed.track_no_io_time", str(track_time) + "s")
    results.log_meta("speed.track_no_io_fps",
                     loader.frame_number() / track_time)
    results.log_meta("speed.track_no_init_time",
                     str(time.time() - full_track_time) + "s")

    results.show_precision()

    return
Example #26
def correction_light(I, method, show_light, mask=None):
    """Corrige la derive eclairement

    :I: array_like ou iplimage
    :method: 'polynomial' or 'frequency'
    :show_light: option affiche correction (true|false)
    :mask: array de zone non interet
    :returns: iplimage 32bit

    """
    from progress import *
    import Tkinter
    if type(I) == cv.iplimage:
        if I.nChannels == 3:
            if method == 'None':
                I = RGB2L(I)
                I = cv2array(I)[:, :, 0]
                I = pymorph.hmin(I, 15, pymorph.sedisk(3))
                I = array2cv(I)
                cv.EqualizeHist(I, I)
                return I
            I = RGB2L(I)
            I_32bit = cv.CreateImage(cv.GetSize(I), cv.IPL_DEPTH_32F, 1)
            cv.ConvertScale(I, I_32bit, 3000.0, 0.0)
            I = cv.CloneImage(I_32bit)
        I = cv2array(I)[:, :, 0]
    elif len(I.shape) == 3:
        I = (I[:, :, 0] + I[:, :, 1] + I[:, :, 2])\
            / 3.0  # average the three channels (to revisit: not used in our case)
    elif method == 'None':
        I = array2cv(I)
        cv.EqualizeHist(I, I)
        return I

    I = np.log(I + 10 ** (-6))
    (H, W) = np.shape(I)
    I_out = I * 0 + 10 ** (-6)
    if method == 'polynomial':
        ## I = M.A with A the polynomial coefficients
        I_flat = I.flatten()
        degree = 3
        print("modification degree 3")
        # degree of the polynomial
        nb_coeff = (degree + 1) * (degree + 2) // 2  # number of coefficients
        [yy, xx] = np.meshgrid(np.arange(W, dtype=np.float64),
                               np.arange(H, dtype=np.float64))
        if mask is not None:
            xx[mask] = 0
            yy[mask] = 0
        # Build the design matrix M
        try:
            M = np.zeros((H * W, nb_coeff), dtype=np.float64)
        except MemoryError:
            print(MemoryError)
            return MemoryError
        i, j = 0, 0  # i, j are the degrees of x, y
        # progress bar
        bar = Tkinter.Tk(className='Correcting Light...')
        m = Meter(bar, relief='ridge', bd=3)
        m.pack(fill='x')
        m.set(0.0, 'Starting correction...')
        for col in np.arange(nb_coeff):
            M[:, col] = (xx.flatten() ** i) * (yy.flatten() ** j)
            i += 1
            m.set(0.5 * float(col) / (nb_coeff - 1))
            if i + j == degree + 1:
                i = 0
                j += 1

        # Least-squares solution via the pseudo-inverse
        try:
            M = pl.pinv(M)
            A = np.dot(M, I_flat)
        except ValueError:
            return ValueError
        # Evaluate the fitted surface
        i, j = 0, 0
        surface = np.zeros((H, W), dtype=np.float64)
        for cmpt in np.arange(nb_coeff):
            surface += A[cmpt] * (xx ** i) * (yy ** j)  # polynomial term
            i += 1
            m.set(0.5 + 0.5 * float(cmpt) / (nb_coeff - 1))
            if i + j == degree + 1:
                i = 0
                j += 1
        bar.destroy()
        I_out = np.exp(I / surface)
        light = surface
    elif method == 'frequency':
        Rx, Ry = 2, 2
        # zero padding
        N = [H, W]
        filtre = np.zeros((N[1], N[0]))
        centre_x = round(N[0] / 2)
        centre_y = round(N[1] / 2)
        print("FFT2D...")
        I_fourier = pl.fftshift(pl.fft2(I, N))

        # Gaussian filter
        [xx, yy] = np.meshgrid(np.arange(N[0], dtype=float),
                               np.arange(N[1], dtype=float))
        filtre = np.exp(-2 * ((xx - centre_x) ** 2 + (yy - centre_y) ** 2) /
                        (Rx ** 2 + Ry ** 2))
        filtre = pl.transpose(filtre)
        I_fourier = I_fourier * filtre
        print("IFFT2D...")
        I_out = (np.abs(pl.ifft2(pl.ifftshift(I_fourier), N)))[0:H, 0:W]
        light = I_out
        I_out = np.exp(I / I_out)
    else:
        light = I * 0
        I_out = I
    # Display Light
    if show_light:
        light = ((light - light.min()) * 3000.0 /
                 light.max()).astype('float32')
        light = array2cv(light)
        fig = pl.figure()
        pl.imshow(light)
        fig.show()

    I_out = (I_out - I_out.min()) * 3000.0 / I_out.max()
    I_out = I_out.astype('uint8')

    # top-hat filtering
    I_out = pymorph.hmin(I_out, 25, pymorph.sedisk(3))
    # convert to iplimage and equalize the contrast
    gr = array2cv(I_out)
    cv.EqualizeHist(gr, gr)
    return gr
def perform_convolution(x,h,bound="sym"):
    """
        perform_convolution - compute convolution with centered filter.
        
        y = perform_convolution(x,h,bound);
        
        The filter 'h' is centred at 0 for odd
        length of the filter, and at 1/2 otherwise.
        
        This works either for 1D or 2D convolution.
        For 2D the matrix have to be square.
        
        'bound' is either 'per' (periodic extension) 
        or 'sym' (symmetric extension).
        
        Copyright (c) 2004 Gabriel Peyre
    """
    
    if bound not in ["sym", "per"]:
        raise Exception('bound should be sym or per')
    
    if np.ndim(x) == 3 and np.shape(x)[2] < 4:
        #for color images
        y = x;
        for i in range(np.shape(x)[2]):
            y[:,:,i] = perform_convolution(x[:,:,i],h, bound)
        return y
    
    if np.ndim(x) == 3 and np.shape(x)[2] >= 4:
        raise Exception('Not yet implemented for 3D array, use smooth3 instead.')
    
    n = np.shape(x)
    p = np.shape(h)
    
    nd = np.ndim(x)
    
    if nd == 1: 
        n = len(x)
        p = len(h)
    
    if bound == 'sym':
    
        #################################
        # symmetric boundary conditions #
        raise Exception('Not yet implemented')
        
    else:
    
        ################################
        # periodic boundary conditions #
        
        if p > n:
            raise Exception('h filter should be shorter than x.')
        
        n = np.asarray(n) 
        p = np.asarray(p)   
        d = np.floor((p-1)/2.)
        if nd == 1:    
            h = np.vstack((h[int(d):],np.vstack((np.zeros(n-p),h[:int(d)]))))
            y = np.real(pyl.ifft(pyl.fft(x)*pyl.fft(h)))
        else:
            h = np.vstack((h[int(d[0]):,:],np.vstack((np.zeros([n[0]-p[0],p[1]]),h[:int(d[0]),:]))))
            h = np.hstack((h[:,int(d[1]):],np.hstack((np.zeros([n[0],n[1]-p[1]]),h[:,:int(d[1])]))))
            y = np.real(pyl.ifft2(pyl.fft2(x)*pyl.fft2(h)))     
    return y
Example #28
    def update(self, new_img):
        '''
        :param new_img: new frame; should be normalized so that tracker_status can estimate the rect_snr
        :return:
        '''
        self.canvas = new_img.copy()
        self.trackNo += 1

        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(new_img, self.pos, self.window_sz,
                               self.cos_window)
        # calculate response of the classifier at all locations
        k = self.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9
        self.response = response
        self.responsePeak = np.max(response)
        # target location is at the maximum response
        row, col = pylab.unravel_index(response.argmax(), response.shape)
        # the ROI rect's top-left point plus [row, col]
        self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

        # the pos is not assigned to self.pos immediately; we need to check the psr first.
        # if it is above the threshold (default is 5), self.pos = pos.
        pos = np.array([self.tly, self.tlx]) + np.array([row, col])

        #Noting, for pos(cy,cx)! for cv2.rect rect(x,y,w,h)!
        rect = pylab.array([
            pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2,
            self.target_sz[1], self.target_sz[0]
        ])
        rect = rect.astype(int)
        self.rect = rect
        self.psr, self.trkStatus = self.tracker_status(col, row, response,
                                                       rect, new_img)
        self.pos = pos

        # #bad quality tracking results
        # if self.psr <= 5  and self.trackNo >=5:
        #     # computing offset based on the last 4 frame's obj_bbox'center.
        #     # using the average center shift as the (offset_x, offset_y)
        #     dif_rect = []
        #     #for iter in [-1, -2, -3]:
        #     for iter in [-1,-2,-3 ]:
        #         dif_rect.append(np.array(self.FourRecentRects[iter]) - np.array(self.FourRecentRects[iter - 1]))
        #     offset_rect = np.mean(dif_rect, 0)
        #     offset = (offset_rect[0] + offset_rect[2] / 2, offset_rect[1] + offset_rect[3] / 2)
        #     print('Tracker offset is activited (%d, %d)' % (offset[0], offset[1]))
        #     self.pos = self.pos + np.array([ offset[1], offset[0] ])
        #     # rect = pylab.array([self.pos[1] - self.target_sz[1] / 2, self.pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        #     # rect = rect.astype(np.int)
        #     # self.FourRecentRects[self.trackNo % 4] = rect
        # else:
        #     self.pos = pos
        #     self.FourRecentRects[self.trackNo % 4] = rect

        #if self.psr <= 5:
        #     # computing offset based on the last 4 frame's obj_bbox'center.
        #     # using the average center shift as the (offset_x, offset_y)
        #
        #     self.pos = self.pos + self.posOffset
        #     print self
        #     print('Tracker Default Offset is activited (%d, %d)' % (self.posOffset[1], self.posOffset[0]))

        #
        #     # rect = pylab.array([self.pos[1] - self.target_sz[1] / 2, self.pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        #     # rect = rect.astype(np.int)
        #     # self.FourRecentRects[self.trackNo % 4] = rect
        #else:
        #     self.pos = pos
        #     self.FourRecentRects[self.trackNo % 4] = rect
        #     if self.trackNo >= 5:
        #         dif_rect = []
        #         # for iter in [-1, -2, -3]:
        #         for iter in [-1, -2, -3]:
        #             dif_rect.append(np.array(self.FourRecentRects[iter]) - np.array(self.FourRecentRects[iter - 1]))
        #         offset_rect = np.mean(dif_rect, 0)
        #         offset = (offset_rect[0] + offset_rect[2] / 2, offset_rect[1] + offset_rect[3] / 2)
        #         self.posOffset =  np.array([offset[1], offset[0]])

        #print ('tracker\'status:res_win_ave,max,psr, rect_snr', self.trkStatus)
        # if debug == True:
        #     if self.trackNo == 1:
        #         #pylab.ion()  # interactive mode on
        #         self.fig, self.axes = pylab.subplots(ncols=3)
        #         self.fig.show()
        #         # We need to draw the canvas before we start animating...
        #         self.fig.canvas.draw()
        #
        #         k_img = self.axes[0].imshow(k,animated=True)
        #         x_img = self.axes[1].imshow(x,animated=True)
        #         r_img = self.axes[2].imshow(response,animated=True)
        #
        #         self.subimgs = [k_img, x_img, r_img]
        #         # Let's capture the background of the figure
        #         self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]
        #
        #         pylab.show(block=False)
        #     else:
        #         self.subimgs[0].set_data(k)
        #         self.subimgs[1].set_data(x)
        #         self.subimgs[2].set_data(response)
        #         items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
        #         for j, (subimg, ax, background) in items:
        #             self.fig.canvas.restore_region(background)
        #             ax.draw_artist(subimg)
        #             self.fig.canvas.blit(ax.bbox)
        #         pylab.show(block=False)

        #only update when tracker_status's psr is high
        if (self.psr > 10):
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, self.pos, self.window_sz,
                                   self.cos_window)
            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(
                self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z
        ok = 1
        return ok, rect, self.psr, response
Example #29
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    # print("yf.shape ==", yf.shape)
    # print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if True and ((frame % 10) == 0):
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        # print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train the classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
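
Condensed out of the loop above, this is a hedged one-step illustration of the two equations the tracker runs on, training (Eq. 7) and detection (Eq. 9), using dense_gauss_kernel from the earlier examples; the sizes, sigma, and lambda are illustrative:

import numpy as np
import pylab

sz = (48, 48)
cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))
x = np.random.rand(*sz) * cos_window   # training patch
z = x.copy()                           # template

# desired Gaussian-shaped output, then Eq. 7: alphaf = yf / (fft2(k_xx) + lambda)
output_sigma = np.sqrt(np.prod(sz)) / 16.0
rs, cs = np.meshgrid(np.arange(sz[1]) - sz[1] // 2,
                     np.arange(sz[0]) - sz[0] // 2)
yf = pylab.fft2(np.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2)))
alphaf = yf / (pylab.fft2(dense_gauss_kernel(0.2, x)) + 1e-2)

# Eq. 9: response = real(ifft2(alphaf * fft2(k_xz)))
response = np.real(pylab.ifft2(alphaf * pylab.fft2(dense_gauss_kernel(0.2, x, z))))
row, col = np.unravel_index(response.argmax(), response.shape)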
Example #30
    def unweight2d(self, data, ov, bw):

        print("  Applying UNWEIGHT with ov=[" + str(ov[0]) + "," + str(ov[1]) +
              "] ... ")

        n0 = data.shape[0]
        n1 = data.shape[1]

        # Percentage -> Pixels, secure having integers
        #------------------------------------------------------------
        bw0 = int(np.floor((bw[0] * n0 / 100.) / 2) * 2)  # even integer
        bw1 = int(np.floor((bw[1] * n1 / 100.) / 2) * 2)  # even integer

        ov0 = int(np.floor(ov[0]))
        ov1 = int(np.floor(ov[1]))
        #------------------------------------------------------------

        spec0 = py.fft2(data)
        spec0 = np.roll(spec0, data.shape[0] // 2, axis=0)
        spec0 = np.roll(spec0, data.shape[1] // 2, axis=1)

        # Hamming at processed bandwidth
        #-----------------------------------------------
        if 1:
            t0 = np.arange(bw0) / float(bw0)
            t1 = np.arange(bw1) / float(bw1)
            hamming0 = 0.54 - 0.46 * np.cos(2 * np.pi * t0)
            hamming1 = 0.54 - 0.46 * np.cos(2 * np.pi * t1)

            unham0 = np.zeros(n0)
            unham1 = np.zeros(n1)

            unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2] = hamming0
            unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2] = hamming1
        #-----------------------------------------------

        spec0_profile0 = np.abs(spec0).mean(axis=1)
        maxv0 = 0.95 * np.max(np.abs(spec0_profile0))
        spec0_profile1 = np.abs(spec0).mean(axis=0)
        maxv1 = 0.95 * np.max(np.abs(spec0_profile1))

        # Remove doppler shift and range spectrum shift
        #------------------------------------------------------------------------------------------------------
        corr0 = np.abs(
            py.ifft(
                py.fft(np.abs(spec0_profile0)) *
                np.conj(py.fft(np.abs(unham0)))))
        corr1 = np.abs(
            py.ifft(
                py.fft(np.abs(spec0_profile1)) *
                np.conj(py.fft(np.abs(unham1)))))

        peak0 = np.where(abs(corr0) == np.max(abs(corr0)))
        off0 = n0 - peak0[0]
        peak1 = np.where(abs(corr1) == np.max(abs(corr1)))
        off1 = n1 - peak1[0]

        spec0 = np.roll(spec0, off0, axis=0)
        spec0 = np.roll(spec0, off1, axis=1)

        spec0_profile0 = np.abs(spec0).mean(axis=1)
        maxv0 = 0.95 * np.max(np.abs(spec0_profile0))
        spec0_profile1 = np.abs(spec0).mean(axis=0)
        maxv1 = 0.95 * np.max(np.abs(spec0_profile1))
        #------------------------------------------------------------------------------------------------------

        # Replace Unhamming filter by profile filter
        #------------------------------------------------------------------------------
        if 1:
            unham0 = self.smooth(spec0_profile0 / maxv0, window_len=11)
            unham1 = self.smooth(spec0_profile1 / maxv1, window_len=11)
        #------------------------------------------------------------------------------

        # Show profiles
        #------------------------------------------------
        show_plots = False
        if show_plots:
            plt.plot(spec0_profile0, 'k-', lw=1, color='blue')
            plt.show()

            plt.plot(spec0_profile1, 'k-', lw=1, color='red')
            plt.show()
        #------------------------------------------------

        # Compare profiles to hamming filter
        #----------------------------------------------------------------------------------------------------------------
        if show_plots:
            plt.plot(spec0_profile0, 'k-', lw=1, color='blue')
            plt.plot(self.smooth(spec0_profile0, window_len=21),
                     'k-',
                     lw=1,
                     color='green')
            plt.plot(maxv0 * unham0, 'k--', lw=1, color='red')
            plt.show()

            plt.plot(spec0_profile1, 'k-', lw=1, color='blue')
            plt.plot(self.smooth(spec0_profile1, window_len=21),
                     'k-',
                     lw=1,
                     color='green')
            plt.plot(maxv1 * unham1, 'k--', lw=1, color='red')
            plt.show()

            plt.plot(spec0_profile0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2],
                     'k-',
                     lw=1,
                     color='blue')
            plt.plot(maxv0 * unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2],
                     'k-',
                     lw=1,
                     color='red')
            plt.show()

            plt.plot(spec0_profile1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2],
                     'k-',
                     lw=1,
                     color='blue')
            plt.plot(maxv1 * unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2],
                     'k-',
                     lw=1,
                     color='red')
            plt.show()

            plt.plot(spec0_profile0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2] /
                     (maxv0 * unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2]),
                     'k-',
                     lw=1,
                     color='blue')
            plt.show()

            plt.plot(spec0_profile1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2] /
                     (maxv1 * unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2]),
                     'k-',
                     lw=1,
                     color='blue')
            plt.show()
        #----------------------------------------------------------------------------------------------------------------

        # Unhamming
        #------------------------------------------------------------------
        #print "  mean ..."+str(np.mean(abs(spec0)))
        #print "    Unhamming ..."
        for k in range(0, n1):
            spec0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2,
                  k] /= unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2]  # range (y)
        for k in range(0, n0):
            spec0[k, n1 // 2 - bw1 // 2:n1 // 2 +
                  bw1 // 2] /= unham1[n1 // 2 - bw1 // 2:n1 // 2 +
                                      bw1 // 2]  # azimuth (x)
        #print "    Unhamming done."
        #print "  mean ..."+str(np.mean(abs(spec0)))
        #------------------------------------------------------------------

        # Show profiles
        #------------------------------------------------
        if show_plots:
            abs_spec0 = np.abs(spec0)
            spec0_profile0 = abs_spec0.sum(axis=1)
            spec0_profile1 = abs_spec0.sum(axis=0)

            plt.plot(spec0_profile0, 'k-', lw=1, color='blue')
            plt.show()

            plt.plot(spec0_profile1, 'k-', lw=1, color='red')
            plt.show()
        #------------------------------------------------

        # roll the spectrum back to its original layout (the two sign flips cancel)
        spec0 = np.roll(-spec0, data.shape[0] // 2, axis=0)
        spec0 = np.roll(-spec0, data.shape[1] // 2, axis=1)

        # Zero padding
        #--------------------------------------------------------------------------------------
        n0 = spec0.shape[0]
        n1 = spec0.shape[1]
        zeros0 = np.zeros((bw0 * (ov0 - 1), n1), dtype=complex)

        spec1 = np.concatenate(
            (spec0[0:bw0 // 2, :], zeros0, spec0[-bw0 // 2:, :]), axis=0) * ov0

        n0 = spec1.shape[0]
        n1 = spec1.shape[1]
        zeros1 = np.zeros((n0, bw1 * (ov1 - 1)), dtype=complex)

        spec2 = np.concatenate(
            (spec1[:, 0:bw1 // 2], zeros1, spec1[:, -bw1 // 2:]), axis=1) * ov1
        #--------------------------------------------------------------------------------------

        # Show zero-padding results
        #--------------------------------------------------------------------------------
        if show_plots:
            plt.imshow(np.abs(spec0), origin='lower', interpolation='none',
                       cmap=plt.cm.BuGn)
            plt.show()

            plt.imshow(np.abs(spec1), origin='lower', interpolation='none',
                       cmap=plt.cm.BuGn)
            plt.show()

            plt.imshow(np.abs(spec2), origin='lower', interpolation='none',
                       cmap=plt.cm.BuGn)
            plt.show()
        #--------------------------------------------------------------------------------

        data = py.ifft2(spec2)

        print("  Applying UNWEIGHT with ov=[" + str(ov[0]) + "," + str(ov[1]) +
              "] done. ")

        return data
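The zero-padding step above is plain spectral interpolation: splitting the (unshifted) spectrum in half, inserting zeros in the middle, and scaling by the oversampling factor yields a sinc-interpolated signal. A minimal one-dimensional sketch of the same idea (illustrative only, not part of the class above):

import numpy as np

def oversample_1d(x, ov):
    # sinc-interpolate a 1-D signal by an integer factor via spectral zero-padding
    n = x.size
    spec = np.fft.fft(x)  # DC at index 0, high frequencies in the middle
    zeros = np.zeros((ov - 1) * n, dtype=complex)
    spec = np.concatenate((spec[:n // 2], zeros, spec[n // 2:]))
    return np.fft.ifft(spec) * ov  # ov compensates for the longer inverse FFT

t = np.arange(32)
x = np.cos(2 * np.pi * 3 * t / 32)  # band-limited test signal
y = oversample_1d(x, 4)
print(np.allclose(y.real[::4], x))  # original samples preserved -> True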
import numpy as np
from numpy.fft import fft2, ifft2


def blur(x, h):
    # circular (wrap-around) convolution of x with kernel h via the FFT
    return np.real(ifft2(fft2(x) * fft2(h)))
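A quick usage sketch (assumed, not from the source): the kernel must match the image shape and should be fftshifted so its centre sits at the origin, otherwise the blurred image comes out translated:

from numpy.fft import fftshift

img = np.random.rand(64, 64)

h = np.zeros_like(img)        # 5x5 normalized box kernel, image-sized
h[30:35, 30:35] = 1.0 / 25.0
h = fftshift(h)               # move the kernel centre to index (0, 0)

blurred = blur(img, h)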
Exemple #32
0
def track(input_video_path, show_tracking):
    """
    注意:以 f 结尾的变量表示频率域
    """

    # 目标周围的额外区域
    padding = 1.0
    # 空间带宽,与目标成比例
    output_sigma_factor = 1 / float(16)
    # 高斯核带宽
    sigma = 0.2
    # 正则化系数
    lambda_value = 1e-2
    # 线性插值因子
    interpolation_factor = 0.075
    # 加载视频信息,包括待测试的每帧图片列表,首帧目标矩形框中心点坐标[y,x],矩形框高、宽一半的大小,是否进行图片缩放一半
    # 每帧图片的 ground truth 信息,视频路径
    info = load_video_info.load_video_info(input_video_path)
    img_files, pos, target_sz, should_resize_image, ground_truth, video_path = info

    # window size, taking the padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired gaussian-shaped output, with bandwidth proportional to the target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor
    # coordinate offsets along the box height and width, measured from the box centre
    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # turn the coordinate vectors into coordinate matrices, i.e. a grid over the window
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    # Eq. (19) in the paper: values in [0, 1], largest at the centre, decaying outwards
    y = pylab.exp((-0.5 / output_sigma ** 2) * (rs ** 2 + cs ** 2))
    # 2-D discrete Fourier transform
    yf = pylab.fft2(y)

    # pre-compute the cosine window: Hanning windows over the box height and
    # width, combined by the outer product
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    # global z, response
    plot_tracking.z = None
    alphaf = None
    plot_tracking.response = None
    # iterate over the frames in the image filename list
    for frame, image_filename in enumerate(img_files):
        if (frame % 10) == 0:
            print("Processing frame", frame)
        # load the image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        # convert colour images to grayscale
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray.rgb2gray(im)
        # if requested, downscale the image to half size
        if should_resize_image:
            # PIL's resize expects (width, height), i.e. (cols, rows)
            im = np.array(Image.fromarray(im).resize(
                (im.shape[1] // 2, im.shape[0] // 2)))

        # start timing
        start_time = time.time()

        # extract and pre-process the sub-window, applying the cosine window
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)
        # if this is not the first frame, compute the classifier response
        if not is_first_frame:
            # response of the classifier at all shifted locations
            k = dense_gauss_kernel.dense_gauss_kernel(sigma, x, plot_tracking.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            plot_tracking.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # the target location is at the maximum response
            r = plot_tracking.response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(plot_tracking.response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get the sub-window at the estimated target position, to train the classifier
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares: compute the alphas in the Fourier domain
        k = dense_gauss_kernel.dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame: train on this single image
            alphaf = new_alphaf
            plot_tracking.z = x
        else:
            # subsequent frames: linearly interpolate the model parameters
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            plot_tracking.z = (1 - f) * plot_tracking.z + f * new_z

        # save the current position and accumulate time for the FPS count
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualize the tracking result
        if show_tracking == "yes":
            plot_tracking.plot_tracking(frame, pos, target_sz, im, ground_truth)

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precision plot
        show_precision.show_precision(positions, ground_truth, title)
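Both track() variants implement the same two closed-form steps from the paper: ridge-regression training in the Fourier domain (Eq. 7) and detection (Eq. 9). Stripped of the tracking loop they reduce to a few lines (a sketch with assumed helper names; k_xx and k_xz are outputs of dense_gauss_kernel):

import numpy as np

def train(yf, k_xx, lambda_value=1e-2):
    # Eq. 7: alphaf = F(y) / (F(k_xx) + lambda)
    return yf / (np.fft.fft2(k_xx) + lambda_value)

def detect(alphaf, k_xz):
    # Eq. 9: response = F^-1(alphaf * F(k_xz))
    return np.real(np.fft.ifft2(alphaf * np.fft.fft2(k_xz)))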
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    #spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0]/2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1]/2)
    #[rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    #print("yf.shape ==", yf.shape)
    #print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]),
                             pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)

        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        #print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            # note: scipy.misc.imresize was removed in SciPy 1.3;
            # on newer versions resize via PIL, as in the variant above
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        if debug:
            pylab.figure()
            pylab.imshow(x)
            pylab.title("sub window")

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz/2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifer
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            #first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:",  len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
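show_precision is imported from elsewhere, and its signature even differs between the two variants above. For reference, a self-contained sketch of the usual precision metric (fraction of frames whose centre error stays below each pixel threshold), independent of that helper:

import numpy as np

def precision_curve(positions, ground_truth, max_threshold=50):
    # per-frame euclidean distance between predicted and true centres
    err = np.sqrt(np.sum((positions - ground_truth) ** 2, axis=1))
    thresholds = np.arange(1, max_threshold + 1)
    return np.array([np.mean(err <= t) for t in thresholds])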
Exemple #34
0
    def unweight2d(self, data, ov, bw):
        
        print("  Applying UNWEIGHT with ov=["+str(ov[0])+","+str(ov[1])+"] ... ")
        
        n0 = data.shape[0]
        n1 = data.shape[1]

        # Percentage -> pixels, forcing even integers
        #------------------------------------------------------------
        bw0 = int(np.floor( (bw[0]*n0/100.) /2) * 2)  # even integer
        bw1 = int(np.floor( (bw[1]*n1/100.) /2) * 2)  # even integer
        
        ov0 = int(np.floor(ov[0]))  
        ov1 = int(np.floor(ov[1]))  
        #------------------------------------------------------------
        
        spec0 = py.fft2(data)
        spec0 = np.roll(spec0, data.shape[0] // 2, axis=0)  # centre the spectrum
        spec0 = np.roll(spec0, data.shape[1] // 2, axis=1)
        

        # Hamming at processed bandwidth
        #-----------------------------------------------
        if True:
            t0 = np.arange(bw0) / float(bw0)
            t1 = np.arange(bw1) / float(bw1)
            hamming0 = 0.54 - 0.46 * np.cos(2 * np.pi * t0)
            hamming1 = 0.54 - 0.46 * np.cos(2 * np.pi * t1)

            unham0 = np.zeros(n0)
            unham1 = np.zeros(n1)

            unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2] = hamming0
            unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2] = hamming1
        #-----------------------------------------------


        spec0_profile0 = np.abs(spec0).mean(axis=1)
        maxv0 = 0.95 * np.max(spec0_profile0)
        spec0_profile1 = np.abs(spec0).mean(axis=0)
        maxv1 = 0.95 * np.max(spec0_profile1)


        # Remove doppler shift and range spectrum shift: circularly correlate the
        # measured profiles against the nominal window, then roll the peak to zero lag
        #------------------------------------------------------------------------------------------------------
        corr0 = np.abs(py.ifft(py.fft(spec0_profile0) * np.conj(py.fft(unham0))))
        corr1 = np.abs(py.ifft(py.fft(spec0_profile1) * np.conj(py.fft(unham1))))

        off0 = n0 - int(np.argmax(corr0))
        off1 = n1 - int(np.argmax(corr1))

        spec0 = np.roll(spec0, off0, axis=0)
        spec0 = np.roll(spec0, off1, axis=1)

        spec0_profile0 = np.abs(spec0).mean(axis=1)
        maxv0 = 0.95 * np.max(spec0_profile0)
        spec0_profile1 = np.abs(spec0).mean(axis=0)
        maxv1 = 0.95 * np.max(spec0_profile1)
        #------------------------------------------------------------------------------------------------------
            
        
        # Replace the Hamming-based unweighting filter with the measured profile
        #------------------------------------------------------------------------------
        if True:
            unham0 = self.smooth(spec0_profile0 / maxv0, window_len=11)
            unham1 = self.smooth(spec0_profile1 / maxv1, window_len=11)
        #------------------------------------------------------------------------------


        # Show profiles
        #------------------------------------------------    
        show_plots = False
        if show_plots:
            plt.plot(spec0_profile0,'k-', lw=1, color='blue')
            plt.show()
            
            plt.plot(spec0_profile1,'k-', lw=1, color='red')
            plt.show()        
        #------------------------------------------------    
        
        
        # Compare profiles to hamming filter
        #----------------------------------------------------------------------------------------------------------------
        if show_plots:
            plt.plot(spec0_profile0,'k-', lw=1, color='blue')
            plt.plot(self.smooth(spec0_profile0,window_len=21),'k-', lw=1, color='green')
            plt.plot(maxv0 * unham0,'k--', lw=1, color='red')
            plt.show()

            plt.plot(spec0_profile1,'k-', lw=1, color='blue')
            plt.plot(self.smooth(spec0_profile1,window_len=21),'k-', lw=1, color='green')
            plt.plot(maxv1 * unham1,'k--', lw=1, color='red')
            plt.show()
            
            plt.plot(spec0_profile0[n0//2-bw0//2:n0//2+bw0//2], 'k-', lw=1, color='blue')
            plt.plot(maxv0 * unham0[n0//2-bw0//2:n0//2+bw0//2], 'k-', lw=1, color='red')
            plt.show()

            plt.plot(spec0_profile1[n1//2-bw1//2:n1//2+bw1//2], 'k-', lw=1, color='blue')
            plt.plot(maxv1 * unham1[n1//2-bw1//2:n1//2+bw1//2], 'k-', lw=1, color='red')
            plt.show()

            plt.plot(spec0_profile0[n0//2-bw0//2:n0//2+bw0//2] / (maxv0 * unham0[n0//2-bw0//2:n0//2+bw0//2]), 'k-', lw=1, color='blue')
            plt.show()

            plt.plot(spec0_profile1[n1//2-bw1//2:n1//2+bw1//2] / (maxv1 * unham1[n1//2-bw1//2:n1//2+bw1//2]), 'k-', lw=1, color='blue')
            plt.show()
        #----------------------------------------------------------------------------------------------------------------
        

        # Unhamming
        #------------------------------------------------------------------
        #print "  mean ..."+str(np.mean(abs(spec0)))
        #print "    Unhamming ..."
        for k in range(n1):
            spec0[n0//2-bw0//2:n0//2+bw0//2, k] /= unham0[n0//2-bw0//2:n0//2+bw0//2]   # range (y)
        for k in range(n0):
            spec0[k, n1//2-bw1//2:n1//2+bw1//2] /= unham1[n1//2-bw1//2:n1//2+bw1//2]   # azimuth (x)
        #print "    Unhamming done."    
        #print "  mean ..."+str(np.mean(abs(spec0)))
        #------------------------------------------------------------------
        
        
        # Show profiles
        #------------------------------------------------
        if show_plots:            
            abs_spec0 = np.abs(spec0)
            spec0_profile0 = abs_spec0.sum(axis=1)
            spec0_profile1 = abs_spec0.sum(axis=0)
            
            plt.plot(spec0_profile0,'k-', lw=1, color='blue')
            plt.show()
            
            plt.plot(spec0_profile1,'k-', lw=1, color='red')
            plt.show()        
        #------------------------------------------------


        # roll the spectrum back to its original layout (the two sign flips cancel)
        spec0 = np.roll(-spec0, data.shape[0] // 2, axis=0)
        spec0 = np.roll(-spec0, data.shape[1] // 2, axis=1)
        
        
        # Zero padding
        #--------------------------------------------------------------------------------------
        n0 = spec0.shape[0]
        n1 = spec0.shape[1]
        zeros0 = np.zeros((bw0 * (ov0 - 1), n1), dtype=complex)

        spec1 = np.concatenate((spec0[0:bw0//2, :], zeros0, spec0[-bw0//2:, :]), axis=0) * ov0

        n0 = spec1.shape[0]
        n1 = spec1.shape[1]
        zeros1 = np.zeros((n0, bw1 * (ov1 - 1)), dtype=complex)

        spec2 = np.concatenate((spec1[:, 0:bw1//2], zeros1, spec1[:, -bw1//2:]), axis=1) * ov1
        #--------------------------------------------------------------------------------------
        
        
        # Show zero-padding results
        #--------------------------------------------------------------------------------
        if show_plots:
            plt.imshow(np.abs(spec0), origin='lower', interpolation='none', cmap=plt.cm.BuGn)
            plt.show()

            plt.imshow(np.abs(spec1), origin='lower', interpolation='none', cmap=plt.cm.BuGn)
            plt.show()

            plt.imshow(np.abs(spec2), origin='lower', interpolation='none', cmap=plt.cm.BuGn)
            plt.show()
        #--------------------------------------------------------------------------------
        
        
        data = py.ifft2(spec2)

        print("  Applying UNWEIGHT with ov=["+str(ov[0])+","+str(ov[1])+"] done. ")
        
        return data
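The core of unweight2d is a division by the window profile across the processed bandwidth. A self-contained one-dimensional sketch of that step, assuming the same Hamming definition as above:

import numpy as np

n, bw = 256, 128                          # spectrum length, processed bandwidth
t = np.arange(bw) / float(bw)
hamming = 0.54 - 0.46 * np.cos(2 * np.pi * t)

spec = np.ones(n, dtype=complex)
spec[n // 2 - bw // 2:n // 2 + bw // 2] *= hamming  # a window-weighted, centred spectrum

# unweighting: divide the occupied band by the same window
spec[n // 2 - bw // 2:n // 2 + bw // 2] /= hamming
print(np.allclose(spec, 1.0))             # -> True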