Example #1
def plotGreeks(S, K, T, r, v, var, fig):
    x = pylab.arange(0.9, 1.1, 0.001)
    putPrices = []
    callPrices = []
    if var == 'Market Price':
        x = pylab.multiply(x, S)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)):
            putPrices[i] = putPrice(x[i], K, T, r, v)
        for i in range(len(x)):
            callPrices[i] = callPrice(x[i], K, T, r, v)
    elif var == 'Strike Price':
        x = pylab.multiply(x, K)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)):
            putPrices[i] = putPrice(S, x[i], T, r, v)
        for i in range(len(x)):
            callPrices[i] = callPrice(S, x[i], T, r, v)
    elif var == 'Risk-free-rate':
        x = pylab.multiply(x, r)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)):
            putPrices[i] = putPrice(S, K, T, x[i], v)
        for i in range(len(x)):
            callPrices[i] = callPrice(S, K, T, x[i], v)
    elif var == 'Volatility':
        x = pylab.multiply(x, v)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)):
            putPrices[i] = putPrice(S, K, T, r, x[i])
        for i in range(len(x)):
            callPrices[i] = callPrice(S, K, T, r, x[i])
    elif var == 'Maturity in Days':
        x = pylab.multiply(x, T)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)):
            putPrices[i] = putPrice(S, K, x[i], r, v)
        for i in range(len(x)):
            callPrices[i] = callPrice(S, K, x[i], r, v)
    else:
        raise NameError('No such variable')
    ax = fig.add_subplot(111)
    titleP1 = 'European Options Simulation \n'
    titleP2 = r'$t=' + str(
        T * 12 / 365) + r'\,(months),\,\sigma=' + str(v) + r',\,r=' + str(r)
    titleP3 = r',\,S_{0}=' + str(S) + r',\,K=' + str(K) + '$'
    plotTitle = titleP1 + titleP2 + titleP3
    ax.set_title(plotTitle)
    ax.plot(x, putPrices, label='Put Option')
    ax.plot(x, callPrices, label='Call Option')
    ax.set_xlabel(var)
    ax.set_ylabel('Option Prices')
    ax.legend(loc=9)
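
plotGreeks assumes pricing helpers putPrice and callPrice that are not shown on this page. A minimal Black-Scholes sketch of what they might look like, assuming T is quoted in days and v is an annualized volatility (both conventions are inferred from the title string, not confirmed by the source):

from scipy.stats import norm
import pylab

def callPrice(S, K, T, r, v):
    # hypothetical helper: Black-Scholes European call, T assumed to be in days
    t = T / 365.0
    d1 = (pylab.log(S / K) + (r + 0.5 * v ** 2) * t) / (v * pylab.sqrt(t))
    d2 = d1 - v * pylab.sqrt(t)
    return S * norm.cdf(d1) - K * pylab.exp(-r * t) * norm.cdf(d2)

def putPrice(S, K, T, r, v):
    # hypothetical helper: European put via put-call parity
    t = T / 365.0
    return callPrice(S, K, T, r, v) - S + K * pylab.exp(-r * t)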
Example #2
def trapezoidArray(f,dx):
    """
    same as trapezoidRule() but return the integral as fn of z
    (mostly in-place operations for optimized memory usage)
    """
    i = pl.add.accumulate(f)
    pl.add(i,-0.5*f,  i)
    pl.add(i,-0.5*f[0],  i)
    pl.multiply(i,dx, i)
    return i
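
A quick way to sanity-check trapezoidArray is against an integral with a closed form. The sketch below (the test function is illustrative) integrates z**2 on [0, 1), where the running integral is z**3 / 3:

import pylab as pl

dx = 0.001
z = pl.arange(0, 1, dx)
F = trapezoidArray(z ** 2, dx)              # running integral of z**2
print(abs(F[-1] - z[-1] ** 3 / 3) < 1e-6)   # True: agrees with the analytic value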
Example #3
    def update(self, img):
        img_now = ops.read_image(img)
        if img_now.ndim == 3:
            img_now = ops.rgb2gray(img_now)
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        # print(x)
        k = ops.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = response
        row, col = pylab.unravel_index(r.argmax(), r.shape)

        self.pos = self.pos - pylab.floor(self.sz / 2) + [row, col]
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        k = ops.dense_gauss_kernel(self.sigma, x)

        new_alphaf = pylab.divide(self.yf,
                                  (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

        box_new = np.array([
            self.pos[1] - (self.sz[1]) / 2 + 1, self.pos[0] -
            (self.sz[0]) / 2 + 1, self.sz[1], self.sz[0]
        ],
                           dtype=np.float32)
        return box_new
Example #4
def get_subwindow(im, pos, sz, cos_window):
    """
    使用 replication padding 从图像中获得子窗口。子窗口以 [y, x] 为坐标中心,大小为 [height, width].
    如果子窗口超过图像边界,则复制图像的边界像素值。获得的子窗口将使用余弦窗口标准化到 [-0.5, 0.5]
    :param im: 输入图像
    :param pos: 子窗口中心点坐标 [y, x]
    :param sz: 子窗口大小 [height, width]
    :param cos_window: 余弦子窗口矩阵
    :return: 返回经过余弦子窗口截取的图像矩形框部分
    """
    # 如果不是高、宽组成的数组,而是一个一维数值,则转化为一个数组
    # 目标是子窗矩形化
    if pylab.isscalar(sz):  # square sub-window
        sz = [sz, sz]
    # build a sub-window of size sz centred at pos
    ys = pylab.floor(pos[0]) + pylab.arange(sz[0], dtype=int) - pylab.floor(
        sz[0] / 2)
    xs = pylab.floor(pos[1]) + pylab.arange(sz[1], dtype=int) - pylab.floor(
        sz[1] / 2)
    ys = ys.astype(int)
    xs = xs.astype(int)
    # clamp out-of-bounds coordinates to the border values
    ys[ys < 0] = 0
    ys[ys >= im.shape[0]] = im.shape[0] - 1
    xs[xs < 0] = 0
    xs[xs >= im.shape[1]] = im.shape[1] - 1
    # extract the cropped image patch
    out = im[pylab.ix_(ys, xs)]
    # shift the pixel values from [0, 1] to [-0.5, 0.5]
    out = out.astype(pylab.float64) - 0.5
    # apply the cosine window, Eq. (18) in the paper

    return pylab.multiply(cos_window, out)
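
A minimal usage sketch (the image and window sizes are made up): crop a 32x32 patch centred in a random 100x100 image and weight it with a Hanning-based cosine window:

import pylab

im = pylab.rand(100, 100)  # stand-in grayscale image with values in [0, 1]
cos_window = pylab.outer(pylab.hanning(32), pylab.hanning(32))
patch = get_subwindow(im, pos=[50, 50], sz=[32, 32], cos_window=cos_window)
print(patch.shape)  # (32, 32)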
Example #5
def convolve(f, g):
    ftilda = numpy.fft.fft(f)  # FT of f
    gtilda = numpy.fft.fft(g)  # FT of g
    convolution = numpy.fft.ifft(
        pl.multiply(ftilda, gtilda)
    )  # convolution via the convolution theorem for Fourier transforms
    return pl.divide(convolution, len(ftilda)) * T  # T is defined at module level
def dense_gauss_kernel(sigma, x, y=None):
    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        yf = xf
        yy = xx

    xyf = pylab.multiply(xf, pylab.conj(yf))

    xyf_ifft = pylab.ifft2(xyf)
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    return k
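
In the auto-correlation case (y omitted) the kernel response peaks at zero displacement, which the circular shift places at the window centre. A small sketch with a made-up windowed input:

import pylab

x = pylab.outer(pylab.hanning(64), pylab.hanning(64)) * pylab.rand(64, 64)
k = dense_gauss_kernel(0.2, x)
print(pylab.unravel_index(k.argmax(), k.shape))  # expected: (32, 32), the centre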
Example #7
    def estimate_haversine(self):
        if self.verbose:
            print("Estimating Spatial locality with Haversine distance .")
        self.haversine = DistanceMetric.get_metric("haversine")
        # prepare features  Coordinates
        longi, lat = pl.deg2rad(
            hp.pix2ang(
                nside=self._nside,
                ipix=pl.arange(hp.nside2npix(self._nside)),
                lonlat=True,
            ))
        mask = pl.ma.masked_inside(longi, 0, pl.pi).mask
        longi[mask] = -longi[mask]
        longi[pl.logical_not(mask)] = 2 * pl.pi - longi[pl.logical_not(mask)]
        Theta = pl.array([lat[self.galactic_mask],
                          longi[self.galactic_mask]]).T

        angdist_matr = self.haversine.pairwise(Theta)
        angdist_matr = pl.ma.fix_invalid(angdist_matr, fill_value=pl.pi).data

        # all the distances are equally weighted and so far range in 0,1

        weight_eucl = 1.0 / self._distance_matr.max()
        weight_hav = 1.0 / angdist_matr.max()

        self._distance_matr = pl.multiply(weight_eucl * self._distance_matr,
                                          weight_hav * angdist_matr)

        self._X = pl.concatenate([self._X, Theta], axis=1)
        pass
def apply_cos_window(channels):
    global cos_window

    if cos_window is None:
        cos_window = pylab.outer(pylab.hanning(channels.shape[1]),
                                 pylab.hanning(channels.shape[2]))

    return pylab.multiply(channels[:] - 0.5, cos_window)
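
apply_cos_window relies on the module-level cos_window cache, so a caller only has to reset it when the channel size changes. A usage sketch with invented dimensions:

import pylab

cos_window = None                 # module-level cache used by apply_cos_window
channels = pylab.rand(3, 64, 48)  # hypothetical stack of 3 feature channels
windowed = apply_cos_window(channels)
print(windowed.shape)             # (3, 64, 48); the window broadcasts over channels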
Example #9
def plotGreeks(S, K, T, r, v, var, fig):
    x = pylab.arange(0.9,1.1,0.001)
    putPrices = []
    callPrices = []
    if var == 'Market Price': 
        x = pylab.multiply(x,S)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)): putPrices[i] = putPrice(x[i], K, T, r, v)
        for i in range(len(x)): callPrices[i] = callPrice(x[i], K, T, r, v)
    elif var == 'Strike Price' : 
        x = pylab.multiply(x,K)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)): putPrices[i] = putPrice(S, x[i], T, r, v)
        for i in range(len(x)): callPrices[i] = callPrice(S, x[i], T, r, v)        
    elif var == 'Risk-free-rate' : 
        x = pylab.multiply(x,r)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)): putPrices[i] = putPrice(S, K, T, x[i], v)
        for i in range(len(x)): callPrices[i] = callPrice(S, K, T, x[i], v)        
    elif var == 'Volatility' : 
        x = pylab.multiply(x,v)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)): putPrices[i] = putPrice(S, K, T, r, x[i])
        for i in range(len(x)): callPrices[i] = callPrice(S, K, T, r, x[i])
    elif var == 'Maturity in Days' : 
        x = pylab.multiply(x,T)
        putPrices = x.copy()
        callPrices = x.copy()
        for i in range(len(x)): putPrices[i] = putPrice(S, K, x[i], r, v)
        for i in range(len(x)): callPrices[i] = callPrice(S, K, x[i], r, v)
    else: raise NameError('No such variable')
    ax = fig.add_subplot(111)
    titleP1 = 'European Options Simulation \n'
    titleP2 = r'$t='+str(T*12/365)+r'\,(months),\,\sigma='+str(v)+r',\,r='+str(r)
    titleP3 = r',\,S_{0}='+str(S)+r',\,K='+str(K)+'$'
    plotTitle = titleP1+titleP2+titleP3 
    ax.set_title(plotTitle)
    ax.plot(x, putPrices, label = 'Put Option')
    ax.plot(x, callPrices, label = 'Call Option')
    ax.set_xlabel(var)
    ax.set_ylabel('Option Prices')
    ax.legend(loc=9)
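
With putPrice and callPrice in scope (for instance the Black-Scholes sketch after Example #1), plotGreeks only needs a matplotlib figure to draw on; the parameter values below are illustrative:

import pylab

fig = pylab.figure()
plotGreeks(S=100, K=100, T=90, r=0.05, v=0.25, var='Volatility', fig=fig)
pylab.show()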
Example #10
def beep(freq_phase_amp, L):
    '''Additive synthesis of sinewaves.

    freq_phase_amp -- a numpy array with a row for each sinewave, and
    frequency, phase and amplitude in each column.
    L -- length in samples.
    '''
    res = pl.zeros(L)
    ii = pl.arange(L)
    tmp = pl.empty(L)
    for f, p, a in freq_phase_amp:
        pl.multiply(ii, f * tau / sr, tmp)
        pl.add(tmp, p, tmp)
        pl.sin(tmp, tmp)
        pl.multiply(tmp, a, tmp)
        pl.add(res, tmp, res)

    return res
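
beep reads the module-level constants sr (the sample rate) and tau (2*pi); both values below are assumptions, not taken from the source. One second of A440 plus a quieter octave above:

import pylab as pl

sr = 44100       # assumed sample rate used by beep
tau = 2 * pl.pi  # assumed to be 2*pi

tone = beep(pl.array([[440.0, 0.0, 0.8], [880.0, 0.0, 0.2]]), sr)
print(tone.shape)  # (44100,) -- one second of audio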
Example #11
    def update(self, data):
        blue = pl.less(data, 0.)  # fill in True where less than 0.0
        red = ~blue  # reverse of the above
        # blue
        self.image[..., 2][blue] = pl.minimum(pl.absolute(pl.divide(data[blue], 255.)), 1.)
        # red -- max 40C, so we increase the intensity of the red color 6 times
        self.image[..., 0][red] = pl.minimum(1., pl.divide(pl.multiply(data[red], 6.), 255.))
        pl.imshow(self.image)
        pl.draw()
Example #12
def calculateProbability(name, dependencies, weights=0):
    #given that no specific weights are specified
    if weights == 0:
        weights = [1] * len(dependencies)
    assert len(weights) == len(dependencies)
    #get the current values of the dependencies
    return mc.Lambda(name,
                     lambda dependencies=dependencies, weights=weights:
                     (pK - pM) * sum(pl.multiply(dependencies, weights)) / sum(
                         weights) + pM)
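
Stripped of the pymc Lambda wrapper, the expression just interpolates linearly between pM and pK by the weighted mean of the dependency values. A plain-Python sketch with made-up numbers:

import pylab as pl

pM, pK = 0.1, 0.9               # hypothetical floor and ceiling probabilities
dependencies = [1.0, 0.0, 1.0]  # hypothetical current dependency values
weights = [1, 1, 2]
p = (pK - pM) * sum(pl.multiply(dependencies, weights)) / sum(weights) + pM
print(p)                        # 0.1 + 0.8 * 3/4 = 0.7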
Example #13
    def get_subwindow(self, im, pos, sz):
        """
        Obtain sub-window from image, with replication-padding.
        Returns sub-window of image IM centered at POS ([y, x] coordinates),
        with size SZ ([height, width]). If any pixels are outside of the image,
        they will replicate the values at the borders.

        The subwindow is also normalized to range -0.5 .. 0.5, and the given
        cosine window COS_WINDOW is applied
        (though this part could be omitted to make the function more general).
        """

        if pylab.isscalar(sz):  # square sub-window
            sz = [sz, sz]

        ys = pylab.floor(pos[0]) \
            + pylab.arange(sz[0], dtype=int) - pylab.floor(sz[0]/2)
        xs = pylab.floor(pos[1]) \
            + pylab.arange(sz[1], dtype=int) - pylab.floor(sz[1]/2)

        ys = ys.astype(int)
        xs = xs.astype(int)

        # check for out-of-bounds coordinates,
        # and set them to the values at the borders
        ys[ys < 0] = 0
        ys[ys >= im.shape[0]] = im.shape[0] - 1

        xs[xs < 0] = 0
        xs[xs >= im.shape[1]] = im.shape[1] - 1
        #zs = range(im.shape[2])

        # extract image
        #out = im[pylab.ix_(ys, xs, zs)]
        out = im[pylab.ix_(ys, xs)]

        out = cv2.resize(out, dsize=(int(self.window_sz[1]), int(self.window_sz[0])), fx=0, fy=0)
        #cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

        if debug:
            print("Out max/min value==", out.max(), "/", out.min())
            pylab.figure()
            pylab.imshow(out, cmap=pylab.cm.gray)
            pylab.title("cropped subwindow")

        #pre-process window --
        # normalize to range -0.5 .. 0.5
        # pixels are already in range 0 to 1
        out = out.astype(pylab.float64) - 0.5

        # apply cosine window
        #out = pylab.multiply(cos_window, out)
        out = pylab.multiply(self.cos_window, out)

        return out
Example #14
    def update_ret_response(self, new_img):
        '''
        :param new_img: the new frame; it should be normalized so that tracker_status can estimate the rect_snr
        :return:
        '''
        self.canvas = new_img.copy()
        self.trackNo += 1

        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(new_img, self.pos, self.window_sz,
                               self.cos_window)
        # calculate response of the classifier at all locations
        k = self.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        row, col = pylab.unravel_index(response.argmax(), response.shape)
        # add [row, col] to the roi rect's top-left point
        self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

        # here the pos is not given to self.pos at once, we need to check the psr first.
        # if it is above the threshold (default is 5), self.pos = pos.
        pos = np.array([self.tly, self.tlx]) + np.array([row, col])

        # Note: pos is (cy, cx), while cv2 rects are (x, y, w, h)!
        rect = pylab.array([
            pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2,
            self.target_sz[1], self.target_sz[0]
        ])
        rect = rect.astype(int)  # np.int was removed from NumPy; use built-in int

        self.psr, self.trkStatus = self.tracker_status(col, row, response,
                                                       rect, new_img)
        self.pos = pos
        #only update when tracker_status's psr is high
        if (self.psr > 10):
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, self.pos, self.window_sz,
                                   self.cos_window)
            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(
                self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z
        ok = 1
        return ok, rect, self.psr, response
def get_subwindow(im, pos, sz, cos_window):
    """
    Obtain sub-window from image, with replication-padding.
    Returns sub-window of image IM centered at POS ([y, x] coordinates),
    with size SZ ([height, width]). If any pixels are outside of the image,
    they will replicate the values at the borders.

    The subwindow is also normalized to range -0.5 .. 0.5, and the given
    cosine window COS_WINDOW is applied
    (though this part could be omitted to make the function more general).
    """

    if pylab.isscalar(sz):  # square sub-window
        sz = [sz, sz]

    ys = pylab.floor(pos[0]) \
        + pylab.arange(sz[0], dtype=int) - pylab.floor(sz[0]/2)
    xs = pylab.floor(pos[1]) \
        + pylab.arange(sz[1], dtype=int) - pylab.floor(sz[1]/2)

    ys = ys.astype(int)
    xs = xs.astype(int)

    # check for out-of-bounds coordinates,
    # and set them to the values at the borders
    ys[ys < 0] = 0
    ys[ys >= im.shape[0]] = im.shape[0] - 1

    xs[xs < 0] = 0
    xs[xs >= im.shape[1]] = im.shape[1] - 1
    #zs = range(im.shape[2])

    # extract image
    #out = im[pylab.ix_(ys, xs, zs)]
    out = im[pylab.ix_(ys, xs)]

    if debug:
        print("Out max/min value==", out.max(), "/", out.min())
        pylab.figure()
        pylab.imshow(out, cmap=pylab.cm.gray)
        pylab.title("cropped subwindow")

    # pre-process window --
    # normalize pixel values from the range 0 .. 255
    # to the range -0.5 .. 0.5
    out = out.astype(pylab.float64) / 255 - 0.5

    # apply cosine window
    out = pylab.multiply(cos_window, out)

    return out
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape)/2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
Example #17
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
Example #18
def dense_gauss_kernel(sigma, x, y=None):
    """
    通过高斯核计算余弦子窗口图像块的响应图
    利用带宽是 sigma 的高斯核估计两个图像块 X (MxN) 和 Y (MxN) 的关系。X, Y 是循环的、经余弦窗处理的。输出结果是
    响应图矩阵 MxN. 如果 X = Y, 则函数调用时取消 y,则加快计算。
    该函数对应原文中的公式 (16),以及算法1中的 function k = dgk(x1, x2, sigma)
    :param sigma: 高斯核带宽
    :param x: 余弦子窗口图像块
    :param y: 空或者模板图像块
    :return: 响应图
    """
    # 计算图像块 x 的傅里叶变换
    xf = pylab.fft2(x)  # x in Fourier domain
    # 把图像块 x 拉平
    x_flat = x.flatten()
    # 计算 x 的2范数平方
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case: x and y differ; compute y's Fourier transform and squared norm
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in the Fourier domain (element-wise product)
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # back to the spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    # circularly shift the result along the row and col axes
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # compute the Gaussian-kernel response map
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy

    return pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))
Example #19
    def minimize_partition_measures(self):
        self.Vu = pl.zeros_like(self.Kvals).reshape(-1, 1) * 1.0
        self.Vo = pl.zeros_like(self.Kvals).reshape(-1, 1) * 1.0
        nvals = len(self.Kvals)
        for j, K in enumerate(self.Kvals):
            if self.verbose:
                print(f"Running with K={K} clusters")
            clusters = AgglomerativeClustering(
                n_clusters=K,
                affinity="precomputed",
                linkage="average",
                connectivity=self.connectivity,
            )
            clusters.fit_predict(self._Affinity)
            mu, MD = self.intracluster_distance(K, clusters.labels_)
            dmu = self._metric.pairwise(mu[:, :self._nfeat])
            dmu = pl.ma.fix_invalid(dmu, fill_value=1.0).data
            if self._has_angles:
                dmuang = self.haversine.pairwise(mu[:, self._nfeat:])
                # dmuang =pl.ma.fix_invalid(dmuang,  fill_value=pl.pi).data

                dmu = pl.multiply(dmu, dmuang)

            pl.fill_diagonal(dmu, pl.inf)
            self.Vo[j] = K / dmu.min()  # overpartition meas.
            self.Vu[j] = MD.sum() / K  # underpartition meas.

        # We have to match Vo and Vu: rescale Vo so that it ranges like Vu
        min_max_scaler = preprocessing.MinMaxScaler(
            feature_range=(self.Vu.min(), self.Vu.max()))

        self.Vu = min_max_scaler.fit_transform((self.Vu)).reshape(nvals)
        self.Vo = min_max_scaler.fit_transform((self.Vo)).reshape(nvals)
        # minimizing the squared sum

        Vsv = interp1d(self.Kvals,
                       pl.sqrt(self.Vu**2 + self.Vo**2).T,
                       kind="slinear")

        Krange = pl.arange(self.Kvals.min(), self.Kvals.max())
        minval = pl.argmin(Vsv(Krange) - Vsv(Krange).min())

        Kopt = Krange[minval]
        return Kopt
Example #20
    def find(self, image):
        if self.should_resize_image:
            self.image = scipy.misc.imresize(image, 0.5)
            self.image = self.image / 255.0  # hack around scipy
        else:
            self.image = image

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # calculate response of the classifier at all locations
        k = dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = self.response
        self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
        self.pos = self.pos - pylab.floor(self.sz / 2) + [self.row, self.col]

        return self.pos
Example #21
    def intracluster_distance(self, K, labels):

        mu = pl.zeros(K * self._X.shape[1]).reshape(K, self._X.shape[1])
        MD = pl.zeros(K)
        for k in range(K):
            ck = pl.where(labels == k)[0]
            Xk = self._X[ck, :]
            mu[k] = Xk.mean(axis=0)

            Nk = len(ck)
            E = self._metric.pairwise(X=Xk[:, :self._nfeat],
                                      Y=mu[k, :self._nfeat].reshape(
                                          -1, self._nfeat))

            E = pl.ma.fix_invalid(E, fill_value=1.0).data
            if self._has_angles and Nk > 1:
                H = self.haversine.pairwise(X=Xk[:, self._nfeat:],
                                            Y=mu[k,
                                                 self._nfeat:].reshape(-1, 2))
                # H =pl.ma.fix_invalid(H,  fill_value=pl.pi).data
                E = pl.multiply(E, H)
            MD[k] = E.sum() / Nk
        return mu, MD
    def find(self, image):
        if len(image.shape) == 3 and image.shape[2] > 1:
            image = rgb2gray(image)
        self.image = image
        if self.should_resize_image:
            self.image = scipy.misc.imresize(self.image, 0.5)
            self.image = self.image / 255.0

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # calculate response of the classifier at all locations
        k = dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = self.response
        self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
        self.pos = self.pos - pylab.floor(self.sz/2) + [self.row, self.col]

        return self.pos
Example #23
    def update(self, new_img):
        '''
        :param new_img: the new frame; it should be normalized so that tracker_status can estimate the rect_snr
        :return:
        '''
        self.canvas = new_img.copy()
        self.trackNo += 1

        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(new_img, self.pos, self.window_sz,
                               self.cos_window)
        # calculate response of the classifier at all locations
        k = self.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9
        self.response = response
        self.responsePeak = np.max(response)
        # target location is at the maximum response
        row, col = pylab.unravel_index(response.argmax(), response.shape)
        #add [row, col] to the roi rect's top-left point
        self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

        #here the pos is not given to self.pos at once, we need to check the psr first.
        #if it is above the threshold (default is 5), self.pos = pos.
        pos = np.array([self.tly, self.tlx]) + np.array([row, col])

        #Note: pos is (cy, cx), while cv2 rects are (x, y, w, h)!
        rect = pylab.array([
            pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2,
            self.target_sz[1], self.target_sz[0]
        ])
        rect = rect.astype(int)  # np.int was removed from NumPy; use built-in int
        self.rect = rect
        self.psr, self.trkStatus = self.tracker_status(col, row, response,
                                                       rect, new_img)
        self.pos = pos

        # #bad quality tracking results
        # if self.psr <= 5  and self.trackNo >=5:
        #     # computing offset based on the last 4 frame's obj_bbox'center.
        #     # using the average center shift as the (offset_x, offset_y)
        #     dif_rect = []
        #     #for iter in [-1, -2, -3]:
        #     for iter in [-1,-2,-3 ]:
        #         dif_rect.append(np.array(self.FourRecentRects[iter]) - np.array(self.FourRecentRects[iter - 1]))
        #     offset_rect = np.mean(dif_rect, 0)
        #     offset = (offset_rect[0] + offset_rect[2] / 2, offset_rect[1] + offset_rect[3] / 2)
        #     print('Tracker offset is activated (%d, %d)' % (offset[0], offset[1]))
        #     self.pos = self.pos + np.array([ offset[1], offset[0] ])
        #     # rect = pylab.array([self.pos[1] - self.target_sz[1] / 2, self.pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        #     # rect = rect.astype(np.int)
        #     # self.FourRecentRects[self.trackNo % 4] = rect
        # else:
        #     self.pos = pos
        #     self.FourRecentRects[self.trackNo % 4] = rect

        #if self.psr <= 5:
        #     # computing offset based on the last 4 frame's obj_bbox'center.
        #     # using the average center shift as the (offset_x, offset_y)
        #
        #     self.pos = self.pos + self.posOffset
        #     print self
        #     print('Tracker Default Offset is activated (%d, %d)' % (self.posOffset[1], self.posOffset[0]))

        #
        #     # rect = pylab.array([self.pos[1] - self.target_sz[1] / 2, self.pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        #     # rect = rect.astype(np.int)
        #     # self.FourRecentRects[self.trackNo % 4] = rect
        #else:
        #     self.pos = pos
        #     self.FourRecentRects[self.trackNo % 4] = rect
        #     if self.trackNo >= 5:
        #         dif_rect = []
        #         # for iter in [-1, -2, -3]:
        #         for iter in [-1, -2, -3]:
        #             dif_rect.append(np.array(self.FourRecentRects[iter]) - np.array(self.FourRecentRects[iter - 1]))
        #         offset_rect = np.mean(dif_rect, 0)
        #         offset = (offset_rect[0] + offset_rect[2] / 2, offset_rect[1] + offset_rect[3] / 2)
        #         self.posOffset =  np.array([offset[1], offset[0]])

        #print ('tracker\'status:res_win_ave,max,psr, rect_snr', self.trkStatus)
        # if debug == True:
        #     if self.trackNo == 1:
        #         #pylab.ion()  # interactive mode on
        #         self.fig, self.axes = pylab.subplots(ncols=3)
        #         self.fig.show()
        #         # We need to draw the canvas before we start animating...
        #         self.fig.canvas.draw()
        #
        #         k_img = self.axes[0].imshow(k,animated=True)
        #         x_img = self.axes[1].imshow(x,animated=True)
        #         r_img = self.axes[2].imshow(response,animated=True)
        #
        #         self.subimgs = [k_img, x_img, r_img]
        #         # Let's capture the background of the figure
        #         self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]
        #
        #         pylab.show(block=False)
        #     else:
        #         self.subimgs[0].set_data(k)
        #         self.subimgs[1].set_data(x)
        #         self.subimgs[2].set_data(response)
        #         items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
        #         for j, (subimg, ax, background) in items:
        #             self.fig.canvas.restore_region(background)
        #             ax.draw_artist(subimg)
        #             self.fig.canvas.blit(ax.bbox)
        #         pylab.show(block=False)

        #only update when tracker_status's psr is high
        if (self.psr > 10):
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, self.pos, self.window_sz,
                                   self.cos_window)
            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(
                self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z
        ok = 1
        return ok, rect, self.psr, response
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    # print("yf.shape ==", yf.shape)
    # print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        # print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
Example #25
def calculateProbability(name, dependencies, weights=0):
    #given that no specific weights are specified
    if weights == 0:
        weights = [1]*len(dependencies)
    assert len(weights) == len(dependencies)
    #get the current values of the dependencies
    return mc.Lambda(name, lambda dependencies=dependencies, weights=weights: (pK - pM) * sum(pl.multiply(dependencies,weights))/sum(weights) + pM)
def track(descriptor):
    global options
    desc_channel_count = descriptor.initialize(options.use_gpu)

    roi = loader.track_bounding_box_from_first_frame()
    roi = [
        roi[0] + roi[2] / 2, roi[1] + roi[3] / 2, roi[2], roi[3],
        roi[2] * (1 + kcf_params.padding), roi[3] * (1 + kcf_params.padding)
    ]

    output_sigma = pylab.sqrt(pylab.prod([roi[3], roi[2]
                                          ])) * kcf_params.output_sigma_factor

    avg_count = 0

    global cos_window
    cos_window = None
    template = [None for i in range(desc_channel_count)]
    alpha_f = [None for i in range(desc_channel_count)]
    response = [None for i in range(desc_channel_count)]
    yf = None

    track_time = 0
    full_track_time = time.time()
    while loader.has_next_frame():
        im = loader.next_frame()

        if (loader.frame_number() % 10) == 0:
            print("Processing frame {}".format(loader.frame_number()))

        start_time = time.time()

        is_first_frame = loader.frame_number() == 0

        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)
        dmv = None

        if is_first_frame:
            grid_y = pylab.arange(subwindow.shape[1]) - pylab.floor(
                subwindow.shape[1] / 2)
            grid_x = pylab.arange(subwindow.shape[2]) - pylab.floor(
                subwindow.shape[2] / 2)

            rs, cs = pylab.meshgrid(grid_x, grid_y)
            y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
            yf = pylab.fft2(y)
        else:

            for i in range(0, subwindow.shape[0]):
                channel = subwindow[i, :, :]

                # calculate response of the classifier at all locations
                k = dense_gauss_kernel(kcf_params.sigma, channel, template[i])
                kf = pylab.fft2(k)
                alphaf_kf = pylab.multiply(alpha_f[i], kf)
                response[i] = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

                # argmax = response[i].argmax()
                #
                # if response[i].item(argmax) != 0:
                #     tmp = pylab.unravel_index(argmax, response[i].shape)
                #     if value < response[i][tmp[0],tmp[1]]:
                #         avg_x = tmp[1]
                #         avg_y = tmp[0]
                #         avg_count = 1
                #         value = response[i][tmp[0],tmp[1]]
                #         chosen_i = i

            anchor = torch.tensor(channels[:, channels.shape[1] // 2,
                                           channels.shape[2] // 2]).unsqueeze(0)
            points = torch.tensor(response).view(channels.shape[0], -1).t()

            dmv = distance_matrix_vector(anchor,
                                         points).view(channels.shape[1],
                                                      channels.shape[2])

            argmax = np.array(dmv).argmax()
            tmp = pylab.unravel_index(argmax, subwindow.shape[1:])
            moved_by = [
                float(tmp[0]) - float(subwindow.shape[1]) / 2,
                float(tmp[1]) - float(subwindow.shape[2]) / 2
            ]
            roi = descriptor.update_roi(roi, moved_by)

        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)

        for i in range(0, subwindow.shape[0]):

            channel = subwindow[i, :, :]

            k = dense_gauss_kernel(kcf_params.sigma, channel)
            new_alpha_f = pylab.divide(
                yf, (pylab.fft2(k) + kcf_params.lambda_value))  # Eq. 7
            new_template = channel

            if is_first_frame:
                alpha_f[i] = new_alpha_f
                template[i] = new_template
            else:
                f = kcf_params.interpolation_factor
                alpha_f[i] = (1 - f) * alpha_f[i] + f * new_alpha_f
                template[i] = (1 - f) * template[i] + f * new_template

        track_time += time.time() - start_time

        results.log_tracked(im, roi, False, template[0], dmv)
    # end of "for each image in video"

    results.log_meta("speed.frames_tracked", loader.frame_number())
    results.log_meta("speed.track_no_io_time", str(track_time) + "s")
    results.log_meta("speed.track_no_io_fps",
                     loader.frame_number() / track_time)
    results.log_meta("speed.track_no_init_time",
                     str(time.time() - full_track_time) + "s")

    results.show_precision()

    return
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    #spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0]/2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1]/2)
    #[rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    #print("yf.shape ==", yf.shape)
    #print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]),
                             pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)

        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        #print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        if debug:
            pylab.figure()
            pylab.imshow(x)
            pylab.title("sub window")

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz/2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            #first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:",  len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
Example #28
def track(input_video_path, show_tracking):
    """
    注意:以 f 结尾的变量表示频率域
    """

    # extra area surrounding the target
    padding = 1.0
    # spatial bandwidth, proportional to the target size
    output_sigma_factor = 1 / float(16)
    # gaussian kernel bandwidth
    sigma = 0.2
    # regularization coefficient
    lambda_value = 1e-2
    # linear interpolation factor
    interpolation_factor = 0.075
    # load the video info: the list of frames to test, the centre [y, x] of the
    # target rectangle in the first frame, the rectangle's [height, width] size,
    # whether to shrink the images by half,
    # the per-frame ground-truth info, and the video path
    info = load_video_info.load_video_info(input_video_path)
    img_files, pos, target_sz, should_resize_image, ground_truth, video_path = info

    # window size, taking the padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), with bandwidth proportional to the target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor
    # coordinate lists along the rectangle's height and width,
    # shifted so that the centre point is the origin
    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # turn the coordinate lists into coordinate matrices,
    # i.e. a grid over the 2-D region
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    # Eq. (19) in the paper: values in [0, 1], larger near the centre, smaller away from it
    y = pylab.exp((-0.5 / output_sigma ** 2) * (rs ** 2 + cs ** 2))
    # 2-D discrete Fourier transform
    yf = pylab.fft2(y)

    # build Hanning (weighted cosine) windows for the rectangle height and width;
    # their outer product gives the rectangle's cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))
    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    # global z, response
    plot_tracking.z = None
    alphaf = None
    plot_tracking.response = None
    # visit the images from the filename list in order
    for frame, image_filename in enumerate(img_files):
        if (frame % 10) == 0:
            print("Processing frame", frame)
        # load the image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        # convert colour images to grayscale
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray.rgb2gray(im)
        # if requested, shrink the image to half its size
        if should_resize_image:
            # note: PIL's Image.resize expects (width, height)
            im = np.array(Image.fromarray(im).resize((int(im.shape[1] / 2), int(im.shape[0] / 2))))

        # start the timer
        start_time = time.time()

        # extract and pre-process the sub-window, using the cosine window
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)
        # if this is not the first frame, compute the classifier's response
        if not is_first_frame:
            # calculate the response of the classifier at all locations
            k = dense_gauss_kernel.dense_gauss_kernel(sigma, x, plot_tracking.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            plot_tracking.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # the target location is at the maximum response
            r = plot_tracking.response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(plot_tracking.response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get the cosine-windowed patch at the target position, to train the classifier
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares: calculate the alphas in the Fourier domain
        k = dense_gauss_kernel.dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            plot_tracking.z = x
        else:
            # subsequent frames, interpolate the model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            plot_tracking.z = (1 - f) * plot_tracking.z + f * new_z

        # save the current position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualize the tracking result
        if show_tracking == "yes":
            plot_tracking.plot_tracking(frame, pos, target_sz, im, ground_truth)

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precision plot
        show_precision.show_precision(positions, ground_truth, title)
Example #29
    def dl(self, z):
        d = self.rtc(z)
        M.multiply(d, 1 + z, d)
        return d
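
Read on its own, dl looks like the standard luminosity-distance relation d_L = (1 + z) * d_C applied in place to the comoving distance returned by self.rtc(z) (M is presumably a NumPy alias). A standalone sketch under that assumption, with the class dependency stubbed out:

import numpy as np

def luminosity_distance(comoving_distance, z):
    # d_L = (1 + z) * d_C -- the relation this method appears to apply
    d = np.asarray(comoving_distance, dtype=float).copy()
    np.multiply(d, 1 + z, d)  # in-place, mirroring M.multiply above
    return d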
Example #30
	def rtssmooth(self,Y):

		''' RTS smoother

		Arguments:
		----------
		Y: list of matrices
			observation vectors
		Returns:
		--------
		xb:list of matrices
			Backward posterior state estimates
		Pb:list of matrices
			Backward posterior covariance matrices
		xhat:list of matrices
			Forward posterior state estimates
		Phat:list of matrices
			Forward posterior covariance matrices

		'''

		# initialise
		P=self.model.P0 
		xf=self.model.x0
		# filter quantities
		xfStore =[]
		PfStore=[]

		#calculate the sigma vector weights
		Wm_i,Wc_i=self.sigma_vectors_weights()



		for y in Y:
			#calculate the sigma points matrix
			Chi=self.sigma_vectors(xf,P)
			# update sigma vectors
			Chi_update=pb.matrix(pb.empty_like(Chi))
			for i in range(Chi.shape[1]):
				Chi_update[:,i]=self.model.state_equation(Chi[:,i])	
			#calculate forward prior state estimate
			xf_=pb.sum(pb.multiply(Wm_i,Chi_update),1)
			#perturbation
			Chi_perturbation=Chi_update-xf_
			#weighting
			weighted_Chi_perturbation=pb.multiply(Wc_i,Chi_perturbation)
			#calculate forward prior covariance estimate
			Pf_=Chi_perturbation*weighted_Chi_perturbation.T+self.model.Sigma_e
			#measurement update equation
			Pyy=self.model.C*Pf_*self.model.C.T+self.model.Sigma_varepsilon 
			Pxy=Pf_*self.model.C.T
			K=Pxy*(Pyy.I)
			yhat_=self.model.C*xf_
			#calculate forward posterior state and covariance estimates
			xf=xf_+K*(y-yhat_)
			Pf=(pb.eye(self.model.nx)-K*self.model.C)*Pf_
			#store
			xfStore.append(xf)
			PfStore.append(Pf)

		# initialise the smoother
		T=len(Y)
		xb = [None]*T
		Pb = [None]*T

		xb[-1], Pb[-1] = xfStore[-1], PfStore[-1]

		## smoother
		for t in range(T-2,-1,-1):
			#calculate the sigma points matrix from filtered states
			Chi_smooth=self.sigma_vectors(xfStore[t],PfStore[t]) 
			Chi_smooth_update=pb.matrix(pb.empty_like(Chi_smooth))
			for i in range(Chi_smooth.shape[1]):
				Chi_smooth_update[:,i]=self.model.state_equation(Chi_smooth[:,i])
			
			#calculate backward prior state estimate
			xb_=pb.sum(pb.multiply(Wm_i,Chi_smooth_update),1) 
			#perturbation
			Chi_smooth_perturbation=Chi_smooth-xfStore[t] 
			Chi_smooth_update_perturbation=Chi_smooth_update-xb_ 
			#weighting
			weighted_Chi_smooth_perturbation=pb.multiply(Wc_i,Chi_smooth_perturbation) 
			weighted_Chi_smooth_update_perturbation=pb.multiply(Wc_i,Chi_smooth_update_perturbation)
			#calculate backward prior covariance
			Pb_=Chi_smooth_update_perturbation*weighted_Chi_smooth_update_perturbation.T+self.model.Sigma_e
			#calculate cross-covariance matrix
			M=weighted_Chi_smooth_perturbation*Chi_smooth_update_perturbation.T
			#calculate smoother gain
			S=M*Pb_.I
			#calculate backward posterior state and covariance estimates
			xb[t]=xfStore[t]+S*(xb[t+1]-xb_)
			Pb[t]=PfStore[t]+S*(Pb[t+1]-Pb_)*S.T

			
		return xb,Pb,xfStore,PfStore
Example #31
    def update(self, new_img):
        self.canvas = new_img.copy()
        self.trackNo += 1

        res_max = 0.
        for scale_rate in self.scale_ratios:
            template_size = scale_rate * self.window_sz_new
            # get subwindow at current estimated target position, to train classifier
            x = self.get_subwindow(new_img, self.pos_list[-1], template_size)
            # calculate response of the classifier at all locations
            k = self.dense_gauss_kernel(self.sigma, x, self.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(self.alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            if res_max < np.max(r):
                res_row = int(row * scale_rate)
                res_col = int(col * scale_rate)
                self.window_sz_new = template_size
                self.target_sz = self.target_sz * scale_rate
                res_ave, res_max, self.psr = self.response_win_ave_max(response, col, row, winsize=12)
                self.scale_rate = scale_rate


        #add [res_row, res_col] to the roi rect's top-left point
        pos = self.pos_list[-1] - pylab.floor(self.window_sz_new / 2) + [res_row, res_col]

        rect = pylab.array([pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        rect = rect.astype(int)  # np.int was removed from NumPy; use built-in int
        #print (self.target_sz, self.psr, self.scale_rate)
        if debug:
            if self.trackNo == 1:
                #pylab.ion()  # interactive mode on
                self.fig, self.axes = pylab.subplots(ncols=3)
                self.fig.show()
                # We need to draw the canvas before we start animating...
                self.fig.canvas.draw()

                k_img = self.axes[0].imshow(k,animated=True)
                x_img = self.axes[1].imshow(x,animated=True)
                r_img = self.axes[2].imshow(response,animated=True)

                self.subimgs = [k_img, x_img, r_img]
                # Let's capture the background of the figure
                self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]

                # tracking_rectangle = pylab.Rectangle((0, 0), 0, 0)
                # tracking_rectangle.set_color((1, 0, 0, 0.5))
                # tracking_figure_axes.add_patch(tracking_rectangle)
                #
                # gt_point = pylab.Circle((0, 0), radius=5)
                # gt_point.set_color((0, 0, 1, 0.5))
                # tracking_figure_axes.add_patch(gt_point)
                # tracking_figure_title = tracking_figure.suptitle("")
                pylab.show(block=False)
                #self.fig.show()
            else:
                self.subimgs[0].set_data(k)
                self.subimgs[1].set_data(x)
                self.subimgs[2].set_data(response)
                items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
                for j, (subimg, ax, background) in items:
                    self.fig.canvas.restore_region(background)
                    ax.draw_artist(subimg)
                    self.fig.canvas.blit(ax.bbox)
                pylab.show(block=False)

        if self.psr > 10:
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, pos, self.window_sz_new)

            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z


        self.roi_list.append(self.get_imageROI(new_img, rect))
        self.pos_list.append(pos)
        self.rect_list.append(rect)
        ok = 1
        return ok, rect, self.psr