Example no. 1
def plot_phases(in_file, plot_type, plot_log):
    flags = ['histogram','phases']
    plot_flag = 0
    log_flag = 0

    def no_log(x):
        return x

    fig = pylab.figure(1)
    ax = fig.add_subplot(111)

    try:
        img = spimage.sp_image_read(in_file,0)
    except Exception:
        raise IOError("Can't read %s." % in_file)

    values = img.image.reshape(pylab.size(img.image))

    if plot_log:
        log_function = pylab.log
    else:
        log_function = no_log

    if plot_type == PHASES:
        hist = pylab.histogram(pylab.angle(values),bins=500)
        ax.plot((hist[1][:-1]+hist[1][1:])/2.0,log_function(hist[0]))
    elif plot_type == HISTOGRAM:
        hist = pylab.histogram2d(pylab.real(values),pylab.imag(values),bins=500)
        ax.imshow(log_function(hist[0]),extent=(hist[2][0],hist[2][-1],-hist[1][-1],-hist[1][0]),interpolation='nearest')
    else:
        ax.plot(pylab.real(values),pylab.imag(values),'.')
    return fig
Example no. 2
def plot_phases(in_file, plot_type, plot_log):
    plot_flag = 0

    def no_log(x):
        return x

    fig = pylab.figure(1)
    ax = fig.add_subplot(111)

    try:
        img = spimage.sp_image_read(in_file, 0)
    except IOError:
        raise IOError("Can't read %s." % in_file)

    values = img.image.reshape(pylab.size(img.image))

    if plot_log:
        log_function = pylab.log
    else:
        log_function = no_log

    if plot_type == PHASES:
        hist = pylab.histogram(pylab.angle(values), bins=500)
        ax.plot((hist[1][:-1] + hist[1][1:]) / 2, log_function(hist[0]))
    elif plot_type == HISTOGRAM:
        hist = pylab.histogram2d(pylab.real(values),
                                 pylab.imag(values),
                                 bins=500)
        ax.imshow(log_function(hist[0]),
                  extent=(hist[2][0], hist[2][-1], -hist[1][-1], -hist[1][0]),
                  interpolation='nearest')
    else:
        ax.plot(pylab.real(values), pylab.imag(values), '.')
    return fig
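A minimal usage sketch for plot_phases, assuming the function and the PHASES/HISTOGRAM constants it compares against live in the same script, and that spimage and pylab are importable; 'reconstruction.h5' is a placeholder file name:

PHASES, HISTOGRAM = 'phases', 'histogram'   # assumed module-level constants
fig = plot_phases('reconstruction.h5', PHASES, plot_log=True)
pylab.show()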
Example no. 3
def GetData(datafile, N, seed, Nsamples = 0, bonds = [], Z = [], X = [], ZZ = [], beta = 1.0):
    # Either from a pre-generated set
    if (datafile is not None):
        data = np.loadtxt(datafile, dtype="int")
        if data.ndim == 1: (Nsamples, Nclamped) = (1, len(data)); data = [data]
        else:              (Nsamples, Nclamped) = data.shape

        if Nclamped > N:
            print('Training set vectors exceed the graph size')
            return 0


    # Or generate it from a  Hamiltonian at a given beta with help of ED
    else:
        Nclamped = N
        if Nclamped > N:
            print('Training set vectors exceed the graph size')
            return 0
        BM = bm.BoltzmannMachine(N, bonds, Z, X, ZZ , beta)
        probTable = np.zeros(2**Nclamped)
        for i in range(2**Nclamped):
            cbits = bitfield(i)                              # convert i to a list of bits
            cbits = [0]*(Nclamped-len(cbits))+cbits.tolist() # keep the list length constant
            BM.setProjector(cbits)
            if i==0: probTable[i] = real(BM.evaluateProjector())
            else:    probTable[i] = probTable[i-1] + real(BM.evaluateProjector())

        rm.seed(seed)
        data = []
        for i in range(Nsamples):
            RN = rm.random()
            index = np.searchsorted(probTable, RN)
            cbits = bitfield(index)
            cbits = [0]*(Nclamped-len(cbits))+cbits.tolist() # keep the list length constant
            data += [cbits]
        del BM

    # Find unique states and count them
    udata = []
    cdata = collections.OrderedDict()
    for i, d in enumerate(data):
        d = np.asarray(d)  # rows may be numpy arrays or plain lists
        if d.tolist() not in udata:
            udata += [d.tolist()]
            cdata[repr(d)] = 1
        else:
            cdata[repr(d)] += 1
    weights = np.array(list(cdata.values())) / float(Nsamples)
    data    = udata


    return data, weights
Example no. 4
    def update(self, img):
        img_now = ops.read_image(img)
        if img_now.ndim == 3:
            img_now = ops.rgb2gray(img_now)
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        # print(x)
        k = ops.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = response
        row, col = pylab.unravel_index(r.argmax(), r.shape)

        self.pos = self.pos - pylab.floor(self.sz / 2) + [row, col]
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        k = ops.dense_gauss_kernel(self.sigma, x)

        new_alphaf = pylab.divide(self.yf,
                                  (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

        box_new = np.array([
            self.pos[1] - (self.sz[1]) / 2 + 1, self.pos[0] -
            (self.sz[0]) / 2 + 1, self.sz[1], self.sz[0]
        ],
                           dtype=np.float32)
        return box_new
Example no. 5
def chebt1(f):
    #TODO
    """chebyshev transformation, see chebfun"""
    n = len(f)
    oncircle = concatenate((f[-1::-1], f[1:-1]))
    fftcoef = real(fft(oncircle))/(2*n-2)
    return fftcoef[n-1::-1]
def dense_gauss_kernel(sigma, x, y=None):
    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        yf = xf
        yy = xx

    xyf = pylab.multiply(xf, pylab.conj(yf))

    xyf_ifft = pylab.ifft2(xyf)
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    return k
    def f(t, *args):
        for i, arg in enumerate(args):
            params[free_params[i]] = arg
        tshift = params[-1]
        ideal = fmodel(t, *args)
        irf = cspline1d_eval(self.irf_generator, t - tshift, dx=self.irf_dt, x0=self.irf_t0)
        convoluted = pylab.real(pylab.ifft(pylab.fft(ideal) * pylab.fft(irf)))  # very small imaginary anyway
        return convoluted
Example no. 8
def chebt1(f):
    #TODO
    """chebyshev transformation, see chebfun"""
    n = len(f)
    oncircle = concatenate((f[-1::-1], f[1:-1]))
    fftcoef = real(fft(oncircle)) / (2 * n - 2)
    return fftcoef[n - 1::-1]
Example no. 9
    def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
        """
        :: 
            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.

            Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT   [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]      
            usewin - whether to use overlap-add [False]

            Returns:
             x_hat - estimated signal
        """
        if not self._have_stft:
            return None
        X_hat = P.np.abs(self.STFT) if X_hat is None else P.np.abs(X_hat)
        if pvoc:
            self._pvoc(X_hat, Phi_hat, pvoc)
        else:
            Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
            self.X_hat = X_hat *  P.exp( 1j * Phi_hat )
        if usewin:
            self.win = P.hanning(self.nfft) 
            self.win *= 1.0 / ((float(self.nfft)*(self.win**2).sum())/self.nhop)
        else:
            self.win = P.ones(self.nfft)
        if resamp:
            self.win = sig.resample(self.win, int(P.np.round(self.nfft * resamp)))
        fp = self._check_feature_params()
        self.x_hat = self._overlap_add(P.real(self.nfft * P.irfft(self.X_hat.T)), usewin=usewin, resamp=resamp)
        if self.verbosity:
            print "Extracted iSTFTM->self.x_hat"        
        return self.x_hat
Example no. 10
    def mfreqz(self, b, a=1):
        '''
        Plot the frequency response (freqz) of a filter, MATLAB-style.
        :param b: numerator coefficients
        :param a: denominator coefficients (default: a = 1)
        '''
        from matplotlib import pyplot as plt
        from pylab import unwrap, arctan2, imag, real, log10

        w, h = signal.freqz(b,a)
        h_dB = 20 * log10(abs(h))
        plt.subplot(211)
        plt.plot(w/max(w), h_dB)
        plt.grid()
        plt.ylim(-150, 5)
        plt.ylabel('Magnitude (db)')
        plt.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
        plt.title(r'Frequency response')
        plt.subplot(212)
        h_Phase = unwrap(arctan2(imag(h),real(h)))
        plt.plot(w/max(w),h_Phase)
        plt.grid()
        plt.ylabel('Phase (radians)')
        plt.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
        plt.title(r'Phase response')
        plt.subplots_adjust(hspace=0.5)
        plt.show(block=False)
Example no. 11
def ichebt2(c):
    """inverse chebyshev transformation, values of function in Chebyshev
    nodes of the second kind, see chebfun for details"""
    n = len(c)
    oncircle = concatenate(([c[-1]], c[-2:0:-1]/2, c[0:-1]/2))
    v = real(ifft(oncircle))
    f = (n-1)*concatenate(([2*v[0]], v[1:n-1]+v[-1:n-1:-1], [2*v[n-1]]))
    return f
Example no. 12
def ichebt2(c):
    """inverse chebyshev transformation, values of function in Chebyshev
    nodes of the second kind, see chebfun for details"""
    n = len(c)
    oncircle = concatenate(([c[-1]], c[-2:0:-1]/2, c[0:-1]/2))
    v = real(ifft(oncircle))
    f = (n-1)*concatenate(([2*v[0]], v[1:n-1]+v[-1:n-1:-1], [2*v[n-1]]))
    return f
Example no. 13
def get_undef_blade():
    blade = {}
    blade["tower"] = py.array(
        [[0.0, 4.15 / 2, 4.15 / 2, -4.15 / 2, -4.15 / 2, 0.0],
         [0.0, 0.0, 115.63, 115.63, 0.0, 0.0]])
    blade["shaft"] = py.array(
        [[
            blade["tower"][0, 1],
            blade["tower"][0, 1] - 7.1 * py.cos(5 * py.pi / 180)
        ],
         [
             blade["tower"][1, 2] + 2.75,
             blade["tower"][1, 2] + 2.75 + abs(7.1) * py.sin(5 * py.pi / 180)
         ]])
    shaft_tan = py.diff(blade["shaft"])
    shaft_tan = shaft_tan[0] + 1j * shaft_tan[1]
    shaft_tan /= abs(shaft_tan)
    shaft_normal = shaft_tan * 1j

    blade["hub_fun"] = lambda r: blade["shaft"][0, -1] + 1j * blade["shaft"][
        1, -1] + r * shaft_normal

    blade["hub"] = py.array(
        [[py.real(blade["hub_fun"](0)),
          py.real(blade["hub_fun"](2.8))],
         [py.imag(blade["hub_fun"](0)),
          py.imag(blade["hub_fun"](2.8))]])
    cone = -2.5 * py.pi / 180  # Cone angle
    blade_normal = (py.cos(cone) + 1j * py.sin(cone)) * shaft_normal
    blade["blade_fun"] = lambda r, R, defl: blade["hub"][0, -1] + 1j * blade[
        "hub"
    ][
        1, -1
    ] + r * blade_normal + r / R * 2.332 * blade_normal / 1j + defl * blade_normal / 1j
    R = 86.366
    blade["blade"] = py.array([[
        py.real(blade["blade_fun"](0, R, 0)),
        py.real(blade["blade_fun"](R, R, 0))
    ],
                               [
                                   py.imag(blade["blade_fun"](0, R, 0)),
                                   py.imag(blade["blade_fun"](R, R, 0))
                               ]])
    #print(py.angle(blade_normal)*180/py.pi,py.angle(shaft_normal)*180/py.pi)
    return (blade)
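A hedged plotting sketch for get_undef_blade (assuming pylab is imported as py, as in the function above): draw the undeflected tower, shaft, hub and blade in the x-y plane.

blade = get_undef_blade()
for part in ("tower", "shaft", "hub", "blade"):
    py.plot(blade[part][0], blade[part][1], label=part)  # row 0 = x, row 1 = y
py.axis("equal")
py.legend()
py.show()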
Example no. 14
def fracdiff(x, d):
    T = len(x)
    np2 = int(2**np.ceil(np.log2(2 * T - 1)))
    k = np.arange(1, T)
    b = (1, ) + tuple(np.cumprod((k - d - 1) / k))
    z = (0, ) * (np2 - T)
    z1 = b + z
    z2 = tuple(x) + z
    dx = pl.ifft(pl.fft(z1) * pl.fft(z2))
    return pl.real(dx[0:T])
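A small usage sketch for fracdiff (assuming numpy as np and pylab as pl, as in the function): fractionally difference a random series with d = 0.4 and check that the output length matches the input.

x = np.random.randn(256)
dx = fracdiff(x, 0.4)
print(dx.shape)  # (256,)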
Example no. 15
def voiSb(xp, sigma, gamma):
    z = (xp+1j*gamma)/(sigma*pylab.sqrt(2))
    ff = scipy.special.wofz(z)
    vf = pylab.real(ff)

    #CC = pylab.array(vf)
    #CCNormalised = CC/CC.max()
    #CC = CCNormalised*a

    return pylab.array(vf)
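A usage sketch for voiSb, which evaluates an (un-normalised) Voigt line shape through the real part of the Faddeeva function scipy.special.wofz; assumes pylab and scipy.special are imported as in the function.

xp = pylab.linspace(-5.0, 5.0, 201)          # frequency/offset axis
profile = voiSb(xp, sigma=1.0, gamma=0.5)    # Gaussian width sigma, Lorentzian width gamma
pylab.plot(xp, profile)
pylab.show()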
Example no. 16
def chebt2(f):
    """chebyshev transformation, coefficients in expansion using
    Chebyshev polynomials T_n(x), see chebfun for details"""
    n = len(f)
    oncircle = concatenate((f[-1::-1], f[1:-1]))
    fftcoef = real(fft(oncircle)) / (2 * n - 2)
    #print n, len(fftcoef)
    #print fftcoef[n-1:]
    #print fftcoef[n-1:0:-1]
    fftcoef[n - 1:0:-1] += fftcoef[n - 1:]  # z+conj(z)
    return fftcoef[n - 1::-1]
Example no. 17
def FourierDerivative(f):
    """
    this derivative only works for periodic series on an interval that is a multiple of 2*pi
    have to figure out how to make that work for any function
    """
    N = np.size(f)
    n = np.arange(0, N)
    # df discrete differential operator
    df = 1j * py.fftshift(n - N / 2)  # np.complex was removed from newer numpy; 1j is equivalent
    dfdt = py.ifft(df * py.fft(f))
    return py.real(dfdt)
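A quick check of FourierDerivative on a smooth periodic signal (assuming numpy as np and pylab as py, as in the function): the spectral derivative of sin(t) on a uniform grid over [0, 2*pi) should match cos(t) to near machine precision.

t = np.linspace(0, 2 * np.pi, 64, endpoint=False)
approx = FourierDerivative(np.sin(t))
print(np.max(np.abs(approx - np.cos(t))))   # close to machine precision, ~1e-13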
Example no. 18
    def plot_complex_(ax, z):
        verts = [(real(w), imag(w)) for w in z]  # a list, so len() below works in Python 3
        codes = [Path.MOVETO] + [Path.LINETO] * (len(verts) - 2) + [Path.CLOSEPOLY]

        path = mpath.Path(verts, codes)
        patch = mpatches.PathPatch(path,
                                   facecolor=[1, 0.5, 0.8],
                                   edgecolor='black',
                                   alpha=1)
        ax.add_patch(patch)
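A hedged usage sketch for plot_complex_: draw a closed polygon from complex vertices. It assumes the function is available at module level together with the names it refers to (Path from matplotlib.path, mpath, mpatches, and real/imag from pylab or numpy).

fig, ax0 = pylab.subplots()
zs = pylab.exp(1j * 2 * pylab.pi * pylab.arange(5) / 5)   # pentagon vertices on the unit circle
zs = pylab.append(zs, zs[0])                              # repeat the first vertex; CLOSEPOLY ignores its coordinates
plot_complex_(ax0, zs)
ax0.set_xlim(-1.5, 1.5)
ax0.set_ylim(-1.5, 1.5)
pylab.show()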
Example no. 19
def FourierDerivative(f):
    """
    this derivative only works for periodic series on an interval that is a multiple of 2*pi
    have to figure out how to make that work for any function
    """
    N = np.size(f)
    n = np.arange(0,N)
    # df discrete differential operator
    df = 1j*py.fftshift(n-N/2)  # np.complex was removed from newer numpy; 1j is equivalent
    dfdt = py.ifft( df*py.fft(f) )  
    return py.real(dfdt)
Example no. 20
def chebt2(f):
    """chebyshev transformation, coefficients in expansion using
    Chebyshev polynomials T_n(x), see chebfun for details"""
    n = len(f)
    oncircle = concatenate((f[-1::-1], f[1:-1]))
    fftcoef = real(fft(oncircle))/(2*n-2)
    #print n, len(fftcoef)
    #print fftcoef[n-1:]
    #print fftcoef[n-1:0:-1]
    fftcoef[n-1:0:-1] += fftcoef[n-1:] # z+conj(z)
    return fftcoef[n-1::-1]
Example no. 21
    def update_ret_response(self, new_img):
        '''
        :param new_img: new frame should be normalized, for tracker_status estimating the rect_snr
        :return:
        '''
        self.canvas = new_img.copy()
        self.trackNo += 1

        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(new_img, self.pos, self.window_sz,
                               self.cos_window)
        # calculate response of the classifier at all locations
        k = self.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        row, col = pylab.unravel_index(response.argmax(), response.shape)
        # roi rect's topleft point add [row, col]
        self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

        # the pos is not assigned to self.pos immediately; we need to check the psr first.
        # if it is above the threshold (default is 5), self.pos = pos.
        pos = np.array([self.tly, self.tlx]) + np.array([row, col])

        # Noting, for pos(cy,cx)! for cv2.rect rect(x,y,w,h)!
        rect = pylab.array([
            pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2,
            self.target_sz[1], self.target_sz[0]
        ])
        rect = rect.astype(int)  # np.int was removed from newer numpy

        self.psr, self.trkStatus = self.tracker_status(col, row, response,
                                                       rect, new_img)
        self.pos = pos
        #only update when tracker_status's psr is high
        if (self.psr > 10):
            #computing new_alphaf and observed x as z
            x = self.get_subwindow(new_img, self.pos, self.window_sz,
                                   self.cos_window)
            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(
                self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z
        ok = 1
        return ok, rect, self.psr, response
Example no. 22
    def plot_upd_mfreqz(self, fig, taps, a=1):
        if not signal:
            return
        self.plot_init_mfreqz(fig)
        ax1, ax2 = fig.get_axes()
        w, h = signal.freqz(taps, a)
        if sum(abs(h)) == 0:
            return
        h_dB = 20 * pylab.log10(abs(h))
        ax1.plot(w / max(w), h_dB)
        h_Phase = pylab.unwrap(pylab.arctan2(pylab.imag(h), pylab.real(h)))
        ax2.plot(w / max(w), h_Phase)
Example no. 23
def _ConvFft(signal, FilterKernel):
    """
    Convolution via FFT, a much faster approach;
    works exactly like convolve(x, y).
    """
    ss = numpy.size(signal)
    fs = numpy.size(FilterKernel)
    # pad with zeros until both have length N+M-1
    signal = numpy.append(signal, numpy.zeros(fs+ss-1-ss))
    FilterKernel = numpy.append(FilterKernel, numpy.zeros(fs+ss-1-fs))
    signal = pylab.real(pylab.ifft(pylab.fft(signal)*pylab.fft(FilterKernel)))
    return signal[:fs+ss-1]
Example no. 24
def _ConvFft(signal, filterkernel):
    """
    Convolution via FFT, a much faster approach;
    works exactly like convolve(x, y).
    """
    ss = numpy.size(signal)
    fs = numpy.size(filterkernel)
    # pad with zeros until both have length N+M-1
    signal = numpy.append(signal, numpy.zeros(fs + ss - 1 - ss))
    filterkernel = numpy.append(filterkernel, numpy.zeros(fs + ss - 1 - fs))
    signal = pylab.real(pylab.ifft(
        pylab.fft(signal) * pylab.fft(filterkernel)))
    return signal[:fs + ss - 1]
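A quick sanity check for _ConvFft (assuming numpy and pylab are imported as in the function): the FFT-based result should match numpy.convolve up to round-off.

a = numpy.array([1.0, 2.0, 3.0, 4.0])
b = numpy.array([1.0, 0.5])
print(_ConvFft(a, b))        # approximately [1., 2.5, 4., 5.5, 2.]
print(numpy.convolve(a, b))  # same values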
Example no. 25
    def _istftm(self,
                X_hat=None,
                Phi_hat=None,
                pvoc=False,
                usewin=True,
                resamp=None):
        """
        :: 
            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.

            Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT   [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]      
            usewin - whether to use overlap-add [False]

            Returns:
             x_hat - estimated signal
        """
        if not self._have_stft:
            return None
        X_hat = self.X if X_hat is None else P.np.abs(X_hat)
        if pvoc:
            self._pvoc(X_hat, Phi_hat, pvoc)
        else:
            Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
            self.X_hat = X_hat * P.exp(1j * Phi_hat)
        if usewin:
            if self.win is None:
                self.win = P.ones(
                    self.wfft) if self.window == 'rect' else P.np.sqrt(
                        P.hanning(self.wfft))
            if len(self.win) != self.nfft:
                self.win = P.r_[self.win, P.np.zeros(self.nfft - self.wfft)]
            if len(self.win) != self.nfft:
                error.BregmanError(
                    "features_base.Features._istftm(): assertion failed len(self.win)==self.nfft"
                )
        else:
            self.win = P.ones(self.nfft)
        if resamp:
            self.win = sig.resample(self.win,
                                    int(P.np.round(self.nfft * resamp)))
        fp = self._check_feature_params()
        self.x_hat = self._overlap_add(P.real(P.irfft(self.X_hat.T)),
                                       usewin=usewin,
                                       resamp=resamp)
        if self.verbosity:
            print("Extracted iSTFTM->self.x_hat")
        return self.x_hat
Example no. 26
def ichebt1(c):
    #TODO
    """inverse chebyshev transformation, see chebfun"""
    n = len(c)
    print("tam===", n)
    oncircle = concatenate((c[-1::-1], c[1:-1]))
    print("v=", oncircle, n)
    v = real(ifft(oncircle))
    print(v)
    print(v[-2:n:-1])
    print("|", v[1:-1])
    f = (n-1)*concatenate(([2*v[0]], v[-2:n:-1]+v[1:-1], [2*v[-1]]))  # index v, don't call it
    print("|", f)
    return f
Example no. 27
    def _icqft(self, V_hat):
        """
        ::

            Inverse constant-Q Fourier transform. Make a signal from a constant-Q transform.
        """
        if not self._have_cqft:
            return False
        fp = self._check_feature_params()
        X_hat = pylab.array( pylab.dot(self.Q.T, V_hat) ) * pylab.exp( 1j * pylab.angle(self.STFT) )
        self.x_hat = self._overlap_add( pylab.real(fp['nfft'] * pylab.irfft(X_hat.T)) )
        if fp['verbosity']:
            print "Extracted iCQFT->x_hat"
        return True
Example no. 28
def ichebt1(c):
    #TODO
    """inverse chebyshev transformation, see chebfun"""
    n = len(c)
    print("tam===", n)
    oncircle = concatenate((c[-1::-1], c[1:-1]))
    print("v=", oncircle, n)
    v = real(ifft(oncircle))
    print(v)
    print(v[-2:n:-1])
    print("|", v[1:-1])
    f = (n - 1) * concatenate(([2 * v[0]], v[-2:n:-1] + v[1:-1], [2 * v[-1]]))  # index v, don't call it
    print("|", f)
    return f
Example no. 29
def plot_image(in_file,*arguments):
    
    try:
        img = spimage.sp_image_read(in_file, 0)
    except Exception:
        raise IOError("Error: %s is not a readable .h5 file" % in_file)

    plot_flags = ['abs','mask','phase','real','imag']
    shift_flags = ['shift']
    log_flags = ['log']

    plot_flag = 0
    shift_flag = 0
    log_flag = 0

    for flag in arguments:
        flag = flag.lower()
        if flag in plot_flags:
            plot_flag = flag
        elif flag in shift_flags:
            shift_flag = flag
        elif flag in log_flags:
            log_flag = flag
        else:
            print "unknown flag %s" % flag

    if shift_flag:
        img = spimage.sp_image_shift(img)

    def no_log(x):
        return x

    if log_flag:
        log_function = pylab.log
    else:
        log_function = no_log

    if (plot_flag == "mask"):
        pylab.imshow(img.mask,origin='lower',interpolation="nearest")
    elif(plot_flag == "phase"):
        pylab.imshow(pylab.angle(img.image),cmap='hsv',origin='lower',interpolation="nearest")
    elif(plot_flag == "real"):
        pylab.imshow(log_function(pylab.real(img.image)),origin='lower',interpolation="nearest")
    elif(plot_flag == "imag"):
        pylab.imshow(log_function(pylab.imag(img.image)),origin='lower',interpolation="nearest")
    else:
        pylab.imshow(log_function(abs(img.image)),origin='lower',interpolation="nearest")

    pylab.show()
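A minimal usage sketch for plot_image, assuming spimage and pylab are available; 'image.h5' is a placeholder for a readable spimage file, and flags may be combined in any order.

plot_image('image.h5', 'phase', 'shift')   # shifted phase plot
plot_image('image.h5', 'abs', 'log')       # log-scaled absolute values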
Example no. 30
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape)/2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
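A short usage sketch for dense_gauss_kernel (assuming pylab is imported): evaluate the auto-correlation kernel of a cosine-windowed random patch; the response map has the same shape as the input.

patch = pylab.rand(64, 64)
cos_window = pylab.outer(pylab.hanning(64), pylab.hanning(64))
k = dense_gauss_kernel(0.2, patch * cos_window)   # y omitted: auto-correlation case
print(k.shape)   # (64, 64)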
Example no. 32
    def _istftm(self, X_hat, Phi_hat=None):
        """
        ::

            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.
        """
        if not self._have_stft:
            return False
        if Phi_hat is None:
            Phi_hat = pylab.exp( 1j * pylab.angle(self.STFT))
        fp = self._check_feature_params()
        X_hat = X_hat *  Phi_hat
        self.x_hat = self._overlap_add( pylab.real(fp['nfft'] * pylab.irfft(X_hat.T)) )
        if fp['verbosity']:
            print "Extracted iSTFTM->self.x_hat"
        return True
Example no. 33
def dense_gauss_kernel(sigma, x, y=None):
    """
    通过高斯核计算余弦子窗口图像块的响应图
    利用带宽是 sigma 的高斯核估计两个图像块 X (MxN) 和 Y (MxN) 的关系。X, Y 是循环的、经余弦窗处理的。输出结果是
    响应图矩阵 MxN. 如果 X = Y, 则函数调用时取消 y,则加快计算。
    该函数对应原文中的公式 (16),以及算法1中的 function k = dgk(x1, x2, sigma)
    :param sigma: 高斯核带宽
    :param x: 余弦子窗口图像块
    :param y: 空或者模板图像块
    :return: 响应图
    """
    # 计算图像块 x 的傅里叶变换
    xf = pylab.fft2(x)  # x in Fourier domain
    # 把图像块 x 拉平
    x_flat = x.flatten()
    # 计算 x 的2范数平方
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # 一半情况, x 和 y 是不同的,计算 y 的傅里叶变化和2范数平方
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # x 的自相关,避免重复计算
        yf = xf
        yy = xx

    # 傅里叶域的互相关计算,逐元素相乘
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # 转化为频率域
    xyf_ifft = pylab.ifft2(xyf)
    # 对频率域里的矩阵块进行滚动平移,分别沿 row 和 col 轴
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # 计算高斯核响应图
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy

    return pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))
Example no. 34
    def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
        """
        :: 
            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.

            Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT   [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]      
            usewin - whether to use overlap-add [False]

            Returns:
             x_hat - estimated signal
        """
        if not self._have_stft:
            return None
        X_hat = self.X if X_hat is None else P.np.abs(X_hat)
        if pvoc:
            self._pvoc(X_hat, Phi_hat, pvoc)
        else:
            Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
            self.X_hat = X_hat * P.exp(1j * Phi_hat)
        if usewin:
            if self.win is None:
                self.win = P.ones(self.wfft) if self.window == 'rect' else P.np.sqrt(
                    P.hanning(self.wfft))
            if len(self.win) != self.nfft:
                self.win = P.r_[self.win, P.np.zeros(self.nfft - self.wfft)]
            if len(self.win) != self.nfft:
                error.BregmanError(
                    "features_base.Features._istftm(): assertion failed len(self.win)==self.nfft")
        else:
            self.win = P.ones(self.nfft)
        if resamp:
            self.win = sig.resample(
                self.win, int(P.np.round(self.nfft * resamp)))
        fp = self._check_feature_params()
        self.x_hat = self._overlap_add(
            P.real(P.irfft(self.X_hat.T)), usewin=usewin, resamp=resamp)
        if self.verbosity:
            print("Extracted iSTFTM->self.x_hat")
        return self.x_hat
Example no. 35
def getGrowthNakata(ky_list, Setup, init = -0.07 -.015j):
  
  results = []
  
  eta_e     = Setup['eta_e']
  kx        = Setup['kx']
  v_te      = Setup['v_te']
  rho_te2   = Setup['rho_te2']
  tau       = Setup['tau']
  theta     = Setup['theta']

  for ky in ky_list:
    
    kp  = mp.sqrt(2.) * theta * ky 
    # Dispersion Relation Equation (9)
    if ky > 2.5 : init = 0.01 - 0.01j
    def DispersionRelation(w):
        ko2 = kx**2 + ky**2
        def Lambda(b): return mp.exp(b) * (1. + tau - Gamma0(b))
        #def Lambda(b): return mp.exp(b) * (tau + b/(1.+b))
      
        #zeta = w / (kp * v_te)
        zeta = w / (kp * v_te)
        w_star_e = ky * pylab.sqrt(rho_te2) * v_te
 
        # Take care of extra pie due to normalization
        #return  1. + Lambda(ko2 * rho_te2) + zeta * Z(zeta) - ky/kp *  eta_e * zeta - ky/kp * ( eta_e * zeta**2 +\
        return  1. + Lambda(ko2 * rho_te2) + zeta * Z(zeta) - ky/kp *  eta_e * zeta - ky/kp * ( eta_e * zeta**2 +\
                    (1. - eta_e/2. * (1. + ko2 * rho_te2)))*Z(zeta) * mp.sqrt(mp.pi)
                    #(1. - eta_e/2. * (1. + ko2 * rho_te2)))*Z(zeta) 

    try:
        omega = complex(mp.findroot(DispersionRelation, init, solver='muller', maxsteps=1000))
        #omega = complex(PT.zermuller(DispersionRelation, 0., -0.2 - 0.05j, dx=.001)[0])

    except Exception:
        omega = .0
        print("Not found : ", ky, "  Theta : ", theta)
    results.append(float(pylab.real(omega))  + 1.j * pylab.imag(omega))

  return (pylab.array(ky_list), pylab.array(results))
Example no. 36
def numform(element,tol):
            """numform returns a string representing num -- the string is blank if |num|<tol"""
            st=""
            reelement=real(element)
            imelement=imag(element)
            if abs(reelement)<tol: # don't print the real part
                if abs(imelement)>tol: # print the imag part
                    st+=inumform(imelement)
                    st+="i"
            elif abs(imag(element))<tol: # print real but not imag
                st+=rnumform(reelement)
            else:                       # print both
                st+=rnumform(reelement)
                if imelement>0:
                    st+="+"
                else:
                    st+="-"
                    imelement=-imelement
                st+=inumform(imelement)
                st+="i"
            return st
Example no. 37
    def find(self, image):
        if self.should_resize_image:
            self.image = scipy.misc.imresize(image, 0.5)
            self.image = self.image / 255.0  # hack around scipy
        else:
            self.image = image

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # calculate response of the classifier at all locations
        k = dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = self.response
        self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
        self.pos = self.pos - pylab.floor(self.sz / 2) + [self.row, self.col]

        return self.pos
    def find(self, image):
        if len(image.shape) == 3 and image.shape[2] > 1:
            image = rgb2gray(image)
        self.image = image
        if self.should_resize_image:
            self.image = scipy.misc.imresize(self.image, 0.5)
            self.image = self.image / 255.0

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # calculate response of the classifier at all locations
        k = dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = self.response
        self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
        self.pos = self.pos - pylab.floor(self.sz/2) + [self.row, self.col]

        return self.pos
Example no. 39
total_bits=50
samples_per_cycle=float(sampling_rate)/float(local_osc)
samples_per_bit=cycles_per_bit*samples_per_cycle
len_iq_samples=int(float(total_bits)*samples_per_bit)
total_samples_to_read=len_iq_samples*2
fileh=open(sys.argv[1],'rb')
samples=bytearray(fileh.read(total_samples_to_read))
print(len(samples))
iq_samples=pl.array([complex(samples[i*2]-128, samples[i*2+1]-128) for i in range(0,len_iq_samples) ])
dc_component=pl.sum(iq_samples)/len_iq_samples
for i in range(-5,5):
    lo=pl.exp(-1j*pl.frange(0,len_iq_samples-1)*(local_osc+i*1000)*2*pl.pi/sampling_rate)
#pl.plot(iq_samples)
    down_convert=iq_samples*lo
    decimated_samples = [pl.sum(down_convert[i:i + int(samples_per_bit) - 1]) for i in range(0, len_iq_samples - int(samples_per_bit))]
    x_axis_range=pl.frange(len(decimated_samples)-1)/samples_per_bit
    print(len(x_axis_range), len(decimated_samples))
    pl.subplot(211)
    pl.plot(x_axis_range,pl.real(decimated_samples),'b',
            x_axis_range,pl.imag(decimated_samples),'r')
    pl.title('IQ Samples')
    pl.subplot(212)
    pl.plot(x_axis_range,pl.arctan(pl.imag(decimated_samples)/pl.real(decimated_samples)))
    pl.title('Phase')
    pl.figure(2)
    f_range=pl.frange(-sampling_rate/2 , sampling_rate/2 ,(sampling_rate)/len_iq_samples)
    print(len(f_range), len_iq_samples)
    pl.plot(f_range[0:len_iq_samples],abs(pl.fft(iq_samples)))
    pl.title('FFT of IQ Samples')
    pl.show()
Example no. 40
def getGrowth(ky_list, Setup, disp="Gyro", init = -0.02 -.006j):
  mp.dps=15

  results  = []
  residuum = []
  eta     = Setup['eta']
  kx        = Setup['kx']
  v_te      = Setup['v_te']
  rho_te2   = Setup['rho_te2']
  tau       = Setup['tau']
  theta     = Setup['theta']
  m_ie      = Setup['m_ie']
  lambda_D2 = Setup['lambda_D2']

  for ky in ky_list:
    kp  = theta * ky * v_te 
        
    ko2 = kx**2 + ky**2
    kyp = ky/kp
    b   = ko2 * rho_te2
    G0  = Gamma0(b)
    G0mG1 = Gamma0(b) - Gamma1(b)
   

    if disp == 'Gyro1st':
      def DispersionRelation(w):
        
        Lambda = lambda_D2 * b + mp.exp(b) * (1. + 1. - G0) 
        zeta = w / kp
        Z = Z_PDF(zeta) 
        # Take care of extra pie due to normalization
        return  - (1. - eta/2. * (1. + b))*kyp * Z \
                - eta * kyp * (zeta + zeta**2 * Z) + zeta * Z + 1. +  Lambda 
    if disp == 'Gyro1stKin':
      def DispersionRelation(w):
        
        def Disp(w, kp, b, eta):
            zeta = w / kp
            Z  = Z_PDF(zeta)
            kyp = ky/kp
            return    - (1. - eta/2. * (1. + b))*kyp * Z \
                - eta * kyp * (zeta + zeta**2 * Z) + zeta * Z + 1. 
       
        # proton
        sum1MG0 = lambda_D2 * b + (1. - G0) +  (1. - G0/m_ie) 

        #return -mp.exp(-b) * Disp(w, kp, b, eta) +  mp.exp(-b/m_ie) * Disp(w,kp * mp.sqrt(m_ie), b/m_ie, eta*mp.sqrt(m_ie)) + sum1MG0
        return mp.exp(-b) * Disp(w, kp, b, eta) -  mp.exp(-b/m_ie) * Disp(w,kp * mp.sqrt(m_ie), b/m_ie, eta) + sum1MG0


    elif disp == 'Fluid':
      def DispersionRelation(w):
        K    = 1. + eta
        K_12 = 1. + eta/2.
        Lambda = 0.#lambda_D2 * b + mp.exp(b) * (1. + 1. - G0) 
        return -ko2 + ( (ky + w)/(K_12 * ky - w) + kp**2 /w**2 * (0.5 * (K * ky - w))  /(K_12 * ky - w)) + Lambda
        return -ko2 + ( (ky+w)/(K_12 * ky-w) + kp**2 /w**2 * (0.5 * (K * ky - w))  /(K_12 * ky-w))
    
     


    elif disp == 'Gyro':
      def DispersionRelation(w):
        Lambda = lambda_D2 *b  + (1. - G0) + 1.
        zeta = w / kp
        Z  = Z_PDF(zeta)
        # Take care of extra pie due to normalization
        return  - (1. - eta/2.)*kyp * G0 * Z +   eta * kyp * b * G0mG1 * Z   \
                - eta * kyp * G0 * ( zeta +  zeta**2 * Z) +  zeta * Z * G0  + G0  +  Lambda 
    elif disp == 'GyroKin':
      def DispersionRelation(w):
        
        # Take care of extra pie due to normalization
        # what is Debye lengtth effect here ?
        def Disp(w, kp, b, eta):
            zeta = w / kp
            Z  = Z_PDF(zeta)
            kyp = ky/kp
            G0    = Gamma0(b)
            G0mG1 = Gamma0(b) - Gamma1(b)
            return  - (1. - eta/2.)*kyp * G0 * Z +   eta * kyp * b * G0mG1 * Z   \
                    - eta * kyp * G0 * ( zeta +  zeta**2 * Z) +  zeta * Z * G0  + G0 
        
        sum1MG0 = (1. - G0) + (1. - Gamma0(b/m_ie))

        return Disp(w, kp, b, eta)  - Disp(w,kp * mp.sqrt(m_ie), b/m_ie, eta/mp.sqrt(m_ie)              ) + sum1MG0
        return Disp(w, kp, b, eta) + (1.-G0) + 1. # - Disp(w,kp * mp.sqrt(m_ie), b/m_ie, eta              ) + sum1MG0
        return Disp(w, kp, b, eta) - Disp(w,kp * mp.sqrt(m_ie), b/m_ie, eta/mp.sqrt(m_ie)) + sum1MG0
    else: print("No such Dispersion Relation : ", disp)
 
    omega = init

    def checkAndImprove(omega, solver, tol=1.e-9):
       omega2 = omega
       try  : omega2 = complex(mp.findroot(DispersionRelation, omega , solver=solver, maxsteps=100, tol=tol))
       except : pass
       if abs(DispersionRelation(omega2)) < abs(DispersionRelation(omega)): return omega2
       else : return omega

    #omega = checkAndImprove(omega, "newton", 1.e-6)
    try:
        if ky < 0.3:
            omega = complex(mp.findroot(DispersionRelation, omega, solver='anewton', maxsteps=10, verify=False))
        omega = complex(mp.findroot(DispersionRelation, omega, solver='halley', maxsteps=300))
        #omega = complex(mp.findroot(DispersionRelation, omega, solver='newton', maxsteps=100 ))
    except Exception:
        omega = float('nan')
    print "ky : ", ky , " w : ", omega 
    results.append(float(pylab.real(omega))  + 1.j * pylab.imag(omega))
    res = DispersionRelation(results[-1])
    residuum.append(min(1.,float(abs(res))))
  
  return (pylab.array(ky_list), pylab.array(results), pylab.array(residuum))
Example no. 41
    m = MicGeom()
    m.mpos_tot = array([[0, 0, 0]])

    t = PointSource(signal=n1, mpos=m, loc=(1, 0, 1))

    f = PowerSpectra(time_data=t,
                     window='Hanning',
                     overlap='50%',
                     block_size=4096)
    ###################################################################
    ### Plotting ###
    from pylab import figure, plot, show, xlim, ylim, xscale, xticks, xlabel, ylabel, grid, real
    from acoular import L_p

    band = 3  # octave: 1 ;   1/3-octave: 3
    (f_borders, p, f_center) = barspectrum(real(f.csm[:, 0, 0]), f.fftfreq(),
                                           band)

    label_freqs = [str(int(_)) for _ in f_center]

    figure(figsize=(20, 6))

    plot(f_borders, L_p(p))

    xlim(f_borders[0] * 2**(-1. / 6), f_borders[-1] * 2**(1. / 6))
    ylim(40, 90)

    xscale('symlog')
    xticks(f_center, label_freqs)
    xlabel('f in Hz')
    ylabel('SPL in dB')
Example no. 42
band = 3  # octave: 1 ;   1/3-octave: 3 (for plotting)

# set up microphone at (0,0,0)
m = MicGeom()
m.mpos_tot = array([[0, 0, 0]])

# create noise source
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=10 * sfreq, seed=1)

t = PointSource(signal=n1, mics=m, loc=(1, 0, 1))

# create power spectrum
f = PowerSpectra(time_data=t, window='Hanning', overlap='50%', block_size=4096)

# get spectrum data
spectrum_data = real(f.csm[:, 0,
                           0])  # get power spectrum from cross-spectral matrix
freqs = f.fftfreq()  # FFT frequencies

# use barspectrum from acoular.tools to create third octave plot data
(f_borders, p, f_center) = barspectrum(spectrum_data, freqs, band, bar=True)
(f_borders_, p_, f_center_) = barspectrum(spectrum_data,
                                          freqs,
                                          band,
                                          bar=False)

# create figure with barspectra
figure(figsize=(20, 6))
title("Powerspectrum")
plot(f_borders, L_p(p), label="bar=True")
plot(f_borders_, L_p(p_), label="bar=False")
xlim(f_borders[0] * 2**(-1. / 6), f_borders[-1] * 2**(1. / 6))
Example no. 43
from PDE_FIND import *
import scipy.io as sio
import pylab
import random

def wgn(x, snr):
    snr = 10**(snr/10.0)
    xpower = np.sum(x**2)/len(x)
    npower = xpower / snr
    return np.random.randn(len(x)) * np.sqrt(npower)


pylab.rcParams['figure.figsize'] = (12, 8)
# Load data
data = sio.loadmat('C:/Users/15307/Desktop/热传导方程解集构建/initialdata.mat')
u1 = np.transpose(pylab.real(data['Ex_plasma']))
u2 = np.transpose(pylab.real(data['Hy_plasma']))
jx = np.transpose(pylab.real(data['Jx_plasma']))
#jy = pylab.real(data['Jy_plasma'])
#jz = pylab.real(data['Jz_plasma'])
x = data['x'][0]
t = data['t'][0]
for i in range(800):
    a1 = u1[i]
    a2 = u2[i]
    a3 = jx[i]
    n1 = wgn(a1,snr)
    n2 = wgn(a2,snr)
    n3 = wgn(a3,snr)
    
    u1[i] = a1 + n1
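A usage sketch for the wgn helper above (assuming numpy as np): it returns white Gaussian noise scaled so the signal-to-noise ratio is snr dB; note that in the loop above, snr is assumed to be defined earlier in the script.

clean = np.sin(np.linspace(0, 2 * np.pi, 1000))
noisy = clean + wgn(clean, 20)   # add noise at roughly 20 dB SNR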
Example no. 44
#!/usr/bin/python
import spimage
import pylab
# Read input image
img = spimage.sp_image_read('../ring/raw_ring.h5',0)
# Convolve with a 2 pixel
# standard deviation gaussian
img_blur = spimage.sp_gaussian_blur(img,2.0)
rel_diff = abs((pylab.real(img_blur.image)-
                pylab.real(img.image))
               /pylab.real(img_blur.image))
# Plot relative difference
pylab.imshow(rel_diff,vmin = 0,vmax = 0.5)
pylab.colorbar()
pylab.show()

Example no. 45
    def calculateinitsunc(self,H,l,sigma_L = 1e-6,sigma_Theta = 1,n_exact = 1,filename=None):
        # Calculates the uncertainty on n and k according to:
        # W. Withayachumnankul, B. M. Fisher, H. Lin, D. Abbott, "Uncertainty in terahertz time-domain spectroscopy measurement", J. Opt. Soc. Am. B., Vol. 25, No. 6, June 2008, pp. 1059-1072
        #
        # sigma_L = standard uncertainty on sample thickness in meter
        # sigma_Theta = interval of sample misalignment in degree
        # n_exact = exact value of the refractive index of air during the measurements
        
        n, k = self.calculateinits(H,l)
        n = py.asarray(n)
        k = py.asarray(k)      
        
        Asam = []
        Aref = []
        Bsam = []
        Bref = []
        for i in range(len(self.H.getfreqs())):
            Asam.append((py.sum(py.imag(self.H.fdsam.getFAbs().tolist()[i]*py.exp(1j*2*py.pi*self.H.getfreqs().tolist()[i]*self.H.fdsam._tdData.getTimes()))*self.H.fdsam._tdData.getUncEX())**2))
            Aref.append((py.sum(py.imag(self.H.fdref.getFAbs().tolist()[i]*py.exp(1j*2*py.pi*self.H.getfreqs().tolist()[i]*self.H.fdref._tdData.getTimes()))*self.H.fdref._tdData.getUncEX())**2))
            Bsam.append((py.sum(py.real(self.H.fdsam.getFAbs().tolist()[i]*py.exp(1j*2*py.pi*self.H.getfreqs().tolist()[i]*self.H.fdsam._tdData.getTimes()))*self.H.fdsam._tdData.getUncEX())**2))
            Bref.append((py.sum(py.real(self.H.fdref.getFAbs().tolist()[i]*py.exp(1j*2*py.pi*self.H.getfreqs().tolist()[i]*self.H.fdref._tdData.getTimes()))*self.H.fdref._tdData.getUncEX())**2))
        
        # Uncertainty on n
        sn_Esam_2 = ((c/(2*py.pi*self.H.getfreqs()*l))**2 * py.asarray(Asam)/self.H.fdsam.getFAbs()**4)/self.H.fdsam._tdData.numberOfDataSets
        sn_Eref_2 = ((c/(2*py.pi*self.H.getfreqs()*l))**2 * py.asarray(Aref)/self.H.fdref.getFAbs()**4)/self.H.fdref._tdData.numberOfDataSets
        sn_l_2 = ((n-self.n_0)*sigma_L/l)**2
        #sn_l_2_1 = (c*self.H.getFPh()/(2*py.pi*self.H.getfreqs()*l*l))**2 * sigma_L**2
        #sn_H_2 = (c/(2*py.pi*self.H.getfreqs()*l))**2 * self.H.getFPhUnc()**2
        fn_Theta = (n-self.n_0.real)*(1/py.cos(sigma_Theta*py.pi/180)-1)
        fn_H = (c/(2*py.pi*self.H.getfreqs()*l))*py.absolute(-py.angle(4*(n-1j*k)*self.n_0/(n-1j*k+self.n_0)**2))
        fn_FP = (c/(2*py.pi*self.H.getfreqs()*l))*py.absolute(-py.angle(1/(1-((n-1j*k-self.n_0)/(n-1j*k+self.n_0))**2*py.exp(-2*1j*(n-1j*k)*2*py.pi*self.H.getfreqs()*l/c))))
        fn_n0 = abs(self.n_0.real - n_exact)*py.ones(len(self.H.getFPh()))
        u_n = py.sqrt(sn_l_2+sn_Esam_2+sn_Eref_2)+fn_Theta+fn_H+fn_FP+fn_n0

        # Uncertainty on k
        sk_Esam_2 = ((c/(2*py.pi*self.H.getfreqs()*l))**2 *(Bsam/self.H.fdsam.getFAbs()**4 + ((n-self.n_0)/(n+self.n_0))**2 * sn_Esam_2/n**2))/self.H.fdsam._tdData.numberOfDataSets
        sk_Eref_2 = ((c/(2*py.pi*self.H.getfreqs()*l))**2 *(Bref/self.H.fdref.getFAbs()**4 + ((n-self.n_0)/(n+self.n_0))**2 * sn_Eref_2/n**2))/self.H.fdref._tdData.numberOfDataSets
        sk_l_2 = (k*sigma_L/l)**2 + (c*(n-self.n_0)/((n+self.n_0)*n*2*py.pi*self.H.getfreqs()*l))**2*sn_l_2
        #sk_l_2_1 = ((c/(2*py.pi*self.H.getfreqs()*l*l))*py.log(self.H.getFAbs()*(n+self.n_0.real)**2/(4*n*self.n_0.real)))**2 * sigma_L**2
        #sk_H_2 = (-c/(2*py.pi*self.H.getfreqs()*l*self.H.getFAbs()))**2 * self.H.getFAbsUnc()**2
        fk_Theta = k*(1/py.cos(sigma_Theta*py.pi/180)-1)+c*(n-self.n_0.real)*fn_Theta/(n*2*py.pi*self.H.getfreqs()*l*(n+self.n_0.real))
        fk_H = (c/(2*py.pi*self.H.getfreqs()*l))*(py.log(py.absolute(n/(n-1j*k)*((n-1j*k+self.n_0.real)/(n+self.n_0.real))**2))+py.absolute(fn_H)*(n-self.n_0.real)/(n*(n+self.n_0.real)))
        fk_FP = (c/(2*py.pi*self.H.getfreqs()*l))*(py.absolute(-py.log(py.absolute(1/(1-((n-1j*k-self.n_0)/(n-1j*k+self.n_0))**2*py.exp(-2*1j*(n-1j*k)*2*py.pi*self.H.getfreqs()*l/c)))))+py.absolute(fn_FP)*(n-self.n_0.real)/(n*(n+self.n_0.real)))
        fk_n0 = (c/(2*py.pi*self.H.getfreqs()*l))*(n-self.n_0.real)*(self.n_0.real - n_exact)/(n*self.n_0.real)
        u_k = py.sqrt(sk_l_2+sk_Esam_2+sk_Eref_2)+fk_Theta+fk_H+fk_FP+fk_n0
        
        # Convert n in epsilon Epsilon = Epsilon_1 + j Epsilon_2 = (n+jk)**2
        # Epsilon_1 = n**2 - k**2
        # Epsilon_2 = -2nk
        Epsilon_1 = n**2 - k **2
        Epsilon_2 = -2 * n * k
        u_Epsilon_1 = py.sqrt((2*n*u_n)**2 + (-2*k*u_k)**2)
        u_Epsilon_2 = py.sqrt((-2*k*u_n)**2 + (-2*n*u_k)**2)
        
        # Calculate absorption coefficient
        # alpha = 4 * pi * k * f / c1
        alpha = 4 * py.pi * k * self.H.getfreqs() / (100 * c)      # in cm^-1
        u_alpha = 4 * py.pi * u_k * self.H.getfreqs() / (100 * c)  # in cm^-1
        
        # Calculate maximum measurable absorption coefficient according to
        # P. U. Jepsen and B. M. Fisher: "Dynamic Range in terahertz time-domain transmission and reflection spectroscopy", Optics Letters, Vol. 30, n. 1, pp. 29-31, Jan 2005
        
        alpha_max = 2 * py.log((self.H.fdsam.getDR() * 4 * n)/(n + 1)**2) / (100 * l) # in cm^-1
        
        # Save results into a table accessible from outside
        self.n_with_unc=py.real(py.column_stack((
        self.H.getfreqs(),                            # frequencies
        n, k,                                         # real and imaginary part of n
        u_n, u_k,                                     # k=1 combined uncertainty on n and k
        py.sqrt(sn_l_2), py.sqrt(sn_Esam_2), py.sqrt(sn_Eref_2), fn_Theta, fn_H, fn_FP, fn_n0, # Uncertainty components of n due to thickness, H, sample misalignment, k<<<, Neglect FP, ref ind of air
        py.sqrt(sk_l_2), py.sqrt(sk_Esam_2), py.sqrt(sk_Eref_2), fk_Theta, fk_H, fk_FP, fk_n0, # Uncertainty components of k due to thickness, H, sample misalignment, k<<<, Neglect FP, ref ind of air
        Epsilon_1, Epsilon_2,                         # Real and imaginary part of Epsilon
        u_Epsilon_1, u_Epsilon_2,                     # k = 1 uncertainty on the real and imaginary part of Epsilon
        alpha, u_alpha,                               # Absorption coefficient and its k = 1 uncertainty
        alpha_max,                                     # Maximum measurable absorption coefficient
        )))
        return
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    # print("yf.shape ==", yf.shape)
    # print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if True and ((frame % 10) == 0):
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        # print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
Example no. 47
	def autoCorr(self, timeSeries):
		self.N = len(timeSeries)
		self.nfft = int(2 ** math.ceil(math.log(abs(self.N),2)))
		self.ACF = p.ifft(p.fft(timeSeries,self.nfft) * p.conjugate(p.fft(timeSeries,self.nfft)))
		self.ACF = list(p.real(self.ACF[:int(math.ceil((self.nfft+1)/2.0))]))
		self.plotAutoCorr()
Example no. 48
            plot_flag = flag
        elif flag in log_flags:
            log_flag = flag
        else:
            print "unknown flag %s" % flag

    if log_flag == 'log':
        log_function = pylab.log
    else:
        log_function = no_log

    if plot_flag == 'phases':
        hist = pylab.histogram(pylab.angle(values),bins=50)
        ax.plot((hist[1][:-1]+hist[1][1:])/2.0,log_function(hist[0]))
    elif plot_flag == 'histogram':
        hist = pylab.histogram2d(pylab.real(values),pylab.imag(values),bins=500)
        ax.imshow(log_function(hist[0]),extent=(hist[2][0],hist[2][-1],-hist[1][-1],-hist[1][0]),interpolation='nearest')
    else:
        ax.plot(pylab.real(values),pylab.imag(values),'.')
    return fig
    


if __name__ == "__main__":
    try:
        plot_phases(sys.argv[1],*sys.argv[2:])
        pylab.show()
    except:
        print """
Usage: plot_phases datafile [flags]
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    #spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0]/2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1]/2)
    #[rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    #print("yf.shape ==", yf.shape)
    #print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]),
                             pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)

        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        #print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        if debug:
            pylab.figure()
            pylab.imshow(x)
            pylab.title("sub window")

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz/2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train the classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:",  len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
Exemplo n.º 50
0
#!/usr/bin/python
import spimage
import pylab
# Read input image
img = spimage.sp_image_read('../ring/raw_ring.h5', 0)
# Convolve with a Gaussian of
# 2 pixel standard deviation
img_blur = spimage.sp_gaussian_blur(img, 2.0)
rel_diff = abs((pylab.real(img_blur.image) - pylab.real(img.image)) /
               pylab.real(img_blur.image))
# Plot relative difference
pylab.imshow(rel_diff, vmin=0, vmax=0.5)
pylab.colorbar()
pylab.show()
Exemplo n.º 51
0
def track(input_video_path, show_tracking):
    """
    Note: variables ending with f are in the frequency domain.
    """

    # extra area surrounding the target
    padding = 1.0
    # spatial bandwidth, proportional to the target size
    output_sigma_factor = 1 / float(16)
    # Gaussian kernel bandwidth
    sigma = 0.2
    # regularization coefficient
    lambda_value = 1e-2
    # linear interpolation factor for model adaptation
    interpolation_factor = 0.075
    # load the video info: the list of frame images to test, the target box centre [y, x]
    # in the first frame, half the height/width of the box, whether the frames should be
    # halved in size, the per-frame ground truth, and the video path
    info = load_video_info.load_video_info(input_video_path)
    img_files, pos, target_sz, should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (Gaussian shaped), bandwidth proportional to the target box size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor
    # row (grid_y) and column (grid_x) offsets of the window, centred on the target
    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # turn the coordinate vectors into a 2-D grid over the window
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    # Eq. (19) in the paper: values in [0, 1], largest at the centre and decaying outwards
    y = pylab.exp((-0.5 / output_sigma ** 2) * (rs ** 2 + cs ** 2))
    # 2-D discrete Fourier transform of the desired output
    yf = pylab.fft2(y)

    # pre-computed cosine window: the outer product of a Hanning window over the height
    # and a Hanning window over the width of the padded target box
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    # global z, response
    plot_tracking.z = None
    alphaf = None
    plot_tracking.response = None
    # iterate over the images in the filename list
    for frame, image_filename in enumerate(img_files):
        if (frame % 10) == 0:
            print("Processing frame", frame)
        # load the image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        # convert colour images to grayscale
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray.rgb2gray(im)
        # if requested, resize the image to half size
        if should_resize_image:
            # note: PIL's Image.resize expects (width, height)
            im = np.array(Image.fromarray(im).resize((im.shape[1] // 2, im.shape[0] // 2)))

        # start timing
        start_time = time.time()

        # extract and pre-process the sub-window, using the cosine window
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)
        # if this is not the first frame, compute the classifier response
        if not is_first_frame:
            # calculate the response of the classifier at all locations
            k = dense_gauss_kernel.dense_gauss_kernel(sigma, x, plot_tracking.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            plot_tracking.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # the target location is at the maximum response
            r = plot_tracking.response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(plot_tracking.response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get the cosine-windowed patch at the current estimated target position, to train the classifier
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares: calculate the alphas in the Fourier domain
        k = dense_gauss_kernel.dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame: train with a single image
            alphaf = new_alphaf
            plot_tracking.z = x
        else:
            # subsequent frames: interpolate the model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            plot_tracking.z = (1 - f) * plot_tracking.z + f * new_z

        # save the current position and accumulate the elapsed time for the FPS count
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualise the tracking result
        if show_tracking == "yes":
            plot_tracking.plot_tracking(frame, pos, target_sz, im, ground_truth)

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precision plot
        show_precision.show_precision(positions, ground_truth, title)
Exemplo n.º 52
0
def taylor_coeff(fun, N):
    """From L. Trefethen, Ten digits algorithms """
    zz = exp(2j*pi*(array(list(range(N))))/N)
    c = fft(fun(zz))/N
    return real(c)
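The routine samples fun at the N-th roots of unity and uses the FFT as a trapezoid-rule approximation of the Cauchy integral for the Taylor coefficients, c_k ~ (1/N) * sum_j f(z_j) * z_j**(-k). A quick self-contained check with plain NumPy (the listing above assumes exp, pi, array, fft and real are already imported from pylab/numpy):

import numpy as np

def taylor_coeff_np(fun, N):
    zz = np.exp(2j * np.pi * np.arange(N) / N)   # N-th roots of unity
    return np.real(np.fft.fft(fun(zz)) / N)      # c_k ~ (1/N) * sum_j f(z_j) * z_j**(-k)

# Taylor coefficients of exp(z) are 1/k!, up to a small aliasing error
print(taylor_coeff_np(np.exp, 8))   # ~ [1, 1, 0.5, 0.1667, 0.0417, ...]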
Exemplo n.º 53
0
def pwcausalr(
    x,
    Nr,
    Nl,
    porder,
    fs,
    freq=0
):  # Note: freq determines whether the frequency points are calculated or chosen
    from pylab import size, shape, real, log, conj, zeros, arange, array
    from numpy import linalg
    det = linalg.det
    import numpy as np  # Just for "sum"; can't remember what's wrong with pylab's sum
    [L, N] = shape(x)
    #L is the number of channels, N is the total points in every channel

    if freq == 0: F = timefreq(x[0, :], fs)  # Define the frequency points
    else: F = array(range(0, freq + 1))  # Or just pick them
    npts = size(F, 0)
    # Initialize arrays
    maxindex = np.sum(arange(1, L))
    pp = zeros((L, npts))
    # Define these separately: chained assignment from a single zeros() call would make them all alias the same array
    cohe = zeros((maxindex, npts))
    Fy2x = zeros((maxindex, npts))
    Fx2y = zeros((maxindex, npts))
    Fxy = zeros((maxindex, npts))
    index = 0

    for i in range(1, L):
        for j in range(i + 1, L + 1):
            y = zeros((2, N))  # Initialize y
            index = index + 1
            y[0, :] = x[i - 1, :]
            y[1, :] = x[j - 1, :]
            A2, Z2, tmp = armorf(y, Nr, Nl, porder)
            #fitting a model on every possible pair
            eyx = Z2[1, 1] - Z2[0, 1]**2 / Z2[0, 0]
            #corrected covariance
            exy = Z2[0, 0] - Z2[1, 0]**2 / Z2[1, 1]
            f_ind = 0
            for f in F:
                f_ind = f_ind + 1
                S2, H2 = spectrum_AR(A2, Z2, porder, f, fs)
                pp[i - 1, f_ind - 1] = abs(S2[0, 0] * 2)
                # revised
                if (i == L - 1) & (j == L):
                    pp[j - 1, f_ind - 1] = abs(S2[1, 1] * 2)
                    # revised
                cohe[index - 1,
                     f_ind - 1] = real(abs(S2[0, 1])**2 / S2[0, 0] / S2[1, 1])
                Fy2x[index - 1, f_ind - 1] = log(
                    abs(S2[0, 0]) /
                    abs(S2[0, 0] - (H2[0, 1] * eyx * conj(H2[0, 1])) / fs))
                #Geweke's original measure
                Fx2y[index - 1, f_ind - 1] = log(
                    abs(S2[1, 1]) /
                    abs(S2[1, 1] - (H2[1, 0] * exy * conj(H2[1, 0])) / fs))
                Fxy[index - 1, f_ind - 1] = log(
                    abs(S2[0, 0] - (H2[0, 1] * eyx * conj(H2[0, 1])) / fs) *
                    abs(S2[1, 1] -
                        (H2[1, 0] * exy * conj(H2[1, 0])) / fs) / abs(det(S2)))

    return F, pp, cohe, Fx2y, Fy2x, Fxy
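For orientation, a hypothetical call of pwcausalr on two noisy channels. The values of Nr (number of segments), Nl (segment length), porder (AR model order) and fs below are made up, and the helpers the function relies on (timefreq, armorf, spectrum_AR) are assumed to be importable from the same module:

import numpy as np

fs = 200.0
t = np.arange(0, 10, 1.0 / fs)
# two noisy channels: rows are channels, columns are samples
x = np.vstack([np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size),
               np.cos(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)])

F, pp, cohe, Fx2y, Fy2x, Fxy = pwcausalr(x, Nr=1, Nl=x.shape[1], porder=8, fs=fs, freq=100)
# pp: power spectrum per channel; cohe: coherence per channel pair;
# Fx2y / Fy2x: directed (Granger) influence per pair; Fxy: instantaneous term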
Exemplo n.º 54
0
n_reS11 = keys.index('RE[S11]')
n_imS11 = keys.index('IM[S11]')
n_reS21 = keys.index('RE[S21]')
n_imS21 = keys.index('IM[S21]')

freq = data[:,n_freq]
S11 = data[:,n_reS11]+1j*data[:,n_imS11]
S21 = data[:,n_reS21]+1j*data[:,n_imS21]


for label, y, x in (('S11', S11, freq), ('S21',S21, freq)):
    cur = curve.Curve()
    cur.set_data(pandas.Series(y, index=x))
    #res, fit_curve = cur.fit('lorentz_complex_sam')
    res, fit_curve = cur.fit('lorentz_complex_thibault')
    plot(real(fit_curve.data),imag(fit_curve.data), label='fit Q='+str(fit_curve.params['Q_c'])+';f='+str(fit_curve.params['omega_0']))
    #plot(real(fit_curve.data),imag(fit_curve.data), label='fit Q='+str(fit_curve.params['Q'])+';f='+str(fit_curve.params['x0']))
    #plot(real(cur.data),imag(cur.data), 'o', label=label)
title(filename)
legend()

savefig(filename + '.pdf')
savefig(filename + '.png')

show()






Exemplo n.º 55
0
                mpos = m,
                loc = (1, 0, 1))

f = PowerSpectra(time_data = t,
                 window = 'Hanning',
                 overlap = '50%',
                 block_size = 4096)
###################################################################
### Plotting ###
from pylab import figure,plot,show,xlim,ylim,xscale,xticks,xlabel,ylabel,grid,real
from acoular import L_p

band = 3  # octave: 1 ;   1/3-octave: 3
(f_borders, p, f_center) = barspectrum(real(f.csm[:,0,0]), f.fftfreq(), band)

label_freqs = [str(int(_)) for _ in f_center]

figure(figsize=(20, 6))

plot(f_borders, L_p(p))

xlim(f_borders[0]*2**(-1./6), f_borders[-1]*2**(1./6))
ylim(40, 90)

xscale('symlog')
xticks(f_center, label_freqs)
xlabel('f in Hz')
ylabel('SPL in dB')
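The 2**(-1./6) and 2**(1./6) factors in the xlim call are the lower and upper edge ratios of a third-octave band around each centre frequency. A small sketch of that convention with base-two band spacing (the helper name and frequency range are my own choices, not acoular's API):

import numpy as np

def third_octave_bands(f_lo=50.0, f_hi=10000.0, f_ref=1000.0):
    # base-two third-octave centre frequencies covering [f_lo, f_hi], plus band edges
    k = np.arange(np.ceil(3 * np.log2(f_lo / f_ref)),
                  np.floor(3 * np.log2(f_hi / f_ref)) + 1)
    f_center = f_ref * 2.0 ** (k / 3.0)
    return f_center * 2.0 ** (-1.0 / 6.0), f_center, f_center * 2.0 ** (1.0 / 6.0)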
Exemplo n.º 56
0
    def update(self, new_img):
        self.canvas   = new_img.copy()
        self.trackNo +=1

        res_max = 0.
        for scale_rate in self.scale_ratios:
            template_size = scale_rate * self.window_sz_new
            # get subwindow at current estimated target position, to train the classifier
            x = self.get_subwindow(new_img, self.pos_list[-1], template_size)
            # calculate response of the classifier at all locations
            k = self.dense_gauss_kernel(self.sigma, x, self.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(self.alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            if res_max< np.max(r):
               res_row = int(row*scale_rate)
               res_col = int(col*scale_rate)
               self.window_sz_new = template_size
               self.target_sz = self.target_sz*scale_rate
               res_ave, res_max, self.psr = self.response_win_ave_max(response, col, row, winsize=12)
               self.scale_rate = scale_rate


        # new position: previous position shifted by the response peak offset [res_row, res_col] relative to the window centre
        pos = self.pos_list[-1] - pylab.floor(self.window_sz_new / 2) + [res_row, res_col]

        rect = pylab.array([pos[1] - self.target_sz[1] / 2, pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
        rect = rect.astype(int)  # np.int was removed from NumPy; use the builtin int
        #print (self.target_sz, self.psr, self.scale_rate)
        if debug:
            if self.trackNo == 1:
                #pylab.ion()  # interactive mode on
                self.fig, self.axes = pylab.subplots(ncols=3)
                self.fig.show()
                # We need to draw the canvas before we start animating...
                self.fig.canvas.draw()

                k_img = self.axes[0].imshow(k,animated=True)
                x_img = self.axes[1].imshow(x,animated=True)
                r_img = self.axes[2].imshow(response,animated=True)

                self.subimgs = [k_img, x_img, r_img]
                # Let's capture the background of the figure
                self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]

                # tracking_rectangle = pylab.Rectangle((0, 0), 0, 0)
                # tracking_rectangle.set_color((1, 0, 0, 0.5))
                # tracking_figure_axes.add_patch(tracking_rectangle)
                #
                # gt_point = pylab.Circle((0, 0), radius=5)
                # gt_point.set_color((0, 0, 1, 0.5))
                # tracking_figure_axes.add_patch(gt_point)
                # tracking_figure_title = tracking_figure.suptitle("")
                pylab.show(block=False)
                #self.fig.show()
            else:
                self.subimgs[0].set_data(k)
                self.subimgs[1].set_data(x)
                self.subimgs[2].set_data(response)
                items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
                for j, (subimg, ax, background) in items:
                    self.fig.canvas.restore_region(background)
                    ax.draw_artist(subimg)
                    self.fig.canvas.blit(ax.bbox)
                pylab.show(block=False)

        if self.psr > 10:
            # tracking is confident (PSR > 10): compute new_alphaf and take the observed x as the new z
            x = self.get_subwindow(new_img, pos, self.window_sz_new)

            # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
            k = self.dense_gauss_kernel(self.sigma, x)
            new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
            new_z = x

            # subsequent frames, interpolate model
            f = self.interpolation_factor
            self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
            self.z = (1 - f) * self.z + f * new_z


        self.roi_list.append(self.get_imageROI(new_img, rect))
        self.pos_list.append(pos)
        self.rect_list.append(rect)
        ok = 1
        return ok, rect, self.psr
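The update above only adapts the model when self.psr > 10; response_win_ave_max is not shown here, but the PSR it returns is assumed to follow the usual peak-to-sidelobe-ratio definition from Bolme et al.'s MOSSE tracker. A minimal sketch (the function name and the 11-pixel exclusion window are my own choices):

import numpy as np

def peak_to_sidelobe_ratio(response, peak_row, peak_col, exclude=11):
    resp = np.asarray(response, dtype=float)
    peak = resp[peak_row, peak_col]
    half = exclude // 2
    mask = np.ones(resp.shape, dtype=bool)
    mask[max(0, peak_row - half):peak_row + half + 1,
         max(0, peak_col - half):peak_col + half + 1] = False   # exclude the area around the peak
    sidelobe = resp[mask]
    return (peak - sidelobe.mean()) / (sidelobe.std() + 1e-12)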