def dense_gauss_kernel(sigma, x, y=None):
    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        yf = xf
        yy = xx

    xyf = pylab.multiply(xf, pylab.conj(yf))

    xyf_ifft = pylab.ifft2(xyf)
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    return k
Example #2
def determineCuts(spec2d, dCutFactor=4.0, dAddPix=10, bPlot=True):
    
    vproj = spec2d.sum(1)
    dvproj = vproj - pylab.roll(vproj, 1)  # derivative
    
    index  = pylab.arange(0,len(vproj),1)
    index1 = index[dvproj > dvproj.max()/dCutFactor]
    start  = index1[0]
    end    = index1[-1]
    
    startWide = start-dAddPix
    endWide   = end+dAddPix 
    
    if bPlot :
        pylab.figure(12)
        pylab.clf()

        pylab.subplot(2,2,1)
        pylab.plot(vproj)

        pylab.subplot(2,2,2)
        pylab.plot(pylab.absolute(dvproj))
        pylab.axhline(dvproj.max()/dCutFactor)

        pylab.subplot(2,2,3)
        pylab.plot(vproj)
        pylab.axvline(start)
        pylab.axvline(end)

        pylab.axvline(startWide)
        pylab.axvline(endWide)

    return [startWide, endWide]
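# Usage sketch for determineCuts (illustrative only: the synthetic spectrum and
# the parameter values below are assumptions, not taken from the original
# pipeline). A bright horizontal band is planted and its widened cut window is
# printed.
import numpy
demo_spec = numpy.random.poisson(5.0, size=(200, 764)).astype(float)
demo_spec[80:120, :] += 50.0  # bright band whose vertical extent we want
cut_lo, cut_hi = determineCuts(demo_spec, dCutFactor=4.0, dAddPix=10, bPlot=False)
print("widened cut window:", cut_lo, cut_hi)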
Example #3
def fft_based(input_signal, filter_coefficients, boundary=0):
    """applied fft if the signal is too short to be splitted in windows
    Params :
        input_signal : the audio signal
        filter_coefficients : coefficients of the chirplet bank
        boundary : manage the bounds of the signal
    Returns :
        audio signal with application of fast Fourier transform
    """
    num_coeffs = filter_coefficients.size
    half_size = num_coeffs//2

    if boundary == 0:#ZERO PADDING
        input_signal = np.lib.pad(input_signal, (half_size, half_size), 'constant', constant_values=0)
        filter_coefficients = np.lib.pad(filter_coefficients, (0, input_signal.size-num_coeffs), 'constant', constant_values=0)
        newx = ifft(fft(input_signal)*fft(filter_coefficients))
        return newx[num_coeffs-1:-1]

    elif boundary == 1:#symmetric
        input_signal = concatenate([flipud(input_signal[:half_size]), input_signal, flipud(input_signal[half_size:])])
        filter_coefficients = np.lib.pad(filter_coefficients, (0, input_signal.size-num_coeffs), 'constant', constant_values=0)
        newx = ifft(fft(input_signal)*fft(filter_coefficients))
        return newx[num_coeffs-1:-1]

    else:#periodic
        return roll(ifft(fft(input_signal)*fft(filter_coefficients, input_signal.size)), -half_size).real
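# Usage sketch for fft_based. Hedged: the signal and the stand-in filter below
# are arbitrary illustrative values, and the imports that fft_based itself
# relies on (np, fft, ifft, concatenate, flipud, roll) are assumed to be in
# scope. With zero padding the result should match a direct 'same' convolution.
import numpy as np
demo_signal = np.random.randn(256)
demo_coeffs = np.hanning(32)  # stand-in for one chirplet's coefficients
filtered = fft_based(demo_signal, demo_coeffs, boundary=0)
reference = np.convolve(demo_signal, demo_coeffs, mode='same')
print(filtered.shape, np.allclose(filtered.real, reference))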
Example #4
    def SW_run(self):
        """Runs in own thread - reads data via Swabian box"""
        self.measure_data = pl.zeros((50, ))
        try:
            self.tag = createTimeTagger()
            self.tag.setTriggerLevel(0, 0.15)
            self.tag.setTriggerLevel(1, 0.15)
            self.ctr = Counter(self.tag, [0, 1], int(1e9), int(self.dt))
            while self.running:
                time.sleep(self.dt / 1000.)
                rates = self.ctr.getData()
                if self.detID == 0:
                    newCount = pl.mean(rates[0]) * 1000
                elif self.detID == 1:
                    newCount = pl.mean(rates[1]) * 1000
                else:
                    newCount = (pl.mean(rates[0]) + pl.mean(rates[1])) * 1000
                self.measure_data[0] = newCount
                self.measure_data = pl.roll(self.measure_data, -1)
                print(self.measure_data)
                self.gotdata = True

        finally:
            self.ctr.stop()
            self.tag.reset()
            self.running = False
            self.btn.setEnabled(True)
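# Hardware-free sketch of the rolling-trace pattern used in SW_run above (and
# PH_run below): the newest reading is written into slot 0 and the buffer is
# rolled by -1, so the trace always holds the latest 50 values with the most
# recent one at the end. The loop below just feeds in dummy readings.
import pylab as pl
trace = pl.zeros((50, ))
for reading in range(1, 101):  # stand-in for counter readings
    trace[0] = reading
    trace = pl.roll(trace, -1)
print(trace[-1], trace[-5:])  # 100.0 and the five most recent readings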
Example #5
 def PH_run(self):
     """Runs in own thread - reads data via Picoharp counters"""
     self.measure_data = pl.zeros((50, ))
     try:
         self.ph = ph.PicoHarp()
         if self.dt < 0.1:
             print "Integration time too short! Changed to 0.1s"
             self.dt = 0.1
             self.time.setValue(0.1)
         while self.running:
             nbOfIntegrations = int(self.dt / 100)
             newCount = 0
             for i in range(nbOfIntegrations):
                 time.sleep(0.1)  # 100 ms gate time on PicoHarp counters
                 rates = self.ph.getCountRates()
                 if self.detID == 0:
                     newCount += rates[
                         0] * 0.1  # counts on 100 ms integration bin
                 elif self.detID == 1:
                     newCount += rates[
                         1] * 0.1  # counts on 100 ms integration bin
                 else:
                     newCount += sum(
                         rates) * 0.1  # counts on 100 ms integration bin
             self.measure_data[0] = newCount / (float(self.dt) / 1000)
             self.measure_data = pl.roll(self.measure_data, -1)
             #print self.measure_data
             self.gotdata = True
     finally:
         self.running = False
         self.ph.close()
         self.btn.setEnabled(True)
Example #6
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (i.e., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """

    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
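# Usage sketch for dense_gauss_kernel (illustrative assumptions: a random 64x64
# patch, a Hann cosine window, and sigma=0.2; none of these come from the
# snippet above). When y is a circular shift of x, the peak of the response map
# sits at the offset corresponding to that shift.
import pylab
win = pylab.outer(pylab.hanning(64), pylab.hanning(64))
x_patch = pylab.rand(64, 64) * win
y_patch = pylab.roll(pylab.roll(x_patch, 3, axis=0), -5, axis=1)
k = dense_gauss_kernel(0.2, x_patch, y_patch)
print(k.shape, pylab.unravel_index(k.argmax(), k.shape))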
Example #8
def dense_gauss_kernel(sigma, x, y=None):
    """
    通过高斯核计算余弦子窗口图像块的响应图
    利用带宽是 sigma 的高斯核估计两个图像块 X (MxN) 和 Y (MxN) 的关系。X, Y 是循环的、经余弦窗处理的。输出结果是
    响应图矩阵 MxN. 如果 X = Y, 则函数调用时取消 y,则加快计算。
    该函数对应原文中的公式 (16),以及算法1中的 function k = dgk(x1, x2, sigma)
    :param sigma: 高斯核带宽
    :param x: 余弦子窗口图像块
    :param y: 空或者模板图像块
    :return: 响应图
    """
    # 计算图像块 x 的傅里叶变换
    xf = pylab.fft2(x)  # x in Fourier domain
    # 把图像块 x 拉平
    x_flat = x.flatten()
    # 计算 x 的2范数平方
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # 一半情况, x 和 y 是不同的,计算 y 的傅里叶变化和2范数平方
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # x 的自相关,避免重复计算
        yf = xf
        yy = xx

    # 傅里叶域的互相关计算,逐元素相乘
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # 转化为频率域
    xyf_ifft = pylab.ifft2(xyf)
    # 对频率域里的矩阵块进行滚动平移,分别沿 row 和 col 轴
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # 计算高斯核响应图
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy

    return pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))
Example #9
def forward_algorithm(match, skip=-5.0):
    v = skip * arange(len(match[0]))
    result = []
    for i in range(0, len(match)):
        w = roll(v, 1).copy()
        w[0] = skip * i
        v = log_add(log_mul(v, match[i]), log_mul(w, match[i]))
        result.append(v)
    return array(result, 'f')
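# Hedged sketch of how forward_algorithm might be exercised. log_add and
# log_mul are not defined in this snippet; the stand-ins below assume the usual
# log-space semantics (log_mul is addition, log_add is element-wise logaddexp),
# which is how such forward passes are commonly written.
from numpy import arange, array, logaddexp, roll

def log_mul(a, b):
    return a + b  # multiplication of probabilities in log space

def log_add(a, b):
    return logaddexp(a, b)  # addition of probabilities in log space

match_demo = array([[0.0, -1.0, -2.0],
                    [-1.0, 0.0, -1.0],
                    [-2.0, -1.0, 0.0]])  # toy log-match scores, 3 frames x 3 states
print(forward_algorithm(match_demo, skip=-5.0))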
Example #10
def findspectralLines(data, cutLevel=0.866, bPlot=True):  # 0.02 is good up to cad 4000 -> 0.01
    # moving average 3, smooth data
    data = 1/3.0*(data + pylab.roll(data,1) + pylab.roll(data,2))
    #normalise
    ndata   = data/data.max()
    # where values are above the cut, assign the value -1
    # same basic idea as findCalibration, except the area of interest is defined as below rather than above the cut value
    lines=numpy.where(ndata>cutLevel,-1,ndata)
    coords = []
    
    # find start and end values of each line
    for index, item in enumerate(lines):
        if lines[index-1]==-1 and item !=-1:
            coords.append(index)
        elif  index==764:
            break
        elif lines[index+1]==-1 and item !=-1:
            coords.append(index)

    coordPairs = pylab.reshape(coords, (len(coords) // 2, 2))

    
    if bPlot:
        pylab.figure(10)
        pylab.clf()
        pylab.plot(ndata,label='3 point moving average, 1/3.0*(data + pylab.roll(data,1) + pylab.roll(data,2))')
        pylab.xlabel('Pixels')
        pylab.ylabel('Normalised "Smoothed" yValues')
        pylab.axhline(cutLevel,color='r',label='Threshold=%s'%(cutLevel))
        pylab.title('Spectral Lines Found')
        pylab.legend(loc='best',prop={'size':8})

        for lineRange in coordPairs :
            pylab.axvline(lineRange[0],color='r',ls='--')
            pylab.axvline(lineRange[1],color='r',ls='--')

        print('findspectralLines> line coordinates :', coords)
        print('findspectralLines> length of list   :', len(coords))
        print('findspectralLines> pairs of coordinates :\n', coordPairs)

    return [coordPairs]
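# Usage sketch for findspectralLines (illustrative: a 765-sample synthetic
# spectrum, chosen to match the hard-coded break at index 764 above, with two
# bright "lines" planted on a flat background).
import numpy
demo = numpy.ones(765) * 0.1
demo[200:210] = 1.0
demo[500:515] = 0.9
(pairs,) = findspectralLines(demo, cutLevel=0.866, bPlot=False)
print(pairs)  # start/end pixel of each detected line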
Example #11
def calcaV(W,method = "ratio"):
    """Calculate aV"""
    if method == "ratio":
        return pl.log(pl.absolute(W/pl.roll(W,-1,axis=1)))
    else:
        aVs = pl.zeros(pl.shape(W))
        n = pl.arange(1,pl.size(W,axis=1)+1)
        f = lambda b,t,W: W - b[0] * pl.exp(-b[1] * t)
        
        for i in range(pl.size(W, axis=0)):
            params,result = optimize.leastsq(f,[1.,1.],args=(n,W[i]))
            aVs[i] = params[1] * pl.ones(pl.shape(W[i]))
            
        return aVs
Example #12
    def _extract_onsets(self):
        """
        ::

           The simplest onset detector in the world: power envelope derivative zero crossings +/-
        """
        fp = self._check_feature_params()
        if not self._have_power:
            return None
        dd = P.diff(P.r_[0, self.POWER])
        self.ONSETS = P.where((dd > 0) & (P.roll(dd, -1) < 0))[0]
        if self.verbosity:
            print("Extracted ONSETS")
        self._have_onsets = True
        return True
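# Standalone sketch of the onset rule used above (illustrative, outside the
# class): a point is flagged where the power-envelope derivative goes from
# positive to negative, i.e. dd > 0 while the next dd < 0, which picks out the
# local maxima of a toy envelope.
import pylab as P
power = P.sin(P.linspace(0, 6 * P.pi, 200)) ** 2  # toy power envelope
dd = P.diff(P.r_[0, power])
onsets = P.where((dd > 0) & (P.roll(dd, -1) < 0))[0]
print(onsets)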
Example #14
 def run(self):
     """Runs in own thread - reads data via input counter task"""
     self.measure_data = py.zeros((50, ))
     try:
         while self.running:
             self.ctr.data = py.zeros((10000, ), dtype=py.uint32)
             self.ctr.ReadCounterU32(10000, 10., self.ctr.data, 10000, None,
                                     None)
             self.ctr.StopTask()
             self.measure_data[0] = (self.ctr.data[-1]) * 10
             self.measure_data[py.isinf(self.measure_data)] = py.nan
             self.measure_data = py.roll(self.measure_data, -1)
             print(self.measure_data)
             self.gotdata = True
     finally:
         self.running = False
         self.ctr.StopTask()
         self.ctr.ClearTask()
         self.trig.ClearTask()
         self.btn.setEnabled(True)
Example #15
    def 取音訊且顯示頻譜於幕(它, 鍵盤=None):

        能量 = 它.音.en
        頻率 = 它.音.f0  # .fm or .f0 can also be used

        #頻率 *=2

        長度 = 能量**0.5 * 0.1

        x = 10  # 它.幕寬//2 # int(t*0.1)%它.幕寬

        y = 頻率 * 它.幕高  #
        y = 它.幕高 - y  #

        色 = 頻率轉顏色(頻率)

        方形 = (x, y, 長度, 10)

        #pg.draw.rect(它.幕, 色, 方形)

        #
        # The main spectrogram-drawing technique is here! The palette also has
        # to be prepared when the audio is started.
        #
        頻譜 = 它.音.specgram

        #
        # up_down flip: flip the spectrogram vertically so low frequencies sit
        # at the bottom and high frequencies at the top, which is more intuitive.
        #
        頻譜 = 頻譜[:, -1::-1]

        #
        # How to scale this spectrogram automatically so it displays well on
        # screen still needs some study.
        #
        頻譜 = (pl.log(頻譜) + 10) * 10

        #
        # Icing on the cake
        #
        # Adding this line makes the spectrogram scroll, fun!
        #
        if 鍵盤 == K_e:
            頻譜 = pl.roll(頻譜, -int(它.音.frameI % 它.音.frameN), axis=0)

        #
        # pygame's main contribution: spectrogram ---> audio surface
        #
        pg.surfarray.blit_array(它.音幕, 頻譜.astype('int'))

        #
        # pygame's minor contribution: rescale width/height, audio surface ---> aSurf
        #
        aSurf = pg.transform.scale(它.音幕, (它.幕寬, 它.幕高))  #//4))

        #
        # Blit onto the screen: aSurf ---> display
        #

        #aSurf= pg.transform.average_surfaces([aSurf, 它.攝影畫面])
        它.幕.blit(aSurf, (0, 0))

        pg.draw.rect(它.幕, 色, 方形)
        # This draws the pitch f0; it was originally drawn earlier, but the
        # spectrogram covered it, so it was moved here.

        #
        # Blit the camera frame on top; a quarter of the screen is enough, so
        # it does not steal the show.
        #
        #### bSurf= pg.transform.scale(它.攝影畫面, (它.幕寬//4, 它.幕高//4)) #//4))
        #### 它.幕.blit(bSurf, (0,0))

        #
        # 江永進's suggestion: draw a white line ahead of the spectrogram and
        # plot the energy and frequency trajectories.
        #
        if 鍵盤 == K_f:
            x = (它.音.frameI % 它.音.frameN) * 它.幕寬 / 它.音.frameN
            h = 它.幕高
            pg.draw.line(它.幕, pg.Color('white'), (x, h), (x, 0), 2)

            y = 長度
            y = h - y

            z = 頻率
            z = h - z

            if 它.音.frameI % 它.音.frameN == 0:
                它.能量點列表 = [(x, y)]
                它.頻率點列表 = [(x, z)]
            else:
                if 它.能量點列表[-1] != (x, y):
                    它.能量點列表 += [(x, y)]
                if 它.頻率點列表[-1] != (x, z):
                    它.頻率點列表 += [(x, z)]
                pass

            if len(它.能量點列表) > 1:
                pg.draw.lines(它.幕, pg.Color('black'), False, 它.能量點列表, 1)

            if len(它.頻率點列表) > 1:
                pg.draw.lines(它.幕, pg.Color('blue'), False, 它.頻率點列表, 2)
def PolyArea(x, y):
    # calculate the area of an arbitrary polygon with given vertices (shoelace formula)
    return 0.5 * abs(pylab.dot(x, pylab.roll(y, 1)) - pylab.dot(y, pylab.roll(x, 1)))
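# Quick check of PolyArea with the shoelace formula (illustrative): the unit
# square has area 1 and a 3-4-5 right triangle has area 6.
import pylab
print(PolyArea(pylab.array([0, 1, 1, 0]), pylab.array([0, 0, 1, 1])))  # 1.0
print(PolyArea(pylab.array([0, 3, 0]), pylab.array([0, 0, 4])))  # 6.0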
Example #17
    mean = p.mean(psp_voltage, axis=0)
    std = p.std(psp_voltage, axis=0)
    p.figure()
    p.title("mean and standard deviation, ideal trigger")
    p.plot(time, mean, 'r-')
    p.fill_between(time, mean - std, mean + std, alpha=.3)
    p.xlabel("time / AU")
    p.ylabel("voltage / AU")
    insert_params()

    kernel = p.ones(sliding_average_len) / float(sliding_average_len)
    mean_max_index = p.argmax(mean)
    for i in range(len(psp_voltage)):
        smoothed = p.convolve(psp_voltage[i], kernel, "same")
        shift = mean_max_index - p.argmax(smoothed)
        psp_voltage[i] = p.roll(smoothed, shift)

    p.figure()
    p.title("mean and standard deviation, max trigger")
    mean_shifted = p.mean(psp_voltage, axis=0)
    std = p.std(psp_voltage, axis=0)
    p.plot(time, mean_shifted, 'r-')
    p.plot(time, mean, 'k--', alpha=.7)
    p.ylim(offset - height * .2, None)
    p.fill_between(time, mean - std, mean + std, alpha=.3)
    p.xlabel("time / AU")
    p.ylabel("voltage / AU")
    insert_params()

    p.show()
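# Standalone sketch of the smooth-and-align step above (illustrative values,
# not from the original data): each trace is smoothed with a box kernel and
# circularly shifted so its peak lines up with the peak of the ensemble mean.
import pylab as p
traces = p.array([p.roll(p.exp(-((p.arange(100) - 50) ** 2) / 20.0), s)
                  for s in (-4, 0, 3, 7)])  # jittered toy PSPs
kernel = p.ones(5) / 5.0
ref_peak = p.argmax(p.mean(traces, axis=0))
for i in range(len(traces)):
    smoothed = p.convolve(traces[i], kernel, "same")
    traces[i] = p.roll(smoothed, ref_peak - p.argmax(smoothed))
print(p.argmax(traces, axis=1))  # all peaks now coincide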
Example #18
def step_lax_wendroff(U,lam):
    Uip1 = pylab.roll(U, -1)
    Uim1 = pylab.roll(U, +1)
    return U - 0.5*A*lam * (Uip1 - Uim1) + 0.5*(A*lam)**2 * (Uip1 - 2*U + Uim1)
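# Minimal driver for step_lax_wendroff (a sketch: the advection speed A, the
# periodic grid and the CFL number below are illustrative assumptions; the
# stepper itself reads A from module scope).
import pylab
A = 1.0  # advection speed
x = pylab.linspace(-1.0, 1.0, 200, endpoint=False)
dx = x[1] - x[0]
dt = 0.5 * dx / A  # CFL number 0.5
lam = dt / dx
U = pylab.exp(-50.0 * x ** 2)  # initial Gaussian pulse
for _ in range(int(0.5 / dt)):  # advect for half a time unit
    U = step_lax_wendroff(U, lam)
print(x[pylab.argmax(U)])  # pulse centre has moved to about x = 0.5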
Example #19
    def acquisition(self):
        axisFont = QFont('Lucida')
        axisFont.setPointSize(30)

        axisPen = pg.mkPen({'color': "#FFF", 'width': 2})
        plotPen = pg.mkPen({'width': 4, 'color': 'y'})
        self.plotMaxPen = pg.mkPen({'width': 2, 'color': 'r'})
        self.plotMinPen = pg.mkPen({'width': 2, 'color': 'b'})

        bottom = self.ui.graphicsView.getPlotItem().getAxis('bottom')
        left = self.ui.graphicsView.getPlotItem().getAxis('left')

        bottom.setStyle(tickTextOffset=30, tickLength=10)
        left.setStyle(tickTextOffset=10, tickLength=10)

        bottom.setPen(axisPen)
        left.setPen(axisPen)

        bottom.tickFont = axisFont
        left.tickFont = axisFont

        labelStyle = {'color': '#FFF', 'font-size': '20pt'}
        bottom.setLabel(text='Time', units='s', **labelStyle)
        if self.meas_mode == 'current':
            left.setLabel(text='Current', units='mA', **labelStyle)
        else:
            left.setLabel(text='Voltage', units='V', **labelStyle)
        bottom.setHeight(100)
        left.setWidth(120)
        self.ui.graphicsView.setYRange(self.voltMin, self.voltMax)

        plot = self.ui.graphicsView.plot(pen=plotPen)
        self.plotMax = self.ui.graphicsView.plot(pen=self.plotMaxPen)
        self.plotMin = self.ui.graphicsView.plot(pen=self.plotMinPen)

        self.voltmeter.start()
        while self.acquire:

            if self.timeParamsChanged:
                nbOfSteps = int(
                    abs(py.floor(float(self.timeWindow) / self.timeStep)) + 1)
                timeArray = py.linspace(-self.timeWindow, 0, nbOfSteps)
                measurementArray = py.zeros(nbOfSteps)

                self.timeParamsChanged = False

            if self.voltmeter.measToRead:
                self.voltmeter.measToRead = False
                if self.meas_mode == 'current':
                    measurementArray[
                        0] = self.scalingFactor * self.voltmeter.measurementPoint * 1000
                else:
                    measurementArray[
                        0] = self.scalingFactor * self.voltmeter.measurementPoint
                measurementArray = py.roll(measurementArray, -1)
                plot.setData(timeArray, measurementArray)
                self.ui.nbReading.display('{:.5f}'.format(
                    measurementArray[-1]))

                if self.maxReadingReset or measurementArray[-1] > self.maxValue:
                    self.maxValue = measurementArray[-1]
                    self.ui.nbMaxReading.display('{:.5f}'.format(
                        self.maxValue))
                    self.plotMax.setData(
                        py.array([timeArray[0], timeArray[-1]]),
                        py.array([self.maxValue, self.maxValue]))
                    self.maxReadingReset = False

                if self.minReadingReset or measurementArray[-1] < self.minValue:
                    self.minValue = measurementArray[-1]
                    self.ui.nbMinReading.display('{:.5f}'.format(
                        self.minValue))
                    self.plotMin.setData(
                        py.array([timeArray[0], timeArray[-1]]),
                        py.array([self.minValue, self.minValue]))
                    self.minReadingReset = False

            self.app.processEvents()

        self.voltmeter.stop()
Example #20
def autoCor(Ps,t):
    """Calculates autocorrelation function"""
    meanP = pl.mean(Ps)
    return pl.mean((Ps - meanP) * (pl.roll(Ps,-t) - meanP))
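# Usage sketch for autoCor (illustrative): for a pure sine sampled at roughly
# 100 points per period, the autocorrelation is largest at lag 0, near zero a
# quarter period later, and most negative half a period later.
import pylab as pl
Ps = pl.sin(pl.linspace(0, 20 * pl.pi, 1000))
print(autoCor(Ps, 0), autoCor(Ps, 25), autoCor(Ps, 50))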
Example #21
def advection(f, name):
    L1_values = list()
    dx_values = list()
    n_dx = 6  # exponent of the dx step
    for i in range(n_dx):

        #Discretizing Space
        #---------------------------------------------------------------------------
        A = 1.  #advection speed

        a = -1.  # min x
        b = 1.  # max x
        nx = 100 * 2**i  # number of x steps
        h = (b - a) / nx  # the step size in x
        x, dx = pylab.linspace(a, b, nx + 1, retstep=True)
        dx_values.append(dx)

        t_f = 2 / A
        t_i = 0.
        k = abs(.5 * h / A)

        n_graphs = 11  # rough number (minus 1) of graphs of U that will be displayed

        #Initial Values for U
        #---------------------------------------------------------------------------

        U = [f(xi, pylab.pi) for xi in x]
        U_exact = [f(xi, pylab.pi) for xi in x]
        #U_un = pylab.array([gaussian(xi,pylab.pi) for xi in x])
        if i == n_dx - 1:
            pylab.figure()
            pylab.subplot(2, 1, 1)
            pylab.xlabel("x")
            pylab.ylabel("U")
            pylab.title("U vs x at various times for initial condition: %s" %
                        name)

        #Integration
        #---------------------------------------------------------------------------
        t = t_i
        iter = 0

        while t <= t_f:
            Uim1 = pylab.roll(U, 1)
            Uip1 = pylab.roll(U, -1)
            U = .5 * (Uim1 + Uip1) - .5 * k * (Uip1 - Uim1) / dx

            #Uim1_un = pylab.roll(U_un,1)
            #Uip1_un = pylab.roll(U_un,-1)
            #U_un = U_un - .5*k*(Uip1_un-Uim1_un)/dx

            t += k
            iter += 1
            if iter % (2 * nx // n_graphs) == 0 and i == n_dx - 1:
                pylab.plot(x, U)
                #pylab.plot(x,U_un)

        L1_norm = dx * sum(abs(U - U_exact))
        L1_values.append(L1_norm)

    pylab.subplot(2, 1, 2)
    pylab.loglog(dx_values, L1_values, '-o')
    pylab.xlabel("dx")
    pylab.ylabel("L1_norm")

    slope = (pylab.log(L1_values[n_dx - 1]) - pylab.log(L1_values[0])) / (
        pylab.log(dx_values[n_dx - 1]) - pylab.log(dx_values[0]))
    print(name)
    print("The slope of convergence is : %f" % slope)
Example #23
def build_fft(input_signal,
              filter_coefficients,
              threshold_windows=6,
              boundary=0):
    """generate fast transform fourier by windows
    Params :
        input_signal : the audio signal
        filter_coefficients : coefficients of the chirplet bank
        threshold_windows : calcul the size of the windows
        boundary : manage the bounds of the signal
    Returns :
        fast Fourier transform applied by windows to the audio signal

    """
    num_coeffs = filter_coefficients.size
    #print(n,boundary,M)
    half_size = num_coeffs // 2
    signal_size = input_signal.size
    #power of 2 to apply fast fourier transform
    windows_size = 2**ceil(log2(num_coeffs * (threshold_windows + 1)))
    number_of_windows = floor(signal_size // windows_size)

    if number_of_windows == 0:
        return fft_based(input_signal, filter_coefficients, boundary)

    windowed_fft = empty_like(input_signal)
    #pad with 0 to have a size in a power of 2
    windows_size = int(windows_size)

    zeropadding = np.lib.pad(filter_coefficients,
                             (0, windows_size - num_coeffs),
                             'constant',
                             constant_values=0)

    h_fft = fft(zeropadding)

    #to browse the whole signal
    current_pos = 0

    #apply fft to a part of the signal. This part has a size which is a power
    #of 2
    if boundary == 0:  #ZERO PADDING

        # window is half padded with zeros since it is focused on the first half
        window = input_signal[current_pos:current_pos + windows_size -
                              half_size]
        zeropaddedwindow = np.lib.pad(window, (len(h_fft) - len(window), 0),
                                      'constant',
                                      constant_values=0)
        x_fft = fft(zeropaddedwindow)

    elif boundary == 1:  #SYMMETRIC
        window = concatenate([
            flipud(input_signal[:half_size]),
            input_signal[current_pos:current_pos + windows_size - half_size]
        ])
        x_fft = fft(window)

    else:
        x_fft = fft(input_signal[:windows_size])

    windowed_fft[:windows_size - num_coeffs] = (ifft(
        x_fft * h_fft)[num_coeffs - 1:-1]).real

    current_pos += windows_size - num_coeffs - half_size
    # apply the fast Fourier transform to each window
    while current_pos + windows_size - half_size <= signal_size:

        x_fft = fft(input_signal[current_pos - half_size:current_pos +
                                 windows_size - half_size])
        # suppress the complex warning by working on the real part only
        windowed_fft[current_pos:current_pos + windows_size -
                     num_coeffs] = (ifft(x_fft * h_fft)[num_coeffs -
                                                        1:-1]).real
        current_pos += windows_size - num_coeffs
    # print(countloop)
    #apply fast fourier transform to the rest of the signal
    if windows_size - (signal_size - current_pos + half_size) < half_size:

        window = input_signal[current_pos - half_size:]
        zeropaddedwindow = np.lib.pad(
            window,
            (0, int(windows_size - (signal_size - current_pos + half_size))),
            'constant',
            constant_values=0)
        x_fft = fft(zeropaddedwindow)
        windowed_fft[current_pos:] = roll(ifft(
            x_fft * h_fft), half_size)[half_size:half_size +
                                       windowed_fft.size - current_pos].real
        windowed_fft[-half_size:] = convolve(input_signal[-num_coeffs:],
                                             filter_coefficients,
                                             'same')[-half_size:]
    else:

        window = input_signal[current_pos - half_size:]
        zeropaddedwindow = np.lib.pad(
            window,
            (0, int(windows_size - (signal_size - current_pos + half_size))),
            'constant',
            constant_values=0)
        x_fft = fft(zeropaddedwindow)
        windowed_fft[current_pos:] = ifft(
            x_fft * h_fft)[num_coeffs - 1:num_coeffs + windowed_fft.size -
                           current_pos - 1].real

    return windowed_fft
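# Usage sketch for build_fft (hedged: the signal length and the stand-in filter
# are illustrative, and the imports build_fft itself relies on, e.g. fft, ifft,
# empty_like, ceil, log2, floor, are assumed to be in scope). Up to numerical
# error the windowed result should match a direct 'same'-mode convolution.
import numpy as np
demo_signal = np.random.randn(2 ** 14)
demo_coeffs = np.hanning(64)
out = build_fft(demo_signal, demo_coeffs, threshold_windows=6, boundary=0)
ref = np.convolve(demo_signal, demo_coeffs, mode='same')
print(out.shape, float(np.abs(out - ref).max()))  # expect a tiny maximum difference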
Example #24
def step_lax_fried(U,lam):
    Uim1 = pylab.roll(U,1)
    Uip1 = pylab.roll(U,-1)
    return .5*(Uim1 + Uip1) - .5*(lam)*(Uip1-Uim1)
Example #25
    def takeAudioAndDisplay(self, keyboard=None):

        en = self.audio.en
        frequency = self.audio.f0  # .fm or .f0 can also be used

        #frequency *=2

        length = en**0.5 * 0.1

        x = 10  # self.screenWidth//2 # int(t*0.1)%self.screenWidth

        y = frequency * self.screenHeigth  #
        y = self.screenHeigth - y  #

        color = frequency2color(frequency)

        rect = (x, y, length, 10)

        #pg.draw.rect(self.screen, color, rect)

        #
        # The main spectrogram-drawing technique is here! The palette also has
        # to be prepared when the audio is started.
        #
        specgram = self.audio.specgram

        #
        # up_down flip: flip the spectrogram vertically so low frequencies sit
        # at the bottom and high frequencies at the top, which is more intuitive.
        #
        specgram = specgram[:, -1::-1]

        #
        # How to scale this specgram automatically so it displays well on
        # screen still needs some study.
        #
        specgram = (pl.log(specgram) + 10) * 10

        #
        # Icing on the cake
        #
        # Adding this line makes the spectrogram scroll, fun!
        #
        if keyboard == K_e:
            specgram = pl.roll(specgram,
                               -int(self.audio.frameI % self.audio.frameN),
                               axis=0)

        #
        # pygame's main contribution: specgram ---> audioScreen
        #
        pg.surfarray.blit_array(self.audioScreen, specgram.astype('int'))

        #
        # pygame's minor contribution: rescale width/height, audioScreen ---> aSurf
        #
        aSurf = pg.transform.scale(
            self.audioScreen, (self.screenWidth, self.screenHeigth))  #//4))

        #
        # Blit onto the screen: aSurf ---> display
        #

        #aSurf= pg.transform.average_surfaces([aSurf, self.videoShot])
        self.screen.blit(aSurf, (0, 0))

        pg.draw.rect(self.screen, color, rect)
        # This draws the pitch f0; it was originally drawn earlier, but the
        # spectrogram covered it, so it was moved here.

        #
        # Blit the camera frame on top; a quarter of the screen is enough, so
        # it does not steal the show.
        #
        bSurf = pg.transform.scale(
            self.videoShot,
            (self.screenWidth // 4, self.screenHeigth // 4))  #//4))
        self.screen.blit(bSurf, (0, 0))

        #
        # 江永進's suggestion: draw a white line ahead of the spectrogram and
        # plot the energy and frequency trajectories.
        #
        if keyboard == K_f:
            x = (self.audio.frameI %
                 self.audio.frameN) * self.screenWidth / self.audio.frameN
            h = self.screenHeigth
            pg.draw.line(self.screen, pg.Color('white'), (x, h), (x, 0), 2)

            y = length
            y = h - y

            z = frequency
            z = h - z

            if self.audio.frameI % self.audio.frameN == 0:
                self.enList = [(x, y)]
                self.f0List = [(x, z)]
            else:
                if self.enList[-1] != (x, y):
                    self.enList += [(x, y)]
                if self.f0List[-1] != (x, z):
                    self.f0List += [(x, z)]
                pass

            if len(self.enList) > 1:
                pg.draw.lines(self.screen, pg.Color('black'), False,
                              self.enList, 1)

            if len(self.f0List) > 1:
                pg.draw.lines(self.screen, pg.Color('blue'), False,
                              self.f0List, 2)
     print "Fill %s File %s has a problem (nrow of beam OR pltaggzero OR pltlumizero is 0 in input file)" % (
         fill, file)
     h5in.close()
     continue
 print "The file is %s" % (file)
 table = h5in.root.pltaggzero
 h5out = t.open_file(output_path_ + file, mode='w')
 outtable = h5out.create_table('/',
                               outtablename,
                               Lumitable,
                               filters=compr_filter,
                               chunkshape=chunkshape)
 rownew = outtable.row
 channel_counter = 0  #bxmask = h5in.root.beam[channel_counter/nchannels]['collidable'] needs it and it has to start from 0 for every file
 bxmask = h5in.root.beam[0]['collidable']
 leading = (py.logical_xor(bxmask, py.roll(bxmask, 1)) & bxmask)
 train = py.logical_xor(leading, bxmask)
 maskhigh = 0
 masklow = 0
 for d in disablech:  #Move to function at some point
     #masklow
     if (d == 0):
         masklow += 0x1  #Use hexadecimal numbers
     elif (d == 1):
         masklow += 0x2
     elif (d == 2):
         masklow += 0x10
     elif (d == 3):
         masklow += 0x20
     elif (d == 4):
         masklow += 0x100