Example #1
    def test_depth_kwarg(self):
        nz = 100
        zN2 = 0.5*nz**-1 + np.arange(nz, dtype=np.float64)/nz
        zU = 0.5*nz**-1 + np.arange(nz+1, dtype=np.float64)/nz
        N2 = np.full(nz, 1.)
        f0 = 1.
        #beta = 1e-6
        beta = 0.
        Nx = 1
        Ny = 1
        dx = .1
        dy = .1
        k = fft.fftshift( fft.fftfreq(Nx, dx) )
        l = fft.fftshift( fft.fftfreq(Ny, dy) )
        ubar = np.zeros(nz+1)
        vbar = np.zeros(nz+1)
        etax = np.zeros(2)
        etay = np.zeros(2)

        with self.assertRaises(ValueError):
            z, growth_rate, vertical_modes = modes.instability_analysis_from_N2_profile(
                zN2, N2, f0, beta, k, l, zU, ubar, vbar, etax, etay, depth=zN2[-5]
            )

        # no error expected
        z, growth_rate, vertical_mode = modes.instability_analysis_from_N2_profile(
                zN2, N2, f0, beta, k, l, zU, ubar, vbar, etax, etay, depth=1.1
        )
def analyze_audio(prefix):
    output = []
    rate, data = wavfile.read('%s.wav' % prefix)
    dt = 1./rate
    T = dt * data.shape[0]
    output.append('%s %s' % (dt, T))

    tvec = np.arange(0, T, dt)
    sig0 = data[:, 0]
    sig1 = data[:, 1]

    output.append('%s %s' % (np.sum(sig0), np.sum(sig1)))

    plt.clf()
    plt.plot(tvec, sig0)
    plt.plot(tvec, sig1)
    xtickarray = range(0, 12, 2)
    plt.xticks(xtickarray, ['%d s' % x for x in xtickarray])
    plt.savefig('%s_time.png' % prefix)

    plt.clf()
    samp_freq0 = fftpack.fftfreq(sig0.size, d=dt)
    sig_fft0 = fftpack.fft(sig0)
    samp_freq1 = fftpack.fftfreq(sig1.size, d=dt)
    sig_fft1 = fftpack.fft(sig1)
    plt.plot(np.log(np.abs(samp_freq0)+1e-9), np.abs(sig_fft0))
    plt.plot(np.log(np.abs(samp_freq1)+1e-9), np.abs(sig_fft1))
    plt.xlim(np.log(10), np.log(40e3))
    xtickarray = np.log(np.array([20, 1e2, 3e2, 1e3, 3e3, 10e3, 30e3]))
    plt.xticks(xtickarray, ['20Hz', '100Hz', '300Hz', '1kHz', '3kHz', '10kHz',
                            '30kHz'])
    plt.savefig('%s_freq.png' % prefix)

    return '\n'.join(output)
Example #3
def genwavenumber(nlon):
    if (nlon%2 == 0):
        wavenumber = fftpack.fftshift(fftpack.fftfreq(nlon)*nlon)[1:]
    else:
        wavenumber = fftpack.fftshift(fftpack.fftfreq(nlon)*nlon)

    return wavenumber
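A quick sanity check of genwavenumber (assuming scipy.fftpack is imported as fftpack, as the snippet implies): for an even nlon the redundant -nlon/2 entry is dropped, for an odd nlon every wavenumber is kept.

print(genwavenumber(4))   # [-1.  0.  1.]
print(genwavenumber(5))   # [-2. -1.  0.  1.  2.]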
Example #4
def make_audio_analysis_plots(infile, prefix='temp', make_plots=True,
                              do_fft=True, fft_sum=None):
    ''' create frequency plot '''
    import numpy as np
    from scipy import fftpack
    from scipy.io import wavfile
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as pl

    if not os.path.exists(infile):
        return -1

    try:
        rate, data = wavfile.read(infile)
    except ValueError:
        print('error reading wav file')
        return -1
    dt_ = 1./rate
    time_ = dt_ * data.shape[0]
    tvec = np.arange(0, time_, dt_)
    sig0 = data[:, 0]
    sig1 = data[:, 1]
    if not tvec.shape == sig0.shape == sig1.shape:
        return -1
    if not do_fft:
        fft_sum_ = float(np.sum(np.abs(sig0)))
        if hasattr(fft_sum, 'value'):
            fft_sum.value = fft_sum_
        return fft_sum_
    if make_plots:
        pl.clf()
        pl.plot(tvec, sig0)
        pl.plot(tvec, sig1)
        xtickarray = range(0, 12, 2)
        pl.xticks(xtickarray, ['%d s' % x for x in xtickarray])
        pl.savefig('%s/%s_time.png' % (HOMEDIR, prefix))
        pl.clf()
    samp_freq0 = fftpack.fftfreq(sig0.size, d=dt_)
    sig_fft0 = fftpack.fft(sig0)
    samp_freq1 = fftpack.fftfreq(sig1.size, d=dt_)
    sig_fft1 = fftpack.fft(sig1)
    if make_plots:
        pl.clf()
        pl.plot(np.log(np.abs(samp_freq0)+1e-9), np.abs(sig_fft0))
        pl.plot(np.log(np.abs(samp_freq1)+1e-9), np.abs(sig_fft1))
        pl.xlim(np.log(10), np.log(40e3))
        xtickarray = np.log(np.array([20, 1e2, 3e2, 1e3, 3e3, 10e3, 30e3]))
        pl.xticks(xtickarray, ['20Hz', '100Hz', '300Hz', '1kHz', '3kHz',
                               '10kHz', '30kHz'])
        pl.savefig('%s/%s_fft.png' % (HOMEDIR, prefix))
        pl.clf()

        run_command('mv %s/%s_time.png %s/%s_fft.png %s/public_html/videos/'
                    % (HOMEDIR, prefix, HOMEDIR, prefix, HOMEDIR))

    fft_sum_ = float(np.sum(np.abs(sig_fft0)))
    if hasattr(fft_sum, 'value'):
        fft_sum.value = fft_sum_
    return fft_sum_
Example #5
 def test_definition(self):
     x = [0,1,2,3,4,-4,-3,-2,-1]
     assert_array_almost_equal(9*fftfreq(9),x)
     assert_array_almost_equal(9*pi*fftfreq(9,pi),x)
     x = [0,1,2,3,4,-5,-4,-3,-2,-1]
     assert_array_almost_equal(10*fftfreq(10),x)
     assert_array_almost_equal(10*pi*fftfreq(10,pi),x)
Example #6
def invert_vort(uc, dx=dx, dy=dy, nx=nx, ny=ny, geom=tad.geom):

    ucv = geom.validview(uc)
    vort = ucv[0]
    u = ucv[1]
    v = ucv[2]

    f = fft2(vort)

    nx, ny = vort.shape

    scal_y = 2*pi/dy/ny
    scal_x = 2*pi/dx/nx

    k = fftfreq(nx, 1/nx)[:,None] * 1j * scal_x
    l = fftfreq(ny, 1/ny)[None,:] * 1j * scal_y


    lapl = k**2 + l**2
    lapl[0,0] = 1.0

    psi = f/lapl
    u[:] = -real(ifft2(psi * l))
    v[:] = real(ifft2(psi * k))

    return uc
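The function above depends on the external tad.geom wrapper, so it cannot run on its own. As an illustrative sketch of the same spectral Poisson inversion (solve lapl(psi) = vort on a periodic grid, then u = -dpsi/dy, v = dpsi/dx), here is a self-contained version checked against a known streamfunction; the grid and test field are made up for the check.

import numpy as np
from numpy.fft import fft2, ifft2, fftfreq

nx = ny = 64
dx = dy = 2 * np.pi / nx
X, Y = np.meshgrid(np.arange(nx) * dx, np.arange(ny) * dy, indexing='ij')

psi_exact = np.sin(X) * np.cos(Y)
vort = -2.0 * psi_exact                       # laplacian of the streamfunction

scal_x = 2 * np.pi / dx / nx
scal_y = 2 * np.pi / dy / ny
k = fftfreq(nx, 1 / nx)[:, None] * 1j * scal_x
l = fftfreq(ny, 1 / ny)[None, :] * 1j * scal_y
lapl = k**2 + l**2
lapl[0, 0] = 1.0                              # avoid division by zero at the mean mode

psi_hat = fft2(vort) / lapl
u = -np.real(ifft2(psi_hat * l))              # u = -dpsi/dy
v = np.real(ifft2(psi_hat * k))               # v =  dpsi/dx

print(np.allclose(u, np.sin(X) * np.sin(Y)))  # True
print(np.allclose(v, np.cos(X) * np.cos(Y)))  # True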
def deflection_calculation(x, y, topo, rho_t, rho_c, rho_m, Te, E, nu, padding=0):
    """
    Calculates the deflection due to a topographic load for a plate of constant
    thickness Te.
    Uses the equation:
    F[w] = rho_t/(rho_m-rho_c)  phi_e(k)  F[topo]
    """
    ny, nx = np.shape(topo)
    dx = abs(x[0][1] - x[0][0])
    dy = abs(y[1][0] - y[0][0])
    if padding != 0:
        ny_pad, nx_pad = ny*padding, nx*padding
        topo = np.pad(topo, (ny_pad,nx_pad), 'constant', constant_values=0)
    else:
        nx_pad, ny_pad = 0, 0 
        
    fx = fftpack.fftfreq(nx + 2*nx_pad, dx)
    fy = fftpack.fftfreq(ny + 2*ny_pad, dy)
    fx, fy = np.meshgrid(fx, fy)
    k = 2*np.pi*np.sqrt(fx**2 + fy**2)
    
    F_w = rho_t/(rho_m - rho_c)*phi_e(k, Te, rho_c, rho_m, E, nu)*fftpack.fft2(topo)
    w = np.real(fftpack.ifft2(F_w))
    
    if padding != 0:
        w = w[ny_pad:-ny_pad, nx_pad:-nx_pad]
    return w
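deflection_calculation relies on a phi_e(k, Te, rho_c, rho_m, E, nu) helper that is not shown in this snippet. Below is a hedged sketch of how it might be wired up, using the standard thin-plate flexural response phi_e(k) = 1 / (1 + D k^4 / ((rho_m - rho_c) g)) with D = E Te^3 / (12 (1 - nu^2)) as an assumed implementation (g = 9.81 m/s^2), applied to a synthetic Gaussian load; all parameter values here are illustrative only.

import numpy as np
from scipy import fftpack

def phi_e(k, Te, rho_c, rho_m, E, nu, g=9.81):
    # assumed flexural response function for a thin elastic plate
    D = E * Te**3 / (12.0 * (1.0 - nu**2))
    return 1.0 / (1.0 + D * k**4 / ((rho_m - rho_c) * g))

nx, ny, dx = 128, 128, 1000.0                 # 1 km grid spacing
x, y = np.meshgrid(np.arange(nx) * dx, np.arange(ny) * dx)
topo = 1000.0 * np.exp(-((x - x.mean())**2 + (y - y.mean())**2) / (2 * (20e3)**2))

w = deflection_calculation(x, y, topo,
                           rho_t=2700.0, rho_c=2800.0, rho_m=3300.0,
                           Te=20e3, E=1e11, nu=0.25)
print(w.shape)                                # same shape as topo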
    def icwt2d(self, da=0.25):
        '''
        Inverse bi-dimensional continuous wavelet transform as in Wang and
        Lu (2010), equation [5].

        Parameters
        ----------
        da : float, optional
            Spacing in the frequency axis.
        '''
        if self.Wf is None:
            raise TypeError("Run cwt2D before icwt2D")
        m0, l0, k0 = self.Wf.shape

        if m0 != self.scales.size:
            raise Warning('Scale parameter array shape does not match\
                           wavelet transform array shape.')
        # Pad the dimensions up to the next power of two.
        L, K = 2 ** int(np.ceil(np.log2(l0))), 2 ** int(np.ceil(np.log2(k0)))
        # Calculates the zonal and meridional wave numbers.
        l, k = fftfreq(L, self.dy), fftfreq(K, self.dx)
        # Creates empty inverse wavelet transform array and fills it for every
        # discrete scale using the convolution theorem.
        self.iWf = np.zeros((m0, L, K), 'complex')
        for i, an in enumerate(self.scales):
            psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
            W_ft = fftn(self.Wf[i, :, :], s=(L, K))
            self.iWf[i, :, :] = ifftn(W_ft * psi_ft_bar, s=(L, K)) *\
                da / an ** 2.

        self.iWf = self.iWf[:, :l0, :k0].real.sum(axis=0) / self.wavelet.cpsi

        return self
def direct_shift(x,a,period=None):
    n = len(x)
    if period is None:
        k = fftfreq(n)*1j*n
    else:
        k = fftfreq(n)*2j*pi/period*n
    return ifft(fft(x)*exp(k*a)).real
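A minimal check of direct_shift, assuming fft, ifft, fftfreq and exp are in scope from scipy.fftpack / numpy as the snippet implies: with the default period of 2*pi, shifting a sine sampled over one full period by pi/2 gives a cosine.

import numpy as np
n = 64
t = 2 * np.pi * np.arange(n) / n          # one full period
x = np.sin(t)
y = direct_shift(x, np.pi / 2)            # x evaluated at t + pi/2
print(np.allclose(y, np.cos(t)))          # True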
Example #10
    def __init__(self, x, y, s, detrend=True, window=False, **kwargs):
        # r-space
        self.x  = np.asanyarray(x)
        self.y  = np.asanyarray(y)
        self.s  = np.asanyarray(s)

        assert len(self.x.shape) == 2
        assert self.x.shape == self.y.shape == self.s.shape
        assert self.x.size == self.y.size == self.s.size

        # r-space spacing
        self.dx = self._delta(self.x, np.index_exp[0,0], np.index_exp[1,0])
        self.dy = self._delta(self.y, np.index_exp[0,0], np.index_exp[0,1])

        # r-space samples
        self.n0 = self.x.shape[0]
        self.n1 = self.x.shape[1]

        # r-space lengths
        self.lx = self.n0 * self.dx
        self.ly = self.n1 * self.dy

        # k-space
        u = fftpack.fftshift(fftpack.fftfreq(self.n0))
        v = fftpack.fftshift(fftpack.fftfreq(self.n1))
        self.u, self.v = np.meshgrid(u, v, indexing='ij')

        # k-space spacing
        self.du = self._delta(self.u, np.index_exp[0,0], np.index_exp[1,0])
        self.dv = self._delta(self.v, np.index_exp[0,0], np.index_exp[0,1])

        # k-space lengths
        self.lu = self.n0 * self.du
        self.lv = self.n1 * self.dv

        # nyquist
        try:
            self.nyquist_u = 0.5/self.dx
        except ZeroDivisionError:
            self.nyquist_u = 0.0

        try:
            self.nyquist_v = 0.5/self.dy
        except ZeroDivisionError:
            self.nyquist_v = 0.0

        self.k = np.sqrt(self.u**2 + self.v**2)

        # detrend the signal
        if detrend:
            self.s = signal.detrend(self.s)

        # apply window to signal
        if window:
            self._window()
            self.s = self.s * self.window

        # compute the FFT
        self.fft = fftpack.fftshift(fftpack.fft2(self.s))
        self.power = self.fft.real**2 + self.fft.imag**2
Example #11
    def __init__(self, cube, header, phys_units=False):
        super(VCS, self).__init__()

        self.header = header
        self.cube = cube
        self.fftcube = None
        self.correlated_cube = None
        self.ps1D = None
        self.phys_units = phys_units

        if np.isnan(self.cube).any():
            self.cube[np.isnan(self.cube)] = 0
            # Feel like this should be more specific
            self.good_channel_count = np.sum(self.cube[0, :, :] != 0)
        else:
            self.good_channel_count = float(
                self.cube.shape[1] * self.cube.shape[2])

        # Lazy check to make sure we have units of km/s
        if np.abs(self.header["CDELT3"]) > 1:
            self.vel_to_pix = np.abs(self.header["CDELT3"]) / 1000.
        else:
            self.vel_to_pix = np.abs(self.header["CDELT3"])

        self.vel_channels = np.arange(1, self.cube.shape[0], 1)

        if self.phys_units:
            self.vel_freqs = np.abs(
                fftfreq(self.cube.shape[0])) / self.vel_to_pix
        else:
            self.vel_freqs = np.abs(fftfreq(self.cube.shape[0]))
Example #12
 def mkgrad(self,data):
   """Calculate gradient by convolution with appropriate gaussian filter"""
   n1=data.shape[0]
   if(not self.n or n1 != self.n):
     self.n= n1
     self.grad_multiplyer= fftfreq(n1)*numpy.exp(-4*fftfreq(n1)**2)
   data_f = fft(data)
   grad_f = data_f*self.grad_multiplyer
   return numpy.absolute(ifft(grad_f))
Example #13
def get_mps(t, freq, spec):
    "Computes the MPS of a spectrogram (ideally a log-spectrogram) or other REAL time-freq representation"
    mps = fftshift(fft2(spec))
    amps = np.real(mps * np.conj(mps))
    nf = mps.shape[0]
    nt = mps.shape[1]
    wfreq = fftshift(fftfreq(nf, d=freq[1] - freq[0]))
    wt = fftshift(fftfreq(nt, d=t[1] - t[0]))
    return wt, wfreq, mps, amps
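A minimal usage sketch of get_mps, assuming numpy plus fft2, fftshift and fftfreq (scipy.fftpack or numpy.fft) are imported as in the snippet; a random array stands in for the log-spectrogram.

import numpy as np
freq = np.linspace(0, 8000, 128)              # spectrogram frequency axis (Hz)
t = np.arange(200) * 0.01                     # 10 ms frame hop
spec = np.random.randn(len(freq), len(t))     # stand-in log-spectrogram (nf x nt)

wt, wfreq, mps, amps = get_mps(t, freq, spec)
print(wt.shape, wfreq.shape, amps.shape)      # (200,) (128,) (128, 200)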
Example #14
 def makeImageFromSf(self, sfx, sf, reduceNoise=True):
     """Invert SF all the way back to ImageI."""
     self.invertSf(sfx, sf)
     self.invertAcovf1d(reduceNoise=reduceNoise)
     self.invertAcovf2d(useI=True)
     self.invertPsd2d(useI=True)
     self.invertFft(useI=True)
     self.xfreq = fftpack.fftfreq(self.nx, 1.0)
     self.yfreq = fftpack.fftfreq(self.ny, 1.0)
     return
Example #15
    def __init__(self, datain, spd, tim_taper=0.1):
        """Arguments:
        
       'datain'    -- the data to be filtered. dimension must be (time, lat, lon)

       'spd'       -- samples per day

       'tim_taper' -- cosine tapering ratio. Tapering is applied to the first and
                      last tim_taper fraction of the samples. Default is 0.1
                      (a 10% cosine taper).

                      """
        ntim, nlat, nlon = datain.shape

        #remove the lowest three harmonics of the seasonal cycle (WK99, WKW03)
##         if ntim > 365*spd/3:
##             rf = fftpack.rfft(datain,axis=0)
##             freq = fftpack.rfftfreq(ntim*spd, d=1./float(spd))
##             rf[(freq <= 3./365) & (freq >=1./365),:,:] = 0.0     #freq<=3./365 only??
##             datain = fftpack.irfft(rf,axis=0)

        #remove the linear trend
        data = signal.detrend(datain, axis=0)

        #tapering
        if tim_taper == 'hann':
            window = signal.hann(ntim)
            data = data * window[:,NA,NA]
        elif tim_taper > 0:
        #taper by cos tapering same dtype as input array
            tp = int(ntim*tim_taper)
            window = numpy.ones(ntim, dtype=datain.dtype)
            x = numpy.arange(tp)
            window[:tp] = 0.5*(1.0-numpy.cos(x*pi/tp))
            window[-tp:] = 0.5*(1.0-numpy.cos(x[::-1]*pi/tp))
            data = data * window[:,NA,NA]

        #FFT
        self.fftdata = fftpack.fft2(data, axes=(0,2))

        #Note
        # fft is defined with exp(-ikx), so to follow the exp(ikx) convention
        # the wavenumber is multiplied by minus one
        wavenumber = -fftpack.fftfreq(nlon)*nlon
        frequency = fftpack.fftfreq(ntim, d=1./float(spd))
        knum, freq = numpy.meshgrid(wavenumber, frequency)

        #make f<0 domain same as f>0 domain
        #CAUTION: wave definition is exp(i(k*x-omega*t)) but FFT definition exp(-ikx)
        #so change sign
        knum[freq<0] = -knum[freq<0]
        freq = numpy.abs(freq)
        self.knum = knum
        self.freq = freq

        self.wavenumber = wavenumber
        self.frequency = frequency
Example #16
    def test_Eady(self, atol=5e-2, nz=20, Ah=0.):
        """ Eady setup
        """
        ###########
        # prepare parameters for Eady
        ###########
        nz = nz
        zin = np.arange(nz+1, dtype=np.float64)/nz
        N2 = np.full(nz, 1.)
        f0 = 1.
        beta = 0.
        Nx = 10
        Ny = 1
        dx = 1e-1
        dy = 1e-1
        k = fft.fftshift( fft.fftfreq(Nx, dx) )
        l = fft.fftshift( fft.fftfreq(Ny, dy) )
        vbar = np.zeros(nz+1)
        ubar = zin
        etax = np.zeros(2)
        etay = np.zeros(2)

        z, growth_rate, vertical_modes = \
            modes.instability_analysis_from_N2_profile(
                .5*(zin[1:]+zin[:-1]), N2, f0, beta, k, l,
                zin, ubar, vbar, etax, etay, depth=1., sort='LI', num=2
        )

        self.assertEqual(nz+1, vertical_modes.shape[0],
            msg='modes array must be in the right shape')

        self.assertTrue(np.all( np.diff(
                    growth_rate.reshape((growth_rate.shape[0], len(k)*len(l))).imag.max(axis=1) ) <= 0.),
            msg='imaginary part of modes should be descending')

        mode_amplitude1 = (np.absolute(vertical_modes[:, 0])**2).sum(axis=0)
        self.assertTrue(np.allclose(1., mode_amplitude1),
            msg='mode1 should be normalized to amplitude of 1 at all horizontal wavenumber points')

        mode_amplitude2 = (np.absolute(vertical_modes[:, 1])**2).sum(axis=0)
        self.assertTrue(np.allclose(1., mode_amplitude2),
            msg='mode2 should be normalized to amplitude of 1 at all horizontal wavenumber points')

        #########
        # Analytical solution for Eady growth rate
        #########
        growthEady = np.zeros(len(k))
        for i in range(len(k)):
            if (k[i]==0) or ((np.tanh(.5*k[i])**-1 - .5*k[i]) * (.5*k[i] - np.tanh(.5*k[i])) < 0):
                growthEady[i] = 0.
            else:
                growthEady[i] = ubar.max() * np.sqrt( (np.tanh(.5*k[i])**-1 - .5*k[i]) * (.5*k[i] - np.tanh(.5*k[i])) )

        self.assertTrue( np.allclose(growth_rate.imag[0, 0, :], growthEady, atol=atol),
            msg='The numerical growth rates should be close to the analytical Eady solution' )
Example #17
def mps(spectrogram, df, dt):
    """
        Compute the modulation power spectrum for a given spectrogram.
    """

    #normalize and mean center the spectrogram
    sdata = copy.copy(spectrogram)
    sdata /= sdata.max()
    sdata -= sdata.mean()

    #take the 2D FFT and center it
    smps = fft2(sdata)
    smps = fftshift(smps)

    #compute the log amplitude
    mps_logamp = 20*np.log10(np.abs(smps)**2)
    mps_logamp[mps_logamp < 0.0] = 0.0

    #compute the phase
    mps_phase = np.angle(smps)

    #compute the axes
    nf = mps_logamp.shape[0]
    nt = mps_logamp.shape[1]
    spectral_freq = fftshift(fftfreq(nf, d=df))
    temporal_freq = fftshift(fftfreq(nt, d=dt))

    """
    nb = sdata.shape[1]
    dwf = np.zeros(nb)
    for ib in range(int(np.ceil((nb+1)/2.0))+1):
        posindx = ib
        negindx = nb-ib+2
        print 'ib=%d, posindx=%d, negindx=%d' % (ib, posindx, negindx)
        dwf[ib]= (ib-1)*(1.0/(df*nb))
        if ib > 1:
            dwf[negindx] =- dwf[ib]

    nt = sdata.shape[0]
    dwt = np.zeros(nt)
    for it in range(0, int(np.ceil((nt+1)/2.0))+1):
        posindx = it
        negindx = nt-it+2
        print 'it=%d, posindx=%d, negindx=%d' % (it, posindx, negindx)
        dwt[it] = (it-1)*(1.0/(nt*dt))
        if it > 1 :
            dwt[negindx] = -dwt[it]

    spectral_freq = dwf
    temporal_freq = dwt
    """

    return temporal_freq,spectral_freq,mps_logamp,mps_phase
Example #18
    def __init__(self,shape,lengths):
        self.lengths=lengths
        self.shape=shape

        self.Inv_Laplacian=(np.dstack(np.meshgrid(fftpack.fftfreq(self.shape[1],1.0/self.shape[1]),
                                       fftpack.fftfreq(self.shape[0],1.0/self.shape[0])))**2).sum(-1)
        self.Inv_Laplacian[self.Inv_Laplacian<1.0]=1.0
        self.Inv_Laplacian=1.0
        self.Inv_Laplacian/=self.lengths.area_lat_lon

        #Define the vector calculus in spherical harmonics:
        self.spharm=horizontal_vector_calculus_spherical_harmonics(self.shape,self.lengths)
        return
Example #19
    def solve_sqg(self):
        import scipy.fftpack as fft
        import numpy as np
        from math import pi
        
        self.precondition()
        
        dx = self.dx
        dy = self.dy
        rhos = self.ssd
        
        bhat = fft.fft2( - 9.81 * rhos / self.rho0)  # calculate buoyancy
        ny, nx = rhos.shape
        nz = self.nz
        k = 2 * pi * fft.fftfreq(nx)
        l = 2 * pi * fft.fftfreq(ny)

        ipsihat = np.zeros((nz+3, ny, nx))*complex(0, 0)
        
        Q = np.zeros((nz + 1, 1), dtype='float64'); Q[[0,-1]] = 0.0 # for interior PV, not used in this version
        
        # cutoff value
        ck, cl = 2 * pi / self.filterL, 2 * pi / self.filterL
        # loop through wavenumbers
        bhats = np.zeros_like(bhat)
        for ik in np.arange(k.size):
            for il in np.arange(l.size):
                wv2 = ((k[ik] / dx[il, 0]) ** 2 + (l[il] / dy[0, ik]) ** 2)
                if wv2 > (ck * ck + cl * cl):
                    bhats[il,ik] = bhat[il,ik]
                    right = - bhat[il, ik] / self.f0 * self.Rp
                    left = self.M - wv2 * np.eye(self.nz+1)
                    ipsihat[1:-1, il, ik] = np.linalg.solve(left, right).flatten()
                else:
                    print('skip k(ik,il)', ik, il, "wv2 = ", wv2)
        
        for k in range(1,nz+2):
            ipsihat[k, :, :] = (fft.ifft2(ipsihat[k, :, :]))
        
        if self.bottomboundary == 'psi=0':
            self.psis = np.r_[(np.real(ipsihat)), np.zeros((1,ny,nx))]
        else:
            self.psis = np.real(ipsihat)
            self.psis[0,:,:]= self.psis[1,:,:]
            self.psis[-1,:,:]=self.psis[-2,:,:]-self.dzc[-1]*np.real(fft.ifft2(-bhats))/self.f0
            
        self.rhos = self.psi2rho(self.psis)
        self.us, self.vs = psi2uv(self.lon, self.lat, self.psis)
        
        return
Example #20
 def calcFft(self):
     """Calculate the 2d FFT of the image (self.fimage).
     If 'shift', adds a shift to move the small spatial scales to the
     center of the FFT image to self.fimage. Also calculates the frequencies. """
     # Generate the FFT (note, scipy places 0 - largest spatial scale frequencies - at corners)
     self.fimage = fftpack.fft2(self.image)
     if self.shift:
         # Shift the FFT to put the largest spatial scale frequencies at center
         self.fimage = fftpack.fftshift(self.fimage)
     # Note, these frequencies follow unshifted order (0= first, largest spatial scale (with positive freq)). 
     self.xfreq = fftpack.fftfreq(self.nx, 1.0)
     self.yfreq = fftpack.fftfreq(self.ny, 1.0)
     self.xfreqscale = self.xfreq[1] - self.xfreq[0]
     self.yfreqscale = self.yfreq[1] - self.yfreq[0]
     return
Example #21
    def __init__(self, shape, space):
        "docstring"
        nx, ny = shape
        dx, dy = space

        scal_y = 2 * pi / dy / ny
        scal_x = 2 * pi / dx / nx

        k = fftfreq(nx, 1 / nx)[:, None] * 1j * scal_x
        l = fftfreq(ny, 1 / ny)[None, :] * 1j * scal_y

        lapl = k**2 + l**2
        lapl[0, 0] = 1.0

        self.k, self.l, self.lapl = k, l, lapl
Example #22
    def __init__(self, raw_files, blocksize, samplerate, fedge, fedge_at_top,
                 time_offset=0.0*u.s, dtype='cu4bit,cu4bit', comm=None):
        """ARO data acquired with a CHIME correlator contains 1024 channels
        over the 400MHz BW, 2 polarizations, and 2 unsigned 8-byte ints for
        real and imaginary for each timestamp.
        """
        self.meta = eval(open(raw_files[0] + '.meta').read())
        nchan = self.meta['nfreq']
        self.time0 = Time(self.meta['stime'], format='unix') + time_offset
        self.npol = self.meta['ninput']
        self.samplerate = samplerate
        self.fedge_at_top = fedge_at_top
        if fedge.isscalar:
            self.fedge = fedge
            f = fftshift(fftfreq(nchan, (2./samplerate).to(u.s).value)) * u.Hz
            if fedge_at_top:
                self.frequencies = fedge - (f-f[0])
            else:
                self.frequencies = fedge + (f-f[0])
        else:
            assert fedge.shape == (nchan,)
            self.frequencies = fedge
            if fedge_at_top:
                self.fedge = self.frequencies.max()
            else:
                self.fedge = self.frequencies.min()

        self.dtsample = (nchan * 2 / samplerate).to(u.s)
        if comm is None or comm.rank == 0:
            print("In AROCHIMEData, calling super")
            print("Start time: ", self.time0.iso)

        super(AROCHIMEData, self).__init__(raw_files, blocksize, dtype, nchan,
                                           comm=comm)
 def _getFFT(self, data, fs):
     """
     @param data: 
         set of samples
     @param fs  : 
         sample-rate of the data (samples/s)
     """
     # Number of total samplepoints
     N = len( data )
 
     # Sample rate 
     T = 1.0 / fs # sample interval T = 1/256 = 0.0039 s
     
     x = np.linspace(0.0, (N*T), N)
 
     # frequency content
     yfft  = fft(data, N)
 
     # let's take only the positive frequencies and normalize the amplitude
     yfft  = np.abs(yfft) / N
     freqs = fftfreq(N, T)
     half = int(np.floor(N/2)/2)
     freqs = freqs[0:half]
     yfft  = [yfft[0]] + [ y * 2 for y in yfft[1:half] ]
     
     return freqs, yfft
Example #24
def band_hilbert(x, fs, band, N=None, axis=-1):
    x = np.asarray(x)
    Xf = fftpack.fft(x, N, axis=axis)
    w = fftpack.fftfreq(x.shape[0], d=1. / fs)
    Xf[(w < band[0]) | (w > band[1])] = 0
    x = fftpack.ifft(Xf, axis=axis)
    return 2*x
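A small demonstration of band_hilbert, assuming numpy and scipy.fftpack are imported as np and fftpack as in the snippet: keeping only the 40-60 Hz band of a two-tone signal returns (twice) the positive-frequency content of the 50 Hz component, i.e. its analytic signal, so the real part recovers the tone and np.abs gives its envelope.

import numpy as np
fs = 500.0
t = np.arange(0, 2.0, 1.0 / fs)
x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 50 * t)

y = band_hilbert(x, fs, band=(40.0, 60.0))
print(np.allclose(np.real(y), 0.5 * np.sin(2 * np.pi * 50 * t)))  # True
print(np.allclose(np.abs(y), 0.5))                                # flat envelope of 0.5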
Example #25
def stftfreq(wsize, sfreq=None):  # noqa: D401
    """Frequencies of stft transformation.

    Parameters
    ----------
    wsize : int
        Size of stft window
    sfreq : float
        Sampling frequency. If None the frequencies are given between 0 and pi
        otherwise it's given in Hz.

    Returns
    -------
    freqs : array
        The positive frequencies returned by stft

    See Also
    --------
    stft
    istft
    """
    n_freq = wsize // 2 + 1
    freqs = fftfreq(wsize)
    freqs = np.abs(freqs[:n_freq])
    if sfreq is not None:
        freqs *= float(sfreq)
    return freqs
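A quick check of stftfreq, assuming fftfreq comes from scipy (or numpy.fft) and numpy is imported as np: an 8-sample window at 100 Hz yields the five non-negative frequencies up to Nyquist.

print(stftfreq(8, sfreq=100.0))   # [0., 12.5, 25., 37.5, 50.]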
Example #26
def get_power2(x, fs, band, n_sec=5):
    n_steps = int(n_sec * fs)
    w = fftpack.fftfreq(n_steps, d=1. / fs * 2)
    print(len(range(0, x.shape[0] - n_steps, n_steps)))
    pows = [2*np.sum(fftpack.rfft(x[k:k+n_steps])[(w > band[0]) & (w < band[1])]**2)/n_steps
            for k in range(0, x.shape[0] - n_steps, n_steps)]
    return np.array(pows)
Example #27
def _mt_spectra(x, dpss, sfreq):
    """ Compute tapered spectra

    Parameters
    ----------
    x : array, shape=(n_signals, n_times)
        Input signal
    dpss : array, shape=(n_tapers, n_times)
        The tapers
    sfreq : float
        The sampling frequency

    Returns
    -------
    x_mt : array, shape=(n_signals, n_tapers, n_times)
        The tapered spectra
    freqs : array
        The frequency points in Hz of the spectra
    """

    # remove mean (do not use in-place subtraction as it may modify input x)
    x = x - np.mean(x, axis=-1)[:, np.newaxis]
    x_mt = fftpack.fft(x[:, np.newaxis, :] * dpss)

    # only keep positive frequencies
    freqs = fftpack.fftfreq(x.shape[1], 1. / sfreq)
    freq_mask = (freqs >= 0)

    x_mt = x_mt[:, :, freq_mask]
    freqs = freqs[freq_mask]

    return x_mt, freqs
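A hedged usage sketch for _mt_spectra, assuming numpy and scipy.fftpack are imported as np and fftpack as in the snippet; the DPSS tapers are generated here with scipy.signal.windows.dpss purely for illustration.

import numpy as np
from scipy.signal import windows

sfreq = 256.0
x = np.random.randn(3, 512)                  # 3 signals, 512 samples each
tapers = windows.dpss(512, NW=4, Kmax=7)     # shape (n_tapers, n_times) = (7, 512)

x_mt, freqs = _mt_spectra(x, tapers, sfreq)
print(x_mt.shape)                            # (3, 7, 256): non-negative frequencies only
print(freqs[0], freqs[-1])                   # 0.0 127.5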
Example #28
    def _ajust_units(self):
        """
        This must be called whenever the potential changes, because the
        external interface always uses eV, nm and s, while internally
        everything is in atomic units.
        """
        self.x_m = self.x_nm * 1.0e-9 # nm to m
        self.x_au = self.x_m / self.au_l # m to au
        self.dx_m = self.x_m[1]-self.x_m[0] # dx
        self.dx_au = self.x_au[1]-self.x_au[0] # dx
        self.k_au = fftfreq(self.N, d=self.dx_au)

        self.v_j = self.v_ev * self.ev # ev to j
        self.v_au_ti = self.v_au = self.v_j / self.au_e # j to au

        # check whether there is any bias to apply
        try:
            self.v_au_ti += self.bias_au
        except:
            pass

        # check whether there is any dynamic field to apply
        try:
            assert self.v_au_td and isinstance(self.v_au_td, LambdaType)
            self.v_au_full = lambda t: self.v_au_ti + self.v_au_td(t)
        except:
            self.v_au_full = lambda t: self.v_au_ti
    def analyzeSound(self):
        """ Highlights the first N peaks in the frequency diagram.
        """
        # reload the data
        data = self.data
        sample_freq = self.sample_freq
        from scipy.fftpack import fftfreq
        freq_vect = fftfreq(data.size) * sample_freq
        
        # find the local maxima
        y0 = abs(fft(data))
#        y1 = abs(fft(data[:, 1]))
        maxi0 = ((diff(sign(diff(y0))) < 0) & (y0[1:-1] > y0.max()/10.)).nonzero()[0] + 1 # local max
        # maxi1 = ((diff(sign(diff(y1))) < 0) & (y1[1:-1] > y1.max()/10.)).nonzero()[0] + 1 # local max
        
        # frequency
        ax = self.main_figure.figure.add_subplot(212)
        ax.plot(freq_vect[maxi0], y0[maxi0], "o")
        # ax.plot(freq_vect[maxi1], y1[maxi1], "o")
        
        # annotate peaks above a cutoff frequency
        fc = 100
        for point in maxi0[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y0[point]))
#        for point in maxi1[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
#            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y1[point]))
        
        self.ui.main_figure.canvas.draw()
Example #30
    def __init__(self, waveReader):
        self.windowSize = PADDED_WINDOW_SAMPLES
        self.sampleRate = waveReader.samplerate
        self.signal = waveReader.channels[0]

        # Performs STFT on samples
        pspc = self.stft(WINDOW_SAMPLES, HOP_SAMPLES)
        
        # Gets FFT bins center frequencies, discard negative frequencies
        freqs = fftfreq(
            self.windowSize, 
            float(1)/waveReader.samplerate
            )[:self.windowSize // 2]

        # Scales bins according to the Terhardt transfer function
        pspc = self.scaleTerhardt(pspc, freqs)

        # Warps frequencies to bark bins
        pspc = self.scaleBark(pspc, freqs)

        # Convolves frequency envelopes with a half-hanning window
        pspc = self.temporalMask(pspc)

        # Scales samples to dB with I0 = 60, clips values below -60dB
        pspc = self.scaleDb(pspc)

        self.spectogram = pspc

        self.eventDetection = self.calculateEventDetection(self.spectogram)
        self.eventDetectionEvents = self.findEvents(self.eventDetection)

        self.loudness = self.calculateLoudness(self.spectogram)

        self.events = self.matchEventsToLoudness(self.eventDetectionEvents, self.loudness)
        self.eventSamples = self.events * HOP_SAMPLES
data = np.loadtxt("monthrg.dat")
agnios = np.array(data[:, 0])
manchas = np.array(data[:, 3])
dias = np.array(data[:, 2])
meses = np.array(data[:, 1])
i = dias > 0
#Years and sunspot counts
t = agnios[i] + meses[i] / 12
manchas_agnios = manchas[i]
#Years and sunspots from 1850 onward
ii = t > 1850
year = t[ii]
manch = manchas_agnios[ii]
plt.plot(year, manch)
plt.savefig("manchas2.png")
plt.close()
#Fourier transform of the data above
dt = 1.0  #sampling interval: 1 month (frequencies in 1/months)
fourier = fft(manch) / len(year)
frecuencia = fftfreq(len(year), dt)
plt.plot(frecuencia, abs(fourier))
plt.savefig("frecuencia.png")
plt.close()
#Find the dominant frequency
iii = frecuencia > 0
indice = np.argmax(abs(fourier[iii]))
f = frecuencia[iii]
frec_max = f[indice]
t = 1 / frec_max
print("The period is", t, "months")
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
                    noverlap=None, nfft=None, detrend='constant',
                    return_onesided=True, scaling='spectrum', axis=-1,
                    mode='psd'):
    """
    Calculate various forms of windowed FFTs for PSD, CSD, etc.

    This is a helper function that implements the commonality between the
    psd, csd, and spectrogram functions. It is not designed to be called
    externally. The windows are not averaged over; the result from each window
    is returned.

    Parameters
    ----------
    x : array_like
        Array or sequence containing the data to be analyzed.
    y : array_like
        Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
        the extra computations are spared.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length will be used for nperseg.
        Defaults to 'hanning'.
    nperseg : int, optional
        Length of each segment.  Defaults to 256.
    noverlap : int, optional
        Number of points to overlap between segments. If None,
        ``noverlap = nperseg // 2``.  Defaults to None.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired.  If None,
        the FFT length is `nperseg`. Defaults to None.
    detrend : str or function or False, optional
        Specifies how to detrend each segment. If `detrend` is a string,
        it is passed as the ``type`` argument to `detrend`.  If it is a
        function, it takes a segment and returns a detrended segment.
        If `detrend` is False, no detrending is done.  Defaults to 'constant'.
    return_onesided : bool, optional
        If True, return a one-sided spectrum for real data. If False return
        a two-sided spectrum. Note that for complex data, a two-sided
        spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross spectrum
        ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
        measured in V and fs is measured in Hz.  Defaults to 'density'
    axis : int, optional
        Axis along which the periodogram is computed; the default is over
        the last axis (i.e. ``axis=-1``).
    mode : str, optional
        Defines what kind of return values are expected. Options are ['psd',
        'complex', 'magnitude', 'angle', 'phase'].

    Returns
    -------
    freqs : ndarray
        Array of sample frequencies.
    result : ndarray
        Array of output data, contents dependent on the *mode* kwarg.
    t : ndarray
        Array of times corresponding to each data segment

    References
    ----------
    .. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
        http://stackoverflow.com/a/6811241
    .. [2] Stack Overflow, "Using strides for an efficient moving average
        filter", http://stackoverflow.com/a/4947453

    Notes
    -----
    Adapted from matplotlib.mlab

    .. versionadded:: 0.16.0
    """
    if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "'default', 'psd', 'complex', "
                         "'magnitude', 'angle', 'phase'" % mode)

    # If x and y are the same object we can save ourselves some computation.
    same_data = y is x

    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is not 'psd'")

    axis = int(axis)

    # Ensure we have np.arrays, get outdtype
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        outdtype = np.result_type(x,y,np.complex64)
    else:
        outdtype = np.result_type(x,np.complex64)

    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
        except ValueError:
            raise ValueError('x and y cannot be broadcast together.')

    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
            emptyout = np.rollaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout, emptyout

    if x.ndim > 1:
        if axis != -1:
            x = np.rollaxis(x, axis, len(x.shape))
            if not same_data and y.ndim > 1:
                y = np.rollaxis(y, axis, len(y.shape))

    # Check if x and y are the same length, zero-pad if necessary
    if not same_data:
        if x.shape[-1] != y.shape[-1]:
            if x.shape[-1] < y.shape[-1]:
                pad_shape = list(x.shape)
                pad_shape[-1] = y.shape[-1] - x.shape[-1]
                x = np.concatenate((x, np.zeros(pad_shape)), -1)
            else:
                pad_shape = list(y.shape)
                pad_shape[-1] = x.shape[-1] - y.shape[-1]
                y = np.concatenate((y, np.zeros(pad_shape)), -1)

    # X and Y are same length now, can test nperseg with either
    if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d}, is greater than input length = {1:d}, '
                      'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
        nperseg = x.shape[-1]

    nperseg = int(nperseg)
    if nperseg < 1:
        raise ValueError('nperseg must be a positive integer')

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    elif noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    else:
        noverlap = int(noverlap)

    # Handle detrending and window functions
    if not detrend:
        def detrend_func(d):
            return d
    elif not hasattr(detrend, '__call__'):
        def detrend_func(d):
            return signaltools.detrend(d, type=detrend, axis=-1)
    elif axis != -1:
        # Wrap this function so that it receives a shape that it could
        # reasonably expect to receive.
        def detrend_func(d):
            d = np.rollaxis(d, -1, axis)
            d = detrend(d)
            return np.rollaxis(d, axis, len(d.shape))
    else:
        detrend_func = detrend

    if isinstance(window, string_types) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError('window must have length of nperseg')

    if np.result_type(win,np.complex64) != outdtype:
        win = win.astype(outdtype)

    if mode == 'psd':
        if scaling == 'density':
            scale = 1.0 / (fs * (win*win).sum())
        elif scaling == 'spectrum':
            scale = 1.0 / win.sum()**2
        else:
            raise ValueError('Unknown scaling: %r' % scaling)
    else:
        scale = 1

    if return_onesided is True:
        if np.iscomplexobj(x):
            sides = 'twosided'
        else:
            sides = 'onesided'
            if not same_data:
                if np.iscomplexobj(y):
                    sides = 'twosided'
    else:
        sides = 'twosided'

    if sides == 'twosided':
        num_freqs = nfft
    elif sides == 'onesided':
        if nfft % 2:
            num_freqs = (nfft + 1)//2
        else:
            num_freqs = nfft//2 + 1

    # Perform the windowed FFTs
    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
    result = result[..., :num_freqs]
    freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]

    if not same_data:
        # All the same operations on the y data
        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
        result_y = result_y[..., :num_freqs]
        result = np.conjugate(result) * result_y
    elif mode == 'psd':
        result = np.conjugate(result) * result
    elif mode == 'magnitude':
        result = np.absolute(result)
    elif mode == 'angle' or mode == 'phase':
        result = np.angle(result)
    elif mode == 'complex':
        pass

    result *= scale
    if sides == 'onesided':
        if nfft % 2:
            result[...,1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            result[...,1:-1] *= 2

    t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)

    if sides != 'twosided' and not nfft % 2:
        # get the last value correctly, it is negative otherwise
        freqs[-1] *= -1

    # we unwrap the phase here to handle the onesided vs. twosided case
    if mode == 'phase':
        result = np.unwrap(result, axis=-1)

    result = result.astype(outdtype)

    # All imaginary parts are zero anyways
    if same_data and mode != 'complex':
        result = result.real

    # Output is going to have new last axis for window index
    if axis != -1:
        # Specify as positive axis index
        if axis < 0:
            axis = len(result.shape)-1-axis

        # Roll frequency axis back to axis where the data came from
        result = np.rollaxis(result, -1, axis)
    else:
        # Make sure window/time index is last axis
        result = np.rollaxis(result, -1, -2)

    return freqs, t, result
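The helper above is internal; the same windowed-FFT machinery is exposed publicly through scipy.signal.welch (PSD) and scipy.signal.spectrogram (time-resolved). A minimal sketch:

import numpy as np
from scipy import signal

fs = 1000.0
t = np.arange(0, 2.0, 1.0 / fs)
x = np.sin(2 * np.pi * 125 * t) + 0.1 * np.random.randn(t.size)

f, Pxx = signal.welch(x, fs=fs, nperseg=256)              # averaged periodogram
print(f[np.argmax(Pxx)])                                  # 125.0, the injected tone

f_s, t_s, Sxx = signal.spectrogram(x, fs=fs, nperseg=256, noverlap=128)
print(Sxx.shape)                                          # (len(f_s), number of segments)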
Example #33
factor = input()
factor = int(factor)

ax = plt.gca()

data_test = 2050
data_over = 329

time_test = np.arange(1*10**(-9), data_test*4*10**(-9), 4*10**(-9))

data_array = np.load('ratio.npz')
ratio = data_array['x']
angle_dif = data_array['y']

dt = 4*10**(-9)
freq_fft_test = fftfreq(data_test, dt)
freq_fft_test_plus = freq_fft_test[0:data_test//2]

freq_array = np.arange(1*10**6, 41*10**6, 1*10**6)
freq_interpolate = np.arange(0, 41*10**6, 0.5*10**6)
func_amp = interpolate.InterpolatedUnivariateSpline(freq_array, ratio, k = 1)
func_angle = interpolate.InterpolatedUnivariateSpline(freq_array, angle_dif, k = 1)

result_amp_test = func_amp(freq_fft_test_plus)
for i in range(data_over, data_test//2):
    result_amp_test[i] = result_amp_test[data_over-1]

result_amp_test_min = result_amp_test[data_over-1]
result_angle_test = func_angle(freq_fft_test_plus)

for i in range(data_over, data_test//2):
Example #34
def select_prd_peak(wl, data, window=None, axes=None, known_prd=None):
    assert len(data.shape) == 1, 'expected spectrum averaged over ' +\
            'camera frames'
    threshold = 0.001  # fraction of DC peak
    window_width = 150  # in pixels
    pad_factor = 1
    adjacent_pts_fit = 2  # fit on either side
    min_prd = 650  # fs
    dc_left_time, dc_right_time = -200., 200.  # in fs

    if window:
        window_width = window.shape[0]
        raise NotImplementedError('untested implementation for reusing window')
    else:
        window = get_window(('kaiser', 7), window_width, fftbins=False)

    assert window.shape[0] < data.shape[0], 'window must be smaller than data'
    assert len(data.shape) == 1, 'data should be 1d'

    freq, data, df = wavelen_to_freq(wl, data, ret_df=True)

    padded = np.zeros((data.shape[0] * pad_factor, ))
    padded[:data.shape[0]] = data * get_window(('kaiser', 20), data.shape[0])
    time = fftshift(fftfreq(padded.shape[0], df))
    ft = fftshift(ifft(padded))
    ft_abs = np.abs(ft)

    if known_prd is None:
        res = argrelmax(ft_abs, order=20 * pad_factor)[0]
        prd_idx = res[np.logical_and(
            time[res] > min_prd, ft_abs[res] > threshold * np.max(ft_abs))][0]

        # fit parabola to adjacent points to find intrabin time
        sel = slice(prd_idx - adjacent_pts_fit, prd_idx + adjacent_pts_fit)
        p = np.polyfit(time[sel], ft_abs[sel], 2)
        prd_guess = -0.5 * p[1] / p[0]  # from vertex form
        log.debug('found PRD at {:.1f} fs'.format(prd_guess))
    else:
        prd_guess = known_prd
        prd_idx = np.abs(time - prd_guess).argmin()
        log.debug('reusing PRD at {:.1f} fs, idx {:d}'.format(
            time[prd_idx], prd_idx))

    if axes:
        # going to plot stuff to axes
        axes.plot(time, ft_abs, marker='+', markersize=5)
        next_highest = ft_abs[prd_idx]
        axes.text(prd_guess, next_highest, r'${:5.2f}$'.format(prd_guess))
        axes.set_ylim(0, 1.1 * next_highest)
        axes.vlines(prd_guess, 0, next_highest, color='r')

    sel_peak = slice(prd_idx - window_width // 2, prd_idx + window_width // 2)
    lo_peak = np.zeros_like(ft)
    lo_peak[sel_peak] = ft[sel_peak] * window

    if axes:
        axes.plot(time, abs(lo_peak))

    # detect minima near DC peak
    # first find index of pm 60 fs, and call that the DC interval
    dc_left_idx, dc_right_idx = np.abs(time - dc_left_time).argmin(), \
        np.abs(time-dc_right_time).argmin()

    sel_dc = slice(dc_left_idx, dc_right_idx)
    dc_peak = np.zeros_like(ft)

    dc_peak[sel_dc] = ft[sel_dc] * get_window(
        ('kaiser', 7), dc_right_idx - dc_left_idx)
    if axes:
        axes.plot(time, np.abs(dc_peak), 'c', linewidth=1.2)

    dt = np.abs(time[1] - time[0])

    freq = np.linspace(freq[0], freq[-1], freq.shape[0] * pad_factor)

    # estimate of E_LO(w) without phase information
    E_lo = np.sqrt(np.abs(fft(dc_peak)))
    E_sig = np.exp(-1j * prd_guess * freq) * fft(lo_peak) / E_lo

    return prd_guess, freq, E_sig, E_lo
# 1. Get the window profile
window = get_window('hamming', N_window)

# 2. Set up the FFT
result = []
start = 0
while (start < N_data - N_window):
    end = start + N_window
    result.append(fftshift(fft(window*data[start:end])))
    start = end

result.append(fftshift(fft(window*data[-N_window:])))
result = array(result,result[0].dtype)

# Display results
freqscale = fftshift(fftfreq(N_window,dT))[150:-150]/1e3
figure(1)
clf()
imshow(abs(result[:,150:-150]), extent=(freqscale[-1], freqscale[0],
                                    (N_data*dT - T_window/2.0), T_window/2.0))
xlabel('Frequency (kHz)')
ylabel('Time (sec.)')
gray()

show()




Example #36
def resample(x, num, t=None, axis=0, window=None):
    """
    Resample `x` to `num` samples using Fourier method along the given axis.

    The resampled signal starts at the same value as `x` but is sampled
    with a spacing of ``len(x) / num * (spacing of x)``.  Because a
    Fourier method is used, the signal is assumed to be periodic.

    Parameters
    ----------
    x : array_like
        The data to be resampled.
    num : int
        The number of samples in the resampled signal.
    t : array_like, optional
        If `t` is given, it is assumed to be the sample positions
        associated with the signal data in `x`.
    axis : int, optional
        The axis of `x` that is resampled.  Default is 0.
    window : array_like, callable, string, float, or tuple, optional
        Specifies the window applied to the signal in the Fourier
        domain.  See below for details.

    Returns
    -------
    resampled_x or (resampled_x, resampled_t)
        Either the resampled array, or, if `t` was given, a tuple
        containing the resampled array and the corresponding resampled
        positions.

    Notes
    -----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding to alleviate ringing in
    the resampled values for sampled signals you didn't intend to be
    interpreted as band-limited.

    If `window` is a function, then it is called with a vector of inputs
    indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).

    If `window` is an array of the same length as `x.shape[axis]` it is
    assumed to be the window to be applied directly in the Fourier
    domain (with dc and low-frequency first).

    For any other type of `window`, the function `scipy.signal.get_window`
    is called to generate the window.

    The first sample of the returned vector is the same as the first
    sample of the input vector.  The spacing between samples is changed
    from dx to:

        dx * len(x) / num

    If `t` is not None, then it represents the old sample positions,
    and the new sample positions will be returned as well as the new
    samples.

    """
    x = asarray(x)
    X = fft(x, axis=axis)
    Nx = x.shape[axis]
    if window is not None:
        if callable(window):
            W = window(fftfreq(Nx))
        elif isinstance(window, ndarray) and window.shape == (Nx, ):
            W = window
        else:
            W = ifftshift(get_window(window, Nx))
        newshape = ones(len(x.shape), dtype=int)
        newshape[axis] = len(W)
        W.shape = newshape
        X = X * W
    sl = [slice(None)] * len(x.shape)
    newshape = list(x.shape)
    newshape[axis] = num
    N = int(np.minimum(num, Nx))
    Y = zeros(newshape, 'D')
    sl[axis] = slice(0, (N + 1) // 2)
    Y[tuple(sl)] = X[tuple(sl)]
    sl[axis] = slice(-(N - 1) // 2, None)
    Y[tuple(sl)] = X[tuple(sl)]
    y = ifft(Y, axis=axis) * (float(num) / float(Nx))

    if x.dtype.char not in ['F', 'D']:
        y = y.real

    if t is None:
        return y
    else:
        new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
        return y, new_t
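A minimal check of resample, assuming the star-imported names it uses (asarray, fft, ifft, fftfreq, ifftshift, get_window, ones, zeros, arange, ndarray) are in scope from numpy and scipy as in the original scipy.signal source: a band-limited sine sampled over a whole number of periods is resampled exactly.

import numpy as np
x = np.sin(2 * np.pi * np.arange(100) / 100)    # one period, 100 samples
y = resample(x, 50)                             # Fourier-domain downsampling
print(np.allclose(y, np.sin(2 * np.pi * np.arange(50) / 50)))   # True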
Example #37
def direct_hilbert(x):
    fx = fft(x)
    n = len(fx)
    w = fftfreq(n) * n
    w = 1j * sign(w)
    return ifft(w * fx)
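A quick check of direct_hilbert, assuming fft, ifft, fftfreq and sign come from scipy.fftpack / numpy as the snippet implies: with this sign convention the Hilbert transform of a sine sampled over one period is a cosine, and the output is real up to round-off.

import numpy as np
n = 128
t = 2 * np.pi * np.arange(n) / n
y = direct_hilbert(np.sin(t))
print(np.allclose(y.real, np.cos(t)))   # True
print(np.allclose(y.imag, 0.0))         # True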
import scipy.io.wavfile as wavfile
import scipy
import scipy.fftpack as fftpk
import numpy as np
from matplotlib import pyplot as plt

s_rate, signal = wavfile.read("tibet.wav")

FFT = abs(fftpk.fft(signal))
freqs = fftpk.fftfreq(len(FFT), (1.0 / s_rate))

plt.plot(freqs[range(len(FFT) // 2)], FFT[range(len(FFT) // 2)])
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.show()
Example #39
def plot_shh_anal_loc():
    """
    Function to plot multiple analytical power spectra along e.g. an aquifer in a 3D plot.
    Still not working because plot3d has issues with log scale ...
    """

    # set parameters
    data_points = 8000
    time_step_size = 86400
    aquifer_length = 1000
    Sy = 1e-1
    T = 0.001
    from calc_tc import calc_tc

    tc = calc_tc(aquifer_length, Sy, T)
    print(tc)

    import sys

    # add search path for own modules
    sys.path.append("/Users/houben/PhD/python/scripts/spectral_analysis")
    from shh_analytical import shh_analytical
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from mpl_toolkits.mplot3d import Axes3D
    import scipy.fftpack as fftpack

    # create an input signal
    np.random.seed(123456789)
    input = np.random.rand(data_points)
    spectrum = fftpack.fft(input)
    # erase first half of spectrum
    spectrum = abs(spectrum[:round(len(spectrum) / 2)])**2
    spectrum = spectrum  # [1:]
    # X contains the different locations
    X = np.linspace(0, aquifer_length - 1, 10)
    X = [100, 900]
    # Y contains the frequencies, erase first data point because it's 0
    Y = abs(fftpack.fftfreq(len(input),
                            time_step_size))[:round(len(input) / 2)]
    Y = np.log10(Y[1:])
    Z = np.zeros((len(Y), len(X)))
    for i, loc in enumerate(X):
        Z[:, i] = np.log10(
            shh_analytical((Y, spectrum),
                           Sy=Sy,
                           T=T,
                           x=loc,
                           L=aquifer_length,
                           m=5,
                           n=5,
                           norm=False))
    # erase first data point from Z for each location
    print(Z)
    print(Y)
    import matplotlib.pyplot as plt

    plt.plot(Y, Z[:, 0])
    # plt.loglog(Y,Z[:,0])
    #    X, Y = np.meshgrid(X, Y)
    #    fig = plt.figure()
    #    ax = Axes3D(fig)
    # surf = ax.plot_surface(
    #    X, Y, Z, rstride=1, cstride=2, shade=False, linewidth=1, cmap="Spectral_r"
    # )
    #    surf = ax.plot_wireframe(X, Y, Z, rstride=0, cstride=1, cmap=cm.magma)
    # surf.set_edgecolors(surf.to_rgba(surf._A))
    # surf.set_facecolors("white")
    # ax1 = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=0)

    #    ax.set_xlabel("Location [m]")
    #    ax.set_ylabel("Frequency [Hz]")
    #    ax.set_zlabel("log Spectral Density")

    # ax.set_zscale("log")
    # ax.yaxis.set_scale("log")
    # ax.zaxis._set_scale('log')
    # ax.set_yscale("log")
    plt.show()
def trialfunction(input_data):

    trials_dic = {}

    dbc = 0

    if Alc_train_extractor.shape == Con_train_extractor.shape:

        print('Same shape error:')
        print(X_train.shape)
        print(y_train.shape)
        raise SystemExit

    if (input_data.shape == Alc_train_extractor.shape) or (
            input_data.shape
            == Alc_train_classifier.shape) or (input_data.shape
                                               == Alc_test.shape):
        dbc = EEG_data

    if (input_data.shape == Con_train_extractor.shape) or (
            input_data.shape
            == Con_train_classifier.shape) or (input_data.shape
                                               == Con_test.shape):
        dbc = EEG_data_control

    for pos in input_data:

        Trial = dbc.loc[dbc['trial number'] == pos]

        columns = ['channel', 'time', 'sensor value']

        Trial = Trial.pivot_table(index='channel',
                                  columns='time',
                                  values='sensor value')

        trials_dic[pos] = Trial

    RGB_dic = {}

    for key in trials_dic:
        data = trials_dic.get(key)

        # Get real amplitudes of FFT (only in postive frequencies)

        fft_raw = fft(data)

        fft_vals = np.absolute(fft_raw)

        fft_vals = normalize(fft_vals, axis=1)

        # Get frequencies for amplitudes in Hz

        fs = 256  # Sampling rate

        fft_freq = fftfreq(fs, 1.0 / fs)

        # Define EEG bands
        eeg_bands = {
            'Theta': (4, 7),
            'Alpha': (8, 12),
            'Beta': (13, 30),
        }

        # Take the  sum of squared absolute values/amplitudes for each EEG band

        eeg_band_fft = defaultdict(list)

        for band in eeg_bands:

            freq_ix = np.where((fft_freq >= eeg_bands[band][0])
                               & (fft_freq <= eeg_bands[band][1]))[0]

            for channel in fft_vals:

                filterdch = channel[freq_ix]

                sqdvals = np.square(filterdch)

                sumvals = np.sum(sqdvals, axis=0)

                eeg_band_fft[band].append(sumvals)

        extracted_df = pd.DataFrame(eeg_band_fft)

        neeg = EEG_data.drop(columns=[
            'matching condition', 'name', 'trial number', 'subject identifier',
            'time', 'sample num', 'sensor value'
        ])

        neeg = neeg.drop_duplicates()

        #get names of source electrodes:

        extracted_df = extracted_df.reset_index(drop=True)
        neeg = neeg.reset_index(drop=True)

        e_names = neeg
        e_names = e_names.rename(columns={'sensor position': 0})

        extracted_df = extracted_df.join(neeg)

        #get coordinates in 3d from robertoostenveld.nl/electrodes/plotting_1005.txt

        coords = pd.read_csv(
            '/kaggle/input/httpsrobertoostenveldnlelectrodes/plotting_1005.txt',
            sep='\t',
            header=None)

        coords = coords.drop(coords.columns[4], axis=1)

        #print(coords)
        testerd = pd.merge(e_names, coords, on=0, how='inner')

        testerd.set_index('channel', inplace=True)

        testerd.columns = ['pos', 'x', 'y', 'z']

        extracted_df = extracted_df.rename(columns={'sensor position': "pos"})

        #filter values and coordinates
        extracted_df = pd.merge(extracted_df, testerd, on="pos", how='inner')
        extracted_df = extracted_df.drop(['x', 'y', 'z'], axis=1)
        extracted_df.set_index('channel', inplace=True)

        extracted_df = extracted_df.drop(columns=['pos'])
        extracted_df.index.names = ['pos']

        #adapted from https://www.samuelbosch.com/2014/02/azimuthal-equidistant-projection.html

        class Point(object):
            def __init__(self, x, y, z):
                self.x = x
                self.y = y
                self.z = z

        class AzimuthalEquidistantProjection(object):
            """ 
                http://mathworld.wolfram.com/AzimuthalEquidistantProjection.html
                http://mathworld.wolfram.com/SphericalCoordinates.html
            """
            def __init__(self):

                self.t1 = pi / 2  ## polar latitude center of projection , https://en.wikipedia.org/wiki/Azimuthal_equidistant_projection
                self.l0 = 0  ## arbitrary longitude center of projection
                self.cost1 = cos(self.t1)
                self.sint1 = sin(self.t1)

            def project(self, point):

                #ADAPTED FOR 3D CARTESIAN TO SPHERICAL

                hxy = np.hypot(point.x, point.y)

                t = np.arctan2(point.z, hxy)
                l = np.arctan2(point.y, point.x)

                ###

                costcosll0 = cos(t) * cos(l - self.l0)
                sint = sin(t)

                c = acos((self.sint1) * (sint) + (self.cost1) * costcosll0)
                k = c / sin(c)

                x = k * cos(t) * sin(l - self.l0)
                y = k * (self.cost1 * sint - self.sint1 * costcosll0)
                return x, y

        #Projection df

        projected_df = pd.DataFrame()

        for index, row in testerd.iterrows():

            x = row['x']
            y = row['y']
            z = row['z']

            p = AzimuthalEquidistantProjection()
            r = p.project(Point(x, y, z))

            r = pd.Series(r)

            projected_df = projected_df.append(r, ignore_index=True)

        projected_df = projected_df.rename(columns={0: 'X', 1: 'Y'})

        ###map coordinates to values

        new_df = projected_df.join(extracted_df)
        new_df = new_df.drop([31])  # drop row because it contains no values
        #print(new_df)

        Theta_df = new_df.drop(['Alpha', 'Beta', 'X', 'Y'], axis=1)
        Alpha_df = new_df.drop(['Theta', 'Beta', 'X', 'Y'], axis=1)
        Beta_df = new_df.drop(['Theta', 'Alpha', 'X', 'Y'], axis=1)

        #map onto mesh

        xpoints = np.array(new_df[['X']].squeeze())
        ypoints = np.array(new_df[['Y']].squeeze())

        Thetavalues = np.array(Theta_df).squeeze()
        Alphavalues = np.array(Alpha_df).squeeze()
        Betavalues = np.array(Beta_df).squeeze()

        xx, yy = np.mgrid[-1.5:1.5:32j, -1.5:1.5:32j]

        Thetavalues = minmax_scale(Thetavalues,
                                   feature_range=(0.0, 1.0),
                                   axis=0)
        Alphavalues = minmax_scale(Alphavalues,
                                   feature_range=(0.0, 1.0),
                                   axis=0)
        Betavalues = minmax_scale(Betavalues, feature_range=(0.0, 1.0), axis=0)

        Thetagrid = griddata((xpoints, ypoints),
                             Thetavalues, (xx, yy),
                             method='cubic',
                             fill_value=0.0)
        Alphagrid = griddata((xpoints, ypoints),
                             Alphavalues, (xx, yy),
                             method='cubic',
                             fill_value=0.0)
        Betagrid = griddata((xpoints, ypoints),
                            Betavalues, (xx, yy),
                            method='cubic',
                            fill_value=0.0)

        ##RGB construction

        RGB = np.empty((32, 32, 3))

        RGB[:, :, 0] = Thetagrid
        RGB[:, :, 1] = Alphagrid
        RGB[:, :, 2] = Betagrid

        RGB_dic[key] = RGB

    ##creating new dict with new keys

    lendict = len(RGB_dic)
    #print('lendict: ',lendict)

    lenlist = np.arange(0, lendict)

    #print(lenlist)

    final_dict = dict(zip(lenlist, list(RGB_dic.values())))

    return final_dict
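# A stand-alone sketch of the azimuthal equidistant projection used inside the loop
# above, for a single 3-D electrode position. The helper name and the coordinates
# below are illustrative only; the pole (t1) and central longitude (l0) mirror the
# class defaults.
import numpy as np
from math import pi, sin, cos, acos

def project_azimuthal_equidistant(x, y, z, t1=pi / 2, l0=0.0):
    t = np.arctan2(z, np.hypot(x, y))   # latitude from cartesian coordinates
    l = np.arctan2(y, x)                # longitude
    c = acos(sin(t1) * sin(t) + cos(t1) * cos(t) * cos(l - l0))
    k = c / sin(c)                      # note: undefined exactly at the pole (c == 0)
    return (k * cos(t) * sin(l - l0),
            k * (cos(t1) * sin(t) - sin(t1) * cos(t) * cos(l - l0)))

X2d, Y2d = project_azimuthal_equidistant(0.31, 0.95, -0.03)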
Exemple #41
0
def fourier_transform(funcs, dts, inSI=False, SI_units='thz',
                    convention='math', zero_padding=False, npow=1,
                     **kwargs_norm_units):
    """
    The F.T. of an array with various sampling times for each of the funcs

    Input
    -----
    funcs : a list of np.array types representing the discrete time signal.
    I assume you have correctly sampled the funcs by not including the endpoint
    of the time signal
    Must be casted as list so enclose with []. Or as a tuple like (f1,) with the comma being important. CAST AS A TUPLE
    dts : the sampling time step. For now, every function has the same time step.
    inSI (optional) : wheteher to use SI units or normalized units for frequency (Default: False)
    SI_units (optional) : specify the SI unit for frequency. (Default: THz). Valid options are 'thz',
    **kwargs_norm_units (optional) : If inSI==True, then specify the appropriate normalized units for
    the toSI function. Please see docstring for toSI for further details.
    convention: 'physics' convention or 'math' convention. Physics convention uses
    ifft as Fourer transform and math convention uses fft
    Returns
    -------
    (frequencies, fft of signal) :  list containing the frequencies and
    spectrum with the zero frequency centered
    else (Default)
    Return nothing
    """

    if isinstance(dts, float):
        dts = np.tile(dts, len(funcs))  # cast as a list
    elif isinstance(dts, list):
        if len(dts) != len(funcs):
            print('The funcs and dts arrays are not the same size.')
            return None

    funcs_freq = []
    freqs_list = []
    for (f, dt) in zip(funcs, dts):
        Ns = np.size(f)
        # windowing
        # window_real = np.max(np.real(f))*signal.hann(Ns)
        # window_imag = np.max(np.imag(f))*signal.hann(Ns)
        # Testing window
        # plt.figure()
        # plt.plot(window)
        # plt.plot(np.abs(f))
        # plt.show()
        # plt.close()
        # f = window*f
        # f = window_real*np.real(f) + I*window_imag*np.imag(f)
        if zero_padding:
            NFFT = nextpow2(Ns, npow=npow)
            # pad asymmetrical on right side, assumes sampling in [0,T)
            # f = np.append(f, np.zeros(NFFT-Ns))
            # f = np.pad(f, (0, NFFT-Ns), 'constant', constant_values=(0, 0))

            # pad symmetrically on both sides since it's a gaussian. sampling [-0.5*T, 0.5*T)
            Nleft = int(np.ceil(0.5*(NFFT - Ns)))
            Nright = int((NFFT - Ns) - Nleft)
            f = np.pad(f, (Nleft, Nright), 'constant', constant_values=(0, 0))
            Ns = np.size(f)
        # ipdb.set_trace()
        if convention == 'math':
            f_freq = fftshift((fft(f)))
        elif convention == 'physics':
            # f_freq = fftshift((ifft(f, n=2**15)))  # another exaple of asymmetric padding on the right side
            # Ns = np.size(f_freq)
            f_freq = fftshift((ifft(f)))
        freqs = fftshift(fftfreq(Ns, dt))

        if inSI:
            freqs = toSI(freqs, 'freq', SI_units, **kwargs_norm_units)
            if freqs is None:
                print('toSI function call failed. Aborting')
                return None

        funcs_freq.append(f_freq)
        freqs_list.append(freqs)
    return (freqs_list, funcs_freq)
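# Minimal usage sketch for fourier_transform() above, assuming numpy is imported
# as np and fft/ifft/fftshift/fftfreq are in scope as in the function body.
# The Gaussian test pulse and the sampling step are illustrative values only.
dt_demo = 0.01
t_demo = np.arange(-5.0, 5.0, dt_demo)        # sampled on [-T/2, T/2), endpoint excluded
pulse = np.exp(-t_demo**2)

freqs_list_demo, spectra_demo = fourier_transform([pulse], dt_demo, convention='math')
f_demo, spec_demo = freqs_list_demo[0], spectra_demo[0]
peak = f_demo[np.argmax(np.abs(spec_demo))]   # expected near 0 for a Gaussian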
Exemple #42
0
# Reading the generated data signal in CSV format
df = pd.read_csv('test_sig_3.csv', header=0)

# Setting Number of Samples and Period
N = len(df)
T = (df.iloc[-1][0] - df.iloc[0][0]) / (N - 1)
print("Numbers of Samples (N):", N, " | Sampling Interval (T):", T)

# Obtaining the Values as a Numpy Array from the Dataframe
x = df['N'].values
y = df['F(N)'].values
print("Current DC Offset:", y.mean())

# Computing Fast Fourier Transform of Signal N, F(N)
xf = fftpack.fftfreq(N) / T
yf = fftpack.fft(y) * (2.0 / N)

# Setting the plotting parameters
xf_plot = np.linspace(0.0, 1.0 / (2 * T), num=N // 2)
yf_magplot = np.abs(yf[:N // 2])
yf_phaseplot = np.angle(yf[:N // 2])

# Reconstructing Sinusoidal Signal
y_recon = fftpack.ifft(yf) / (2.0 / N)

# Run the given data through the Savitzky-Golay Filter
filtered = savgol_filter(y, 101, 1)

# Create an array to place the different anomalies outside the bands
anom = []
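# A possible continuation sketch: collect samples that fall outside a tolerance
# band around the Savitzky-Golay baseline. The band half-width `tol` is an
# assumption; the snippet above does not specify how the bands are defined.
tol = 3.0 * np.std(y - filtered)                    # hypothetical band half-width
outside = (y > filtered + tol) | (y < filtered - tol)
anom = list(zip(x[outside], y[outside]))            # (N, F(N)) pairs outside the band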
Exemple #43
0
    print('\nloading:\n')
    print(filetoopen)
    data = stlab.readdata.readdat(filetoopen)
    #data = [stlab.readdata.readdat(path) for path in pathlist]

    Q = data[0]['Q ()'][0]
    freq = []
    volt_fft = []
    current = []
    for line in data:
        time = line['Time (wp*t)']
        voltage = line['AC Voltage (V)']
        current.append(line['Current (Ic)'][0])
        #testplot(time,voltage)

        F = fftfreq(len(time), d=time[1] - time[0])
        F = F[:len(F) // 2]

        signal_fft = fft(voltage)
        signal_fft = signal_fft[:len(signal_fft) // 2]
        #testplot(F,abs(signal_fft))

        freq.append(F)
        volt_fft.append(abs(signal_fft))
    freq = np.asarray(freq[0])
    volt_fft = np.asarray(volt_fft)

    peakfreqs = [
        peakidx(np.log(volt_fft[i] + 1e-11), thres=0.1)
        for i in range(len(data))
    ]
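    # peakidx() is a project-specific helper; as an alternative sketch, scipy's
    # find_peaks returns similar peak indices on the log-spectra (the prominence
    # threshold below is an illustrative assumption, not taken from the code above):
    from scipy.signal import find_peaks
    peakfreqs_alt = [
        find_peaks(np.log(volt_fft[i] + 1e-11), prominence=1.0)[0]
        for i in range(len(volt_fft))
    ]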
Exemple #44
0
import numpy as np
from scipy.fftpack import fftfreq
from numpy.fft import fft
from matplotlib import pyplot as plt
from scipy.io import loadmat
from os.path import dirname
from likefunctions_boston import plotlikeconfig

plotlikeconfig(title='FFT', xlabel='Frequency(Hz)', ylabel='Strength')
samples = 1200
x = np.linspace(0, 1, samples)
data_bearing = np.zeros(samples * 3)
data_bearing[:samples] = 2 * np.sin(2 * np.pi * 200 * x) + 2 * np.sin(
    2 * np.pi * 100 * x)
fft_data_bearing = fft(data_bearing)
# real_fft_data_bearing=rfft(data_bearing)
freq_fft_data_bearing = fftfreq(fft_data_bearing.size, 1 / samples)
freq_limit = int(fft_data_bearing.size / 2)
signal_limit = freq_limit
plt.stem(freq_fft_data_bearing[:freq_limit],
         abs(fft_data_bearing[:freq_limit]),
         linefmt='',
         use_line_collection=True)
# plt.stem(freq_fft_data_bearing[:freq_limit],abs(real_fft_data_bearing)[:freq_limit],use_line_collection=True)

plt.show()
def iradon(radon_image,
           theta=None,
           output_size=None,
           filter="ramp",
           interpolation="linear",
           circle=True):
    """
    Inverse radon transform.

    Reconstruct an image from the radon transform, using the filtered
    back projection algorithm.

    Parameters
    ----------
    radon_image : array_like, dtype=float
        Image containing radon transform (sinogram). Each column of
        the image corresponds to a projection along a different angle. The
        tomography rotation axis should lie at the pixel index
        ``radon_image.shape[0] // 2`` along the 0th dimension of
        ``radon_image``.
    theta : array_like, dtype=float, optional
        Reconstruction angles (in degrees). Default: m angles evenly spaced
        between 0 and 180 (if the shape of `radon_image` is (N, M)).
    output_size : int, optional
        Number of rows and columns in the reconstruction.
    filter : str, optional
        Filter used in frequency domain filtering. Ramp filter used by default.
        Filters available: ramp, shepp-logan, cosine, hamming, hann.
        Assign None to use no filter.
    interpolation : str, optional
        Interpolation method used in reconstruction. Methods available:
        'linear', 'nearest', and 'cubic' ('cubic' is slow).
    circle : boolean, optional
        Assume the reconstructed image is zero outside the inscribed circle.
        Also changes the default output_size to match the behaviour of
        ``radon`` called with ``circle=True``.

    Returns
    -------
    reconstructed : ndarray
        Reconstructed image. The rotation axis will be located in the pixel
        with indices
        ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.

    References
    ----------
    .. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
           Imaging", IEEE Press 1988.
    .. [2] B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
           the Discrete Radon Transform With Some Applications", Proceedings of
           the Fourth IEEE Region 10 International Conference, TENCON '89, 1989

    Notes
    -----
    It applies the Fourier slice theorem to reconstruct an image by
    multiplying the frequency domain of the filter with the FFT of the
    projection data. This algorithm is called filtered back projection.

    """
    if radon_image.ndim != 2:
        raise ValueError('The input image must be 2-D')
    if theta is None:
        m, n = radon_image.shape
        theta = np.linspace(0, 180, n, endpoint=False)
    else:
        theta = np.asarray(theta)
    if len(theta) != radon_image.shape[1]:
        raise ValueError("The given ``theta`` does not match the number of "
                         "projections in ``radon_image``.")
    interpolation_types = ('linear', 'nearest', 'cubic')
    if interpolation not in interpolation_types:
        raise ValueError("Unknown interpolation: %s" % interpolation)
    if not output_size:
        # If output size not specified, estimate from input radon image
        if circle:
            output_size = radon_image.shape[0]
        else:
            output_size = int(
                np.floor(np.sqrt((radon_image.shape[0])**2 / 2.0)))
    if circle:
        radon_image = _sinogram_circle_to_square(radon_image)

    th = (np.pi / 180.0) * theta
    # resize image to next power of two (but no less than 64) for
    # Fourier analysis; speeds up Fourier and lessens artifacts
    projection_size_padded = \
        max(64, int(2 ** np.ceil(np.log2(2 * radon_image.shape[0]))))
    pad_width = ((0, projection_size_padded - radon_image.shape[0]), (0, 0))
    img = np.pad(radon_image, pad_width, mode='constant', constant_values=0)

    # Construct the Fourier filter
    # This computation lessens artifacts and removes a small bias as
    # explained in [1], Chap 3. Equation 61
    n1 = np.arange(0, projection_size_padded / 2 + 1, dtype=int)
    n2 = np.arange(projection_size_padded / 2 - 1, 0, -1, dtype=int)
    n = np.concatenate((n1, n2))
    f = np.zeros(projection_size_padded)
    f[0] = 0.25
    f[1::2] = -1 / (np.pi * n[1::2])**2

    # Computing the ramp filter from the Fourier transform of its frequency domain
    # representation lessens artifacts and removes a small bias as explained in [1], Chap 3. Equation 61
    fourier_filter = 2 * np.real(fft(f))  # ramp filter
    omega = 2 * np.pi * fftfreq(projection_size_padded)
    if filter == "ramp":
        pass
    elif filter == "shepp-logan":
        # Start from first element to avoid divide by zero
        fourier_filter[1:] *= np.sin(omega[1:] / 2) / (omega[1:] / 2)
    elif filter == "cosine":
        freq = (0.5 * np.arange(0, projection_size_padded) /
                projection_size_padded)
        cosine_filter = np.fft.fftshift(np.sin(2 * np.pi * np.abs(freq)))
        fourier_filter *= cosine_filter
    elif filter == "hamming":
        hamming_filter = np.fft.fftshift(np.hamming(projection_size_padded))
        fourier_filter *= hamming_filter
    elif filter == "hann":
        hanning_filter = np.fft.fftshift(np.hanning(projection_size_padded))
        fourier_filter *= hanning_filter
    elif filter is None:
        fourier_filter[:] = 1
    else:
        raise ValueError("Unknown filter: %s" % filter)
    # Apply filter in Fourier domain
    projection = fft(img, axis=0) * fourier_filter[:, np.newaxis]
    radon_filtered = np.real(ifft(projection, axis=0))

    # Resize filtered image back to original size
    radon_filtered = radon_filtered[:radon_image.shape[0], :]
    reconstructed = np.zeros((output_size, output_size))
    # Determine the center of the projections (= center of sinogram)
    mid_index = radon_image.shape[0] // 2

    [X, Y] = np.mgrid[0:output_size, 0:output_size]
    xpr = X - int(output_size) // 2
    ypr = Y - int(output_size) // 2

    # Reconstruct image by interpolation
    for i in range(len(theta)):
        t = ypr * np.cos(th[i]) - xpr * np.sin(th[i])
        x = np.arange(radon_filtered.shape[0]) - mid_index
        if interpolation == 'linear':
            backprojected = np.interp(t,
                                      x,
                                      radon_filtered[:, i],
                                      left=0,
                                      right=0)
        else:
            interpolant = interp1d(x,
                                   radon_filtered[:, i],
                                   kind=interpolation,
                                   bounds_error=False,
                                   fill_value=0)
            backprojected = interpolant(t)
        reconstructed += backprojected
    if circle:
        radius = output_size // 2
        reconstruction_circle = (xpr**2 + ypr**2) <= radius**2
        reconstructed[~reconstruction_circle] = 0.

    return reconstructed * np.pi / (2 * len(th))
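# Usage sketch: scikit-image packages this same filtered back projection as
# skimage.transform.iradon; here a simple synthetic phantom (made up for
# illustration) is projected with radon() and then reconstructed. Assumes
# scikit-image is installed.
import numpy as np
from skimage.transform import radon, iradon as skimage_iradon

phantom = np.zeros((128, 128))
phantom[40:90, 50:80] = 1.0                       # a bright rectangle inside the circle

angles = np.linspace(0.0, 180.0, 128, endpoint=False)
sinogram = radon(phantom, theta=angles, circle=True)
recon = skimage_iradon(sinogram, theta=angles, circle=True)   # ramp filter by default
rms_error = np.sqrt(np.mean((recon - phantom) ** 2))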
Exemple #46
0
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Create sample time-series data
N = 512  # number of data points
dt = 0.01  # sampling interval
f = 20  # frequency [Hz]
t = np.linspace(1, N, N) * dt - dt
y = np.sin(2 * np.pi * f * t)

# Discrete Fourier transform
yf = fft(y)

# Create the frequency scale
# (could also be built as t/dt/dt/N)
freq = fftfreq(N, dt)

# Plot
# magnitude and phase
plt.figure(1)
plt.subplot(211)
plt.plot(freq[1:N // 2], np.abs(yf)[1:N // 2])
plt.axis('tight')
plt.ylabel("amplitude")
plt.subplot(212)
plt.plot(freq[1:N // 2], np.degrees(np.angle(yf)[1:N // 2]))
plt.axis('tight')
plt.ylim(-180, 180)
plt.xlabel("frequency[Hz]")
plt.ylabel("phase[deg]")
Exemple #47
0
def Traitement_Difference(nom_exp1,
                          nom_exp2,
                          nom_resultat,
                          valeur_resistance,
                          t1,
                          t2,
                          duree,
                          amplification1=1.,
                          amplification2=1.,
                          diviseur=1.,
                          offset=0.,
                          freq_coupure=0.):
    import numpy as np
    from Graphique import Graphique_Simple
    from Graphique import Graphique_Double_Echelle
    from Integration import Integrale_Trapeze
    from Filtre import Passe_bas
    from scipy import fftpack

    data1 = np.loadtxt(nom_exp1 + '.dat', delimiter=';')
    data2 = np.loadtxt(nom_exp2 + '.dat', delimiter=';')

    Temps1 = np.asarray(data1[:, 0], dtype=float) / 1000000.
    Temps1 = Temps1 - Temps1[0]
    Tension1 = (np.asarray(data1[:, 1], dtype=float) -
                offset) * diviseur / amplification1

    Temps2 = np.asarray(data2[:, 0], dtype=float) / 1000000.
    Temps2 = Temps2 - Temps2[0]
    Tension2 = (np.asarray(data2[:, 1], dtype=float) -
                offset) * diviseur / amplification2

    i1 = 0
    for i in range(0, Temps1.size):
        if abs(Temps1[i1] - t1) > abs(Temps1[i] - t1):
            i1 = i
    di = 0
    for i in range(i1, Temps1.size):
        if abs(Temps1[di + i1] - (t1 + duree)) > abs(Temps1[i] - (t1 + duree)):
            di = i - i1

    i2 = 0
    for i in range(0, Temps2.size):
        if abs(Temps2[i2] - t2) > abs(Temps2[i] - t2):
            i2 = i

    Temps = np.zeros(di)
    T1 = np.zeros(di)
    T2 = np.zeros(di)

    for i in range(i1, i1 + di):
        Temps[i - i1] = Temps1[i] - Temps1[i1]
        T1[i - i1] = Tension1[i]

    for i in range(i2, i2 + di):
        T2[i - i2] = Tension2[i]

    Tension = T1 - T2
    freq_echantillonage = Temps[Temps.size - 1] / Temps.size
    freqs = fftpack.fftfreq(Temps.size, d=freq_echantillonage)
    TFT_M = abs(fftpack.fft(Tension - np.mean(Tension)))
    TFT = abs(fftpack.fft(Tension))

    Graphique_Simple(Temps,
                     Tension,
                     x_label='Temps [s]',
                     y_label='Tension [V]',
                     save_name='Graphique_' + nom_resultat + '_T',
                     graph_name='Difference de la tension au cours du temps')
    Graphique_Simple(
        freqs,
        TFT,
        x_label='Fréquence [Hz]',
        y_label='Tension [V]',
        save_name='Graphique_' + nom_resultat + '_TFT',
        graph_name='TF Difference de la tension au cours du temps')
    Graphique_Simple(
        freqs,
        TFT_M,
        x_label='Fréquence [Hz]',
        y_label='Tension [V]',
        save_name='Graphique_' + nom_resultat + '_TTF-M',
        graph_name=
        'TF sans la composante continu Difference de la tension au cours du temps'
    )
Exemple #48
0
def clean(signals,
          confounds=None,
          low_pass=0.2,
          t_r=2.5,
          high_pass=False,
          detrend=False,
          standardize=True,
          shift_confounds=False):
    """ Normalize the signal, and if any confounds are given, project in
        the orthogonal space.

        Low pass filter improves specificity (more interesting arrows
        selected)

        High pass filter should be kepts small, so as not to kill
        sensitivity
    """
    if standardize:
        signals = _standardize(signals, normalize=True)
    elif detrend:
        signals = _standardize(signals, normalize=False)
    signals = np.asarray(signals)

    if confounds is not None:
        if isinstance(confounds, basestring):
            filename = confounds
            confounds = np.genfromtxt(filename)
            if np.isnan(confounds.flat[0]):
                # There may be a header
                if np.version.short_version >= '1.4.0':
                    confounds = np.genfromtxt(filename, skip_header=1)
                else:
                    confounds = np.genfromtxt(filename, skiprows=1)
        # Restrict the signal to the orthogonal of the confounds
        confounds = np.atleast_2d(confounds)
        if shift_confounds:
            confounds = np.r_[confounds[..., 1:-1], confounds[..., 2:],
                              confounds[..., :-2]]
            signals = signals[..., 1:-1]
        confounds = _standardize(confounds, normalize=True)
        confounds = qr_economic(confounds)[0].T
        signals -= np.dot(np.dot(signals, confounds.T), confounds)

    if low_pass and high_pass and high_pass >= low_pass:
        raise ValueError("Your value for high pass filter (%f) is higher or"
                         " equal to the value for low pass filter (%f). This"
                         " would result in a blank signal" %
                         (high_pass, low_pass))

    if low_pass or high_pass:
        n = signals.shape[-1]
        freq = fftpack.fftfreq(n, d=t_r)
        for s in signals:
            fft = fftpack.fft(s)
            if low_pass:
                fft[np.abs(freq) > low_pass] = 0
            if high_pass:
                fft[np.abs(freq) < high_pass] = 0
            s[:] = fftpack.ifft(fft)

    if detrend:
        # This is faster than scipy.detrend and equivalent
        regressor = np.arange(signals.shape[1]).astype(np.float)
        regressor -= regressor.mean()
        regressor /= np.sqrt((regressor**2).sum())

        signals -= np.dot(signals, regressor)[:, np.newaxis] * regressor

    if standardize:
        signals = _standardize(signals, normalize=True)
    elif detrend:
        signals = _standardize(signals, normalize=False)
    return signals
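# Stand-alone sketch of the frequency masking that clean() applies, for a single
# signal sampled every t_r seconds. The cutoff values and the test signal are
# illustrative only.
import numpy as np
from scipy import fftpack

t_r_demo, n_demo = 2.5, 240
time_demo = np.arange(n_demo) * t_r_demo
s_demo = np.sin(2 * np.pi * 0.02 * time_demo) + np.sin(2 * np.pi * 0.15 * time_demo)

freq_demo = fftpack.fftfreq(n_demo, d=t_r_demo)
band = fftpack.fft(s_demo)
band[np.abs(freq_demo) > 0.1] = 0        # low pass: drops the 0.15 Hz component
band[np.abs(freq_demo) < 0.005] = 0      # high pass: drops any slow drift / DC
s_filtered = np.real(fftpack.ifft(band))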
Exemple #49
0
import numpy as np
import matplotlib.pyplot as plt
import math
import scipy.fftpack as syfp
x = np.linspace(0, 5, 1000)
hanning = np.hanning(len(x))
y = np.sin(2 * math.pi * x)
plt.plot(x, y)
plt.show()
fft = np.fft.fft(y)
fft_freq = np.fft.fftfreq(len(x), x[1] - x[0])
freqs = syfp.fftfreq(len(x), x[1] - x[0])
plt.semilogy(freqs, abs(fft))
plt.axvline(1, color="red", linestyle="--")
plt.show()

import scipy as sy
import scipy.fftpack as syfp

dt = 0.02071
t = np.linspace(0, 10, 1000)  ## time at the same spacing of your data
freq = 10
u = np.sin(2 * np.pi * t * freq)  ## Note freq = 10 Hz

# Do FFT analysis of array
FFT = sy.fft.fft(u)

# Getting the related frequencies
freqs = syfp.fftfreq(len(u), t[1] -
                     t[0])  ## added dt, so x-axis is in meaningful units
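# Follow-up sketch using the arrays defined above: pick the dominant positive
# frequency from the spectrum (expected near the 10 Hz tone).
positive = freqs > 0
dominant = freqs[positive][np.argmax(np.abs(FFT[positive]))]
print("Dominant frequency:", dominant, "Hz")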
Exemple #50
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, fftfreq

datos = np.genfromtxt("funcion.dat")

t = datos[:, 0]
f = datos[:, 1]


def fourier(function):
    N = len(function)
    fourier = np.zeros(N)
    for i in range(N):
        coeficientes = 0.0
        for j in range(N):
            coeficientes += function[j] * np.exp((-2 * np.pi * 1j * i * j) / N)
        fourier[i] = abs(coeficientes)
    return fourier


n = len(f)
x = fourier(f) / n
dt = t[2] - t[1]
freq = fftfreq(n, dt)
fmax = np.argmax(x)
frecuencia = int(freq[fmax])

print('La frecuencia es:', frecuencia, 'Hz')
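# Cross-check sketch: the hand-written DFT above should agree with fftpack's fft
# (imported at the top of this snippet) up to the same 1/n scaling.
diff = np.max(np.abs(x - np.abs(fft(f)) / n))
print('Maximum deviation from fftpack.fft:', diff)   # expected to be close to machine precision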
Exemple #51
0
    def iterate(self, iloop, save_wisdom=1):
        dat = self.dat
        ran = self.ran
        smooth = self.smooth
        binsize = self.binsize
        beta = self.beta
        bias = self.bias
        f = self.f
        nbins = self.nbins

        #-- Creating arrays for FFTW
        if iloop == 0:
            delta = pyfftw.empty_aligned((nbins, nbins, nbins),
                                         dtype='complex128')
            deltak = pyfftw.empty_aligned((nbins, nbins, nbins),
                                          dtype='complex128')
            psi_x = pyfftw.empty_aligned((nbins, nbins, nbins),
                                         dtype='complex128')
            psi_y = pyfftw.empty_aligned((nbins, nbins, nbins),
                                         dtype='complex128')
            psi_z = pyfftw.empty_aligned((nbins, nbins, nbins),
                                         dtype='complex128')
            #delta = np.zeros((nbins, nbins, nbins), dtype='complex128')
            #deltak= np.zeros((nbins, nbins, nbins), dtype='complex128')
            #psi_x = np.zeros((nbins, nbins, nbins), dtype='complex128')
            #psi_y = np.zeros((nbins, nbins, nbins), dtype='complex128')
            #psi_z = np.zeros((nbins, nbins, nbins), dtype='complex128')

            print 'Allocating randoms in cells...'
            deltar = self.allocate_gal_cic(ran)
            print 'Smoothing...'
            deltar = gaussian_filter(deltar, smooth / binsize)

            #-- Initialize FFT objects and load wisdom if available
            wisdomFile = "wisdom." + str(nbins) + "." + str(self.nthreads)
            if os.path.isfile(wisdomFile):
                print 'Reading wisdom from ', wisdomFile
                g = open(wisdomFile, 'r')
                wisd = json.load(g)
                pyfftw.import_wisdom(wisd)
                g.close()
            print 'Creating FFTW objects...'
            fft_obj  = pyfftw.FFTW(delta, delta, axes=[0, 1, 2], \
                                   threads=self.nthreads)
            ifft_obj = pyfftw.FFTW(deltak, psi_x, axes=[0, 1, 2], \
                                   threads=self.nthreads, \
                                   direction='FFTW_BACKWARD')
        else:
            delta = self.delta
            deltak = self.deltak
            deltar = self.deltar
            psi_x = self.psi_x
            psi_y = self.psi_y
            psi_z = self.psi_z
            fft_obj = self.fft_obj
            ifft_obj = self.ifft_obj

        #fft_obj = pyfftw.FFTW(delta, delta, threads=self.nthreads, axes=[0, 1, 2])
        #-- Allocate galaxies and randoms to grid with CIC method
        #-- using new positions
        print 'Allocating galaxies in cells...'
        deltag = self.allocate_gal_cic(dat)
        print 'Smoothing...'
        deltag = gaussian_filter(deltag, smooth / binsize)

        print 'Computing fluctuations...'
        delta[:] = deltag - self.alpha * deltar
        w = np.where(deltar > self.ran_min)
        delta[w] = delta[w] / (self.alpha * deltar[w])
        w2 = np.where((deltar <= self.ran_min))
        delta[w2] = 0.
        #-- removing this to not change statistics
        #w3=np.where(delta>np.percentile(delta[w].ravel(), 99))
        #delta[w3] = 0.
        del (w)
        del (w2)
        #del(w3)
        del (deltag)

        print 'Fourier transforming delta field...'
        norm_fft = 1.  #binsize**3
        fft_obj(input_array=delta, output_array=delta)
        #delta = pyfftw.builders.fftn(\
        #                delta, axes=[0, 1, 2], \
        #                threads=self.nthreads, overwrite_input=True)()

        #-- delta/k**2
        k = fftfreq(self.nbins, d=binsize) * 2 * np.pi
        #-- adding 1e-100 in order to avoid division by zero
        delta /= (k[:, None, None]**2 + k[None, :, None]**2 +
                  k[None, None, :]**2 + 1e-100)
        delta[0, 0, 0] = 0.

        #-- Estimating the IFFT in Eq. 12 of Burden et al. 2015
        print 'Inverse Fourier transforming to get psi...'
        norm_ifft = 1.  #(k[1]-k[0])**3/(2*np.pi)**3*nbins**3

        deltak[:] = delta * -1j * k[:, None, None] / bias
        ifft_obj(input_array=deltak, output_array=psi_x)
        deltak[:] = delta * -1j * k[None, :, None] / bias
        ifft_obj(input_array=deltak, output_array=psi_y)
        deltak[:] = delta * -1j * k[None, None, :] / bias
        ifft_obj(input_array=deltak, output_array=psi_z)

        #psi_x = pyfftw.builders.ifftn(\
        #                delta*-1j*k[:, None, None]/bias, \
        #                axes=[0, 1, 2], \
        #                threads=self.nthreads, overwrite_input=True)().real
        #psi_y = pyfftw.builders.ifftn(\
        #                delta*-1j*k[None, :, None]/bias, \
        #                axes=[0, 1, 2], \
        #                threads=self.nthreads, overwrite_input=True)().real
        #psi_z = pyfftw.builders.ifftn(\
        #                delta*-1j*k[None, None, :]/bias, \
        #                axes=[0, 1, 2], \
        #                threads=self.nthreads, overwrite_input=True)().real
        #psi_x = ifftn(-1j*delta*k[:, None, None]/bias).real*norm_ifft
        #psi_y = ifftn(-1j*delta*k[None, :, None]/bias).real*norm_ifft
        #psi_z = ifftn(-1j*delta*k[None, None, :]/bias).real*norm_ifft

        #-- removing RSD from galaxies
        shift_x, shift_y, shift_z =  \
                self.get_shift(dat, psi_x.real, psi_y.real, psi_z.real, \
                               use_newpos=True)
        print "Few displacement values: "
        for i in range(10):
            print shift_x[i], shift_y[i], shift_z[i], dat.x[i]

        #-- for first loop need to approximately remove RSD component
        #-- from Psi to speed up calculation
        #-- first loop so want this on original positions (cp),
        #-- not final ones (np) - doesn't actualy matter
        if iloop == 0:
            psi_dot_rhat = (shift_x*dat.x + \
                            shift_y*dat.y + \
                            shift_z*dat.z ) /dat.dist
            shift_x -= beta / (1 + beta) * psi_dot_rhat * dat.x / dat.dist
            shift_y -= beta / (1 + beta) * psi_dot_rhat * dat.y / dat.dist
            shift_z -= beta / (1 + beta) * psi_dot_rhat * dat.z / dat.dist
        #-- remove RSD from original positions (cp) of
        #-- galaxies to give new positions (np)
        #-- these positions are then used in next determination of Psi,
        #-- assumed to not have RSD.
        #-- the iterative procued then uses the new positions as
        #-- if they'd been read in from the start
        psi_dot_rhat = (shift_x * dat.x + shift_y * dat.y +
                        shift_z * dat.z) / dat.dist
        dat.newx = dat.x + f * psi_dot_rhat * dat.x / dat.dist
        dat.newy = dat.y + f * psi_dot_rhat * dat.y / dat.dist
        dat.newz = dat.z + f * psi_dot_rhat * dat.z / dat.dist

        self.deltar = deltar
        self.delta = delta
        self.deltak = deltak
        self.psi_x = psi_x
        self.psi_y = psi_y
        self.psi_z = psi_z
        self.fft_obj = fft_obj
        self.ifft_obj = ifft_obj

        #-- save wisdom
        wisdomFile = "wisdom." + str(nbins) + "." + str(self.nthreads)
        if iloop == 0 and save_wisdom and not os.path.isfile(wisdomFile):
            wisd = pyfftw.export_wisdom()
            f = open(wisdomFile, 'w')
            json.dump(wisd, f)
            f.close()
            print 'Wisdom saved at', wisdomFile
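# NumPy-only sketch of the core Fourier-space step in iterate() above (the
# delta / k**2 division followed by multiplication by -1j*k/bias); the grid size,
# bin size and bias below are illustrative values, not taken from the class.
import numpy as np
from numpy.fft import fftn, ifftn, fftfreq

nbins_demo, binsize_demo, bias_demo = 64, 5.0, 2.0
delta_demo = np.random.standard_normal((nbins_demo,) * 3)

k = fftfreq(nbins_demo, d=binsize_demo) * 2 * np.pi
k2 = (k[:, None, None]**2 + k[None, :, None]**2 + k[None, None, :]**2 + 1e-100)
deltak_demo = fftn(delta_demo) / k2
deltak_demo[0, 0, 0] = 0.0                      # remove the mean (k = 0) mode

psi_x_demo = np.real(ifftn(deltak_demo * -1j * k[:, None, None] / bias_demo))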
Exemple #52
0
    else:
        return_value = new_data, new_coord
    return return_value


def padded(to_pad, max_len):
    length = len(to_pad)
    zeros = max_len - length
    to_pad = list(to_pad)
    for i in range(zeros):
        to_pad.append(0)
    return to_pad


# In[3]:
zonal_spacing = fftfreq(240, 1.5)
zonal_spacing = 1 / zonal_spacing
zonal_spacing = 360 / zonal_spacing

# In[4]:

# get file names for detrending
from os import walk

f = []
for (dirpath, dirnames, filenames) in walk('gphfiles/'):
    f.extend(filenames)
    break

for wantfile in range(len(f)):
Exemple #53
0
nco_sine = np.sin(2 * np.pi * nco_freq * t)

i_post_mix = adc_out * nco_cosine
q_post_mix = adc_out * nco_sine

# using customized fft module
x, y = fftplot.winfft(i_post_mix, fs=fs)
plt.figure()
fftplot.plot_spectrum(x, y)
plt.title(f'Output Spectrum of i_post_mix - {ftone / 1e6} MHz Tone')

# using customized fft module
x, y = fftplot.winfft(q_post_mix, fs=fs)
plt.figure()
fftplot.plot_spectrum(x, y)
plt.title(f'Output Spectrum of q_post_mix - {ftone / 1e6} MHz Tone')

plt.figure()
yf = fft.fft(adc_out)
xf = fft.fftfreq(nsamps, 1 / fs)
xf = fft.fftshift(xf)
yf = fft.fftshift(yf)
plt.plot(xf / 1e3, np.abs(yf))

plt.figure()
plt.stem(xf / 1e3,
         angle2(np.round(yf, 1)) * 180 / pi,
         use_line_collection=True)

plt.show()
Exemple #54
0
    def spec_plot(title=0,
                  data=d2_averages[0],
                  levels="no",
                  save=False,
                  name=None):

        #select a season for plotting
        test_dat = data
        frequencies = fftfreq(len(test_dat[1]), 0.25)

        #set the crop limits
        max_z = 40
        max_f = 1

        #crop the data, keeping only non-negative zonal wavenumbers and |frequency| <= max_f
        cropped = [[
            test_dat[i][j] for i in range(len(zonal_spacing))
            if zonal_spacing[i] <= max_z and zonal_spacing[i] >= 0
        ] for j in range(len(frequencies)) if np.abs(frequencies[j]) <= max_f]

        cropf = [
            counted for counted in frequencies if np.abs(counted) <= max_f
        ]  # and counted != 0]
        cropz = [
            zonal_spacing[i] for i in range(len(zonal_spacing))
            if zonal_spacing[i] <= max_z and zonal_spacing[i] >= 0
        ]

        x = cropf
        y = cropz
        X, Y = np.meshgrid(x, y)
        X = np.flip(X, 1)
        Z = np.transpose(np.abs(cropped))

        # add cyclic point for plotting purposes
        x = np.array(x)
        testZ = [fftshift(entry) for entry in Z]
        testZ = np.array(testZ)

        dataout, lonsout = add_cyclic_point(testZ, fftshift(x))
        x = lonsout
        y = y
        X, Y = np.meshgrid(x, y)
        X = np.flip(X, 1)

        # set colors and levels for discrete values
        # colors_set = cubehelix_palette(10)
        colors_set = sns.cubehelix_palette(10,
                                           start=2,
                                           rot=0,
                                           dark=0,
                                           light=.95)

        if levels == "no":
            # set colors and levels for discrete values
            level_set_less = [
                np.percentile(dataout, j * 10) for j in range(1, 11)
            ]
            for j in range(1, 5):
                level_set_less.append(np.percentile(dataout, 90 + 2 * j))
                #level_set_less = flatten(level_set_less)
            level_set_less.sort()
            levels_rec.append(level_set_less)

        else:
            level_set_less = levels

        colors_set = sns.palplot(sns.color_palette("hls", len(level_set_less)))
        colors_set = sns.cubehelix_palette(14,
                                           start=2,
                                           rot=0,
                                           dark=0,
                                           light=.95)
        colors_set = sns.color_palette("cubehelix", 14)

        # plot it
        plt.clf()
        plt.figure(figsize=(15, 5), constrained_layout=True)
        # actual plot

        CF = plt.contourf(
            X,
            Y,
            dataout,
            colors=colors_set,
            levels=level_set_less,
        )

        # set colorbars

        CBI = plt.colorbar(CF)
        ax = CBI.ax
        ax.text(-2.5, 0.8, 'Coefficient magnitude', rotation=90)

        # ax.yaxis.get_offset_text().set_position((-3, 5))

        labels = ["{:.1E}".format(Decimal(entry)) for entry in level_set_less]

        CBI.set_ticklabels(labels)
        # plot labels
        plt.xlabel(r"Frequency (day$^{-1}$)")
        plt.ylim(ymax=25, ymin=3)
        plt.xlim(xmax=0.75, xmin=-0.75)
        plt.ylabel("Zonal wavenumber")
        plt.title(str(title) + " climatology of geopotential height spectra",
                  pad=15)

        # formatting
        sns.set_style("ticks")
        sns.set_context("poster")
        sns.despine()

        if save == True:

            plt.savefig(name, bbox_inches="tight")
Exemple #55
0
# PROCEDURE
dt=(tn-t0)/n    # sampling interval
# Analog reference, used to show that the signal is even
fs=20
t=np.arange(-tn,tn,dt)  # analog time axis
m=len(t)
xanalog=np.zeros(m, dtype=float)
for i in range(0,m):
    xanalog[i]=entradax(t[i],fs)

# FFT: Fast Fourier Transform
# Analyze the t>=0 part of xanalog, samples [n:2*n]
xf=fourier.fft(xanalog[n:2*n])
xf=fourier.fftshift(xf)
# Frequency range for the axis
frq=fourier.fftfreq(n, dt)
frq=fourier.fftshift(frq)

# x[w] real part
xfreal=(1/n)*np.real(xf)
# x[w] imaginary part
xfimag=(1/n)*np.imag(xf)
# x[w] magnitude
xfabs=(1/n)*np.abs(xf)
# x[w] angle
xfangle=(1/n)*np.unwrap(np.angle(xf))

#OUTPUT
plt.figure(1)       # define the figure
plt.suptitle('Transformada Rápida Fourier FFT')
x0 = int(x_length / 2)

# Constants
hbar = 1
m = 1
omega = 0.01
sigma = np.sqrt(hbar / (2 * m * omega))

# Defining Wavefunction
psi = wave_init(x, sigma, x0-50)
V_x = harmonic_potential(x, m, omega, x0)
#V_x = np.zeros(N)

# Defining k
dk = dx / (2 * np.pi)
k = fftfreq(N, dk)
ks = fftshift(k)

# Defining time steps
t = 0
dt = 5
step = 1

sch = Schrodinger(x, psi, V_x, k, hbar, m, t)

print(sch.norm_x())

x_limits = ((x[0],x[N-1]), (0, 0.16))
k_limits = ((-5, 5), (0, max(abs(sch.psi_k)+0.5)))

Exemple #57
0
    def k_z(self):
        f_z = fftpack.fftshift(fftpack.fftfreq(self.Nz, d=self.d_z))
        k_z = 2 * np.pi * f_z
        return k_z
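# Usage sketch of the wavenumber grid returned by k_z() above: with Nz samples
# spaced d_z apart, the result is the shifted angular wavenumber axis with
# uniform spacing 2*pi/(Nz*d_z). The values below are illustrative.
import numpy as np
from scipy import fftpack

Nz_demo, d_z_demo = 128, 0.5
k_z_demo = 2 * np.pi * fftpack.fftshift(fftpack.fftfreq(Nz_demo, d=d_z_demo))
dk = k_z_demo[1] - k_z_demo[0]          # equals 2*np.pi/(Nz_demo*d_z_demo)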
Exemple #58
0
    #   ---------------
    #   Grid properties
    #   ---------------
    c_cm_ps = c * 1e2 / 1e12
    c_nm_ps = c * 1e9 / 1e12
    num_bits = 12
    num_grid_points = 2**num_bits  # use powers of 2 for efficient FFT's
    time_window_width = 50  # [ps]
    tms = sp.linspace(-time_window_width / 2, time_window_width / 2,
                      num_grid_points)

    wavelength_signal = 1040.0  # [nm]
    wavelength_pump = 976.0  # [nm]
    num_points = len(tms)  # number of grid points
    dt = tms[1] - tms[0]  # time spacing [ps]
    freqs = fftshift(fftfreq(num_points, dt))  # [THz]
    omegas_centered_on_zero = (2 *
                               sp.pi) * freqs  # angular frequency about zero
    omega_centre_signal = (
        2 * sp.pi *
        c_nm_ps) / wavelength_signal  # angular reference frequency [TCycles]
    omega_centre_pump = (
        2 * sp.pi *
        c_nm_ps) / wavelength_pump  # angular reference frequency [TCycles]

    omegas = omegas_centered_on_zero + omega_centre_signal
    omegas_pump = omegas_centered_on_zero + omega_centre_pump
    wls = 2 * sp.pi * c_nm_ps / omegas  # wavelength grid [nm] - need a wls grid for the signal centered at 1040 nm
    wls_pump = 2 * sp.pi * c_nm_ps / omegas_pump  # wavelength grid [nm]- need a wls grid for the pump centered at 976 nm

    #   -----------
Exemple #59
0
    # You can keep a reference to the individual sounds (sound objects) and start/stop them individually... Experiment with that in a later version
    # You only hear the first 8 sounds of the sweep when the constant is set to True ;-) so that is not so great... You would then have to adjust the other
    # constants so that the loop only runs 8 times...
    #
    sound = pygame.sndarray.make_sound(signal)
    sound.play(-1)
    #
    # If the constant PLOT is True, we fill the figure with plots of the sound that is playing
    #
    if PLOT:
        left_curve, = left_plot.plot(time, signal.T[0])
        right_curve, = right_plot.plot(time, signal.T[1])
        combined = signal.T[0] + signal.T[1]
        combined_curve, = combined_plot.plot(time, combined)
        FFT = abs(fft(combined))
        freqs = fftfreq(combined.size, 1 / SAMPLERATE)
        fft_curve, = fft_plot.plot(freqs, np.log10(FFT))

    #
    # To let the plot finish drawing before a pause, plt.pause is used when plotting; otherwise a pygame delay is used
    #
    if PLOT: plt.pause(DURATION - 0.1)
    else: pygame.time.delay(500)
    #
    # The constants control whether only one curve is drawn for the sound that is playing or whether the curves are drawn on top of each other.
    # Below, old curves are deleted if ONLY_ONE_CURVE is True. The FFT plot has special status and can be controlled separately.
    #
    if PLOT and ONLY_ONE_CURVE:
        left_plot.lines.remove(left_curve)
        right_plot.lines.remove(right_curve)
        combined_plot.lines.remove(combined_curve)
Exemple #60
0
plt.xlabel("z (kpc)")
plt.ylabel("W(z,theta)")
plt.legend()
plt.savefig("../images/Figure 7.png", dpi=400)
plt.show()

ws = np.zeros((2000, 4000))
ft_ws = np.zeros((2000, 4000))
freqs_ws = np.zeros((2000, 4000))

i = 0
for theta in range(0, 2000, 1):
    ws[i, :] = windowFunc(theta, zs)
    ft_ws[i, :] = np.abs(fftpack.fft(
        ws[i, :]))**2  # squared magnitude of the partial FT (over z) of the window function
    freqs_ws[i, :] = fftpack.fftfreq(len(ws[i, :]), d=(zs[1] - zs[0]))
    i += 1

plt.figure()
plt.imshow(ws)
plt.xlabel("z (-2000 to 2000 kpc)")
plt.ylabel("theta (0-2000 kpc)")
plt.title(r"$W(z,\theta)$")
plt.savefig("../images/Window Function.png", dpi=400)
plt.colorbar()
plt.show()

plt.figure()
plt.plot(freqs_ws[0, :], ft_ws[0, :], label="theta=0 kpc")
plt.plot(freqs_ws[500, :], ft_ws[500, :], label="theta=500 kpc")
plt.loglog()