Example #1
 def make_unitary(self):
     # normalize every FFT bin to unit magnitude, then transform back
     fft_val = np.fft.fft(self.v)
     fft_imag = fft_val.imag
     fft_real = fft_val.real
     fft_norms = [np.sqrt(fft_imag[n]**2 + fft_real[n]**2) for n in range(len(self.v))]
     fft_unit = fft_val / fft_norms
     self.v = (np.fft.ifft(fft_unit)).real
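For reference, the same operation as a self-contained sketch (np.abs gives the bin magnitudes directly, so the explicit real/imag arithmetic above is optional):

import numpy as np

v = np.array([1.0, 2.0, 0.5, -1.0])        # illustrative data; no FFT bin is zero
spectrum = np.fft.fft(v)
v_unitary = np.fft.ifft(spectrum / np.abs(spectrum)).real
print(np.allclose(np.abs(np.fft.fft(v_unitary)), 1.0))  # True: every bin now has magnitude 1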
Example #2
def shift(x, n):
    # circular shift of x by n samples: convolve with a delta at index n
    vec1 = 0 * x
    vec1[n] = 1
    vec1fft = np.fft.fft(vec1)
    xft = np.fft.fft(x)
    return np.real(np.fft.ifft(xft * vec1fft))
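Convolving with a delta at index n is just a circular shift, so the result should match np.roll; a quick check using the function above:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
print(np.allclose(shift(x, 1), np.roll(x, 1)))  # True: [4., 1., 2., 3.]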
Example #3
 def fourier_projection(self, density):
     
     ft_density = np.fft.fftn(density)
     
     # -m wraps around array, but that should be OK....
     # --> self._A_ell_expt[l] is shape (2l+1) x n_q  = (m x q)
     #     ft_coefficients[:,l,:] is shape n_q x (2l+1) = (q x m) (transposed later)
     ft_coefficients = self._sph_harm_projector(ft_density)
     
     # zero out the array, use it to store the next iterate
     ft_density[:,:,:] = 0.0 + 1j * 0.0
     
     for l in range(self.order_cutoff):
         
         A_ell_model = ft_coefficients[:,l,:].T # (2l+1) x n_q  = (m x q)
     
         # find U that rotates the experimental vectors into as close
         # agreement as possible with the model vectors
         U = math2.kabsch(self._A_ell_expt[l], A_ell_model)
 
         # update: k --> k+1
         A_ell_prime = np.dot(self._A_ell_expt[l], U)
         ft_density += self._sph_harm_projector.expand_sph_harm_order(A_ell_prime, l)
         
     updated_density = np.fft.ifftn(ft_density)
         
     return updated_density
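math2.kabsch is not shown on this page; for orientation, a standard SVD-based Kabsch solver looks roughly like the sketch below (an assumption about the interface, not necessarily this project's implementation):

import numpy as np

def kabsch_sketch(A, B):
    """Return the rotation U minimizing ||A @ U - B||_F (Kabsch algorithm)."""
    V, s, Wt = np.linalg.svd(A.T @ B)
    d = np.sign(np.linalg.det(V @ Wt))          # guard against improper rotations
    D = np.diag([1.0] * (len(s) - 1) + [d])
    return V @ D @ Wt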
Example #4
 def make_unitary(self):
     # normalize every FFT bin to unit magnitude, then transform back
     fft_val = np.fft.fft(self.v)
     fft_imag = fft_val.imag
     fft_real = fft_val.real
     fft_norms = [
         np.sqrt(fft_imag[n]**2 + fft_real[n]**2) for n in range(len(self.v))
     ]
     fft_unit = fft_val / fft_norms
     self.v = (np.fft.ifft(fft_unit)).real
Example #5
def idct(v, axis=-1):
    n = len(v.shape)
    N = v.shape[axis]
    even = (N % 2 == 0)
    slices = [None]*4
    for k in range(4):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    k = np.arange(N)
    if even:
        ak = np.r_[1.0, [2]*(N-1)] * np.exp(1j*np.pi*k/(2*N))
        newshape = np.ones(n, dtype=int)
        newshape[axis] = N
        ak.shape = newshape
        xhat = np.real(np.fft.ifft(v*ak, axis=axis))
        x = 0.0*v
        slices[0][axis] = slice(None, None, 2)
        slices[1][axis] = slice(None, N//2)
        slices[2][axis] = slice(N, None, -2)
        slices[3][axis] = slice(N//2, None)
        for k in range(4):
            slices[k] = tuple(slices[k])
        x[slices[0]] = xhat[slices[1]]
        x[slices[2]] = xhat[slices[3]]
        return x
    else:
        ak = 2*np.exp(1j*np.pi*k/(2*N))
        newshape = np.ones(n, dtype=int)
        newshape[axis] = N
        ak.shape = newshape
        newshape = list(v.shape)
        newshape[axis] = 2*N
        Y = np.zeros(newshape, np.complex128)
        #Y[:N] = ak*v
        #Y[(N+1):] = conj(Y[N:0:-1])
        slices[0][axis] = slice(None, N)
        slices[1][axis] = slice(None, None)
        slices[2][axis] = slice(N+1, None)
        slices[3][axis] = slice(N-1, 0, -1)
        for k in range(4):
            slices[k] = tuple(slices[k])
        Y[slices[0]] = ak*v
        Y[slices[2]] = np.conj(Y[slices[3]])
        x = np.real(np.fft.ifft(Y, axis=axis))[slices[0]]
        return x
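As a sanity check, the even/odd reindexing above should reproduce the plain inverse-DCT-II cosine series; a small numerical comparison (my reading of the normalization, so treat as a sketch):

import numpy as np

v = np.random.rand(8)
N = len(v)
m = np.arange(N)
direct = (v[0] + 2 * sum(v[k] * np.cos(np.pi * k * (2*m + 1) / (2*N))
                         for k in range(1, N))) / N
print(np.allclose(idct(v), direct))  # inverse of the unnormalized DCT-II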
Example #6
def idct(v, axis=-1):
    n = len(v.shape)
    N = v.shape[axis]
    even = (N % 2 == 0)
    slices = [None]*4
    for k in range(4):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    k = np.arange(N)
    if even:
        ak = np.r_[1.0, [2]*(N-1)] * np.exp(1j*np.pi*k/(2*N))
        newshape = np.ones(n, dtype=int)
        newshape[axis] = N
        ak.shape = newshape
        xhat = np.real(np.fft.ifft(v*ak, axis=axis))
        x = 0.0*v
        slices[0][axis] = slice(None, None, 2)
        slices[1][axis] = slice(None, N//2)
        slices[2][axis] = slice(N, None, -2)
        slices[3][axis] = slice(N//2, None)
        for k in range(4):
            slices[k] = tuple(slices[k])
        x[slices[0]] = xhat[slices[1]]
        x[slices[2]] = xhat[slices[3]]
        return x
    else:
        ak = 2*np.exp(1j*np.pi*k/(2*N))
        newshape = np.ones(n, dtype=int)
        newshape[axis] = N
        ak.shape = newshape
        newshape = list(v.shape)
        newshape[axis] = 2*N
        Y = np.zeros(newshape, np.complex128)
        #Y[:N] = ak*v
        #Y[(N+1):] = conj(Y[N:0:-1])
        slices[0][axis] = slice(None, N)
        slices[1][axis] = slice(None, None)
        slices[2][axis] = slice(N+1, None)
        slices[3][axis] = slice(N-1, 0, -1)
        for k in range(4):
            slices[k] = tuple(slices[k])
        Y[slices[0]] = ak*v
        Y[slices[2]] = np.conj(Y[slices[3]])
        x = np.real(np.fft.ifft(Y, axis=axis))[slices[0]]
        return x
Example #7
def conv(x, y):
    n = x.size + y.size - 1  # pad to avoid circular wrap-around
    x1 = np.zeros(n)
    x1[0:x.size] = x  # added zeros

    y1 = np.zeros(n)
    y1[0:y.size] = y  # added zeros
    x1ft = np.fft.fft(x1)
    y1ft = np.fft.fft(y1)
    vect1 = np.real(np.fft.ifft(x1ft * y1ft))
    return vect1[0:x.size]
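With the padding fixed, the result should match the first x.size samples of a direct linear convolution:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([1.0, 0.5])
print(np.allclose(conv(x, y), np.convolve(x, y)[:x.size]))  # True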
Example #8
def conv(x, y):
    n = x.size + y.size - 1  # pad to avoid circular wrap-around
    x1 = np.zeros(n)
    x1[0:x.size] = x  # added zeros

    y1 = np.zeros(n)
    y1[0:y.size] = y  # added zeros
    x1ft = np.fft.fft(x1)
    y1ft = np.fft.fft(y1)
    vect1 = np.real(np.fft.ifft(x1ft * y1ft))
    return vect1[0:x.size]
Example #9
def smoothGaussian1dMirror(array, sigma):
    # mirror the signal to suppress edge effects
    array_reversed = array[::-1]
    array_mirrored = numpy.zeros(2*len(array))
    array_mirrored[:len(array)] = array[:]
    array_mirrored[len(array):] = array_reversed[:]
    # Gaussian window applied in the frequency domain (note the minus sign in the exponent)
    kernel = 1/numpy.sqrt(2*numpy.pi)/sigma * numpy.exp(
        -numpy.arange(0, len(array_mirrored), 1.0)**2/2.0/sigma**2)
    array_smoothed = numpy.fft.ifft(numpy.fft.fft(array_mirrored) * kernel).real
    array_smoothed = array_smoothed[:len(array)]
    print(array_smoothed)
    return array_smoothed
Example #10
def correlationFunctionByFFT3D(box, nbin, Lcell=1.0):
    """ use FFT """

    # fft to get k-space box
    kbox = np.fft.fftn(box)
    # from k-space box get power spectrum
    pk = np.abs(kbox)**2
    # ifft to get correlation function
    rbox = np.fft.ifftn(pk).real

    return rbox
Example #11
def getHNR(y, Fs, F0, Nfreqs):
    print('holla')
    NBins = len(y)
    N0 = int(round(Fs / F0))
    N0_delta = int(round(N0 * 0.1))

    y = [x * z for x, z in zip(np.hamming(len(y)), y)]
    fftY = np.fft.fft(y, NBins)
    aY = np.log10(abs(fftY))
    ay = np.fft.ifft(aY)

    peakinx = np.zeros(int(np.floor(len(y) / 2 / N0)))
    for k in range(1, len(peakinx)):
        ayseg = ay[(k * N0 - N0_delta):(k * N0 + N0_delta)]
        inx = int(np.argmax(abs(ayseg)))  # np.argmax replaces MATLAB's two-output max()
        peakinx[k] = inx + (k * N0) - N0_delta - 1

        s_ayseg = np.sign(np.diff(ayseg))

        l_inx = inx - np.flatnonzero(np.sign(s_ayseg[inx - 1::-1]) != np.sign(inx))[0] + 1
        r_inx = inx + np.flatnonzero(np.sign(s_ayseg[inx + 1:]) == np.sign(inx))[0]

        l_inx = l_inx + k * N0 - N0_delta - 1
        r_inx = r_inx + k * N0 - N0_delta - 1

        for num in range(l_inx, r_inx):
            ay[num] = 0

    midL = int(round(len(y) / 2)) + 1
    # mirror the first half of the cepstrum onto the second half
    ay[midL:] = ay[midL - 2:midL - 2 - (len(ay) - midL):-1]

    Nap = np.real(np.fft.fft(ay))
    N = Nap  # ???? why?
    Ha = aY - Nap  # TODO: rename these variables

    Hdelta = F0 / Fs * len(y)
    for f in np.arange(Hdelta, round(len(y) / 2), Hdelta) + 0.0001:
        fstart = int(np.ceil(f - Hdelta))
        fstop = int(round(f))
        Bdf = abs(min(Ha[fstart:fstop]))
        N[fstart:fstop] = N[fstart:fstop] - Bdf

    H = aY - N
    n = np.zeros(len(Nfreqs))

    for k in range(1, len(Nfreqs)):
        Ef = int(round(Nfreqs[k] / Fs * len(y)))
        n[k] = (20 * np.mean(H[1:Ef])) - (20 * np.mean(N[1:Ef]))

    return n
Example #12
    def compute_predictions(self, test_x):
        complex_array = self.post_mean_r + 1j * self.post_mean_i
        print(self.w)
        ## symmetrize ##
        real_pt = np.concatenate((self.post_mean_r, self.post_mean_r[1:])) / 2.
        im_pt = np.concatenate((self.post_mean_i, self.post_mean_i[1:])) / 2.
        plt.plot(real_pt)
        plt.plot(im_pt)
        plt.show()
        cmplx_array = real_pt + 1j * im_pt

        ft = np.fft.ifft(cmplx_array)
        plt.plot(ft.real)  # the ifft output is complex; plot its real part
Example #13
def conv2d(smaller, larger):
    """Convolve a pair of 2d numpy matrices.
    Uses the Fourier transform method, so it is faster if the larger matrix
    has dimensions of size 2**n.

    Actually right now the matrices must be the same size (will sort out
    padding issues another day!)
    """
    smallerFFT = numpy.fft.fft2(smaller)
    largerFFT = numpy.fft.fft2(larger)

    invFFT = numpy.fft.ifft2(smallerFFT * largerFFT)
    return invFFT.real
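Because the same-size FFT product wraps around, this is a circular convolution, not a linear one; a quick check against a direct wrap-around sum:

import numpy

a = numpy.random.rand(4, 4)
b = numpy.random.rand(4, 4)
direct = numpy.zeros((4, 4))
for i in range(4):
    for j in range(4):
        for k in range(4):
            for l in range(4):
                direct[i, j] += a[k, l] * b[(i - k) % 4, (j - l) % 4]
print(numpy.allclose(conv2d(a, b), direct))  # True: wrap-around convolution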
Example #14
def getHNR(y, Fs, F0, Nfreqs):
    print('holla')
    NBins = len(y)
    N0 = int(round(Fs / F0))
    N0_delta = int(round(N0 * 0.1))

    y = [x * z for x, z in zip(np.hamming(len(y)), y)]
    fftY = np.fft.fft(y, NBins)
    aY = np.log10(abs(fftY))
    ay = np.fft.ifft(aY)

    peakinx = np.zeros(int(np.floor(len(y) / 2 / N0)))
    for k in range(1, len(peakinx)):
        ayseg = ay[k*N0 - N0_delta : k*N0 + N0_delta]
        inx = int(np.argmax(abs(ayseg)))  # np.argmax replaces MATLAB's two-output max()
        peakinx[k] = inx + (k * N0) - N0_delta - 1

        s_ayseg = np.sign(np.diff(ayseg))

        l_inx = inx - np.flatnonzero(np.sign(s_ayseg[inx - 1::-1]) != np.sign(inx))[0] + 1
        r_inx = inx + np.flatnonzero(np.sign(s_ayseg[inx + 1:]) == np.sign(inx))[0]

        l_inx = l_inx + k*N0 - N0_delta - 1
        r_inx = r_inx + k*N0 - N0_delta - 1

        for num in range(l_inx, r_inx):
            ay[num] = 0

    midL = int(round(len(y) / 2)) + 1
    # mirror the first half of the cepstrum onto the second half
    ay[midL:] = ay[midL - 2:midL - 2 - (len(ay) - midL):-1]

    Nap = np.real(np.fft.fft(ay))
    N = Nap  # ???? why?
    Ha = aY - Nap  # TODO: rename these variables

    Hdelta = F0 / Fs * len(y)
    for f in np.arange(Hdelta, round(len(y) / 2), Hdelta) + 0.0001:
        fstart = int(np.ceil(f - Hdelta))
        fstop = int(round(f))
        Bdf = abs(min(Ha[fstart:fstop]))
        N[fstart:fstop] = N[fstart:fstop] - Bdf

    H = aY - N
    n = np.zeros(len(Nfreqs))

    for k in range(1, len(Nfreqs)):
        Ef = int(round(Nfreqs[k] / Fs * len(y)))
        n[k] = (20 * np.mean(H[1:Ef])) - (20 * np.mean(N[1:Ef]))

    return n
Example #15
def findPulse(rgb):
    N = int(rgb.size / 3)
    k = 128
    B = np.array([6, 24])          # band-pass limits, in FFT bins
    P = np.zeros(N)
    for n in range(N - k + 1):
        C = rgb[:, n:n+k]
        Cprim = countC(C)          # countC is defined elsewhere in the project
        F = np.fft.fft(Cprim, axis=1)
        SS = np.array([[0, 1, -1], [-2, 1, 1]])
        S = SS @ F
        Z = S[0, :] + np.abs(S[0, :]) / np.abs(S[1, :]) * S[1, :]
        Zprim = Z * (np.abs(Z) / np.abs(np.sum(F, axis=0)))
        Zprim[:B[0]-1] = 0         # zero everything outside the pass band
        Zprim[B[1]:] = 0
        Pprim = np.real(np.fft.ifft(Zprim))
        P[n:n+k] = P[n:n+k] + (Pprim - np.mean(Pprim)) / np.std(Pprim)
    return P
Example #16
def idst(v,axis=-1):
    n = len(v.shape)
    N = v.shape[axis]
    slices = [None]*3
    for k in range(3):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    newshape = list(v.shape)
    newshape[axis] = 2*(N+1)
    Xt = np.zeros(newshape,np.complex128)
    slices[0][axis] = slice(1,N+1)
    slices[1][axis] = slice(N+2,None)
    slices[2][axis] = slice(None,None,-1)
    val = 2j*v
    for k in range(3):
        slices[k] = tuple(slices[k])
    Xt[slices[0]] = -val
    Xt[slices[1]] = val[slices[2]]
    xhat = np.real(np.fft.ifft(Xt, axis=axis))
    return xhat[slices[0]]
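The odd extension above should reproduce the plain sine series x_n = (2/(N+1)) * sum_k v_{k-1} sin(pi*k*(n+1)/(N+1)); a quick numerical check (my reading of the normalization, so treat as a sketch):

import numpy as np

v = np.random.rand(5)
N = len(v)
k = np.arange(1, N + 1)          # sine-series index
m = np.arange(1, N + 1)          # output sample index
direct = (2.0 / (N + 1)) * np.sin(np.pi * np.outer(m, k) / (N + 1)) @ v
print(np.allclose(idst(v), direct))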
Example #17
def idst(v,axis=-1):
    n = len(v.shape)
    N = v.shape[axis]
    slices = [None]*3
    for k in range(3):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    newshape = list(v.shape)
    newshape[axis] = 2*(N+1)
    Xt = np.zeros(newshape,np.complex128)
    slices[0][axis] = slice(1,N+1)
    slices[1][axis] = slice(N+2,None)
    slices[2][axis] = slice(None,None,-1)
    val = 2j*v
    for k in range(3):
        slices[k] = tuple(slices[k])
    Xt[slices[0]] = -val
    Xt[slices[1]] = val[slices[2]]
    xhat = np.real(np.fft.ifft(Xt, axis=axis))
    return xhat[slices[0]]
Example #18
    def find_peak_phi(self, cross_corr=False, img=None):
        """
        RMClean.find_peak_phi(cross_corr=False, img=None)

        Finds the index of the phi value at the peak of the residual image (or
        the image passed to the function). The peak can be found by a simple
        search algorithm, or by first cross correlating the image with the
        RMSF.

        Inputs:
            cross_corr- Perform a cross correlation between the RMSF and the
                        residual map before searching for the peak, à la Heald (2009)
            img-        Image to search through.  If none is given, the current
                        residual image will be used.
        Outputs:
            1-          The pixel index of the phi value at the map peak
        """
        phi_ndx = -1

        peak_res = 0.

        if img is None:
            img = self.residual_map

        if not cross_corr:
            for i in range(len(img)):
                if (abs(img[i]) > peak_res):
                    phi_ndx = i
                    peak_res = abs(img[i])
        else:
            temp_map_fft = numpy.fft.fft(img)
            temp_map = numpy.fft.ifft(temp_map_fft.conjugate() *
                                      numpy.fft.fft(self.synth.rmsf))
            phi_ndx = self.find_peak_phi(img=temp_map)

        return phi_ndx
Example #19
def dispersion(signal, beta, length, dt):
    fs = np.fft.fftfreq(len(signal), d=dt)
    os = 2 * np.pi * fs
    # quadratic spectral phase accumulated over `length` (all-pass factor)
    mulvec = np.exp(-beta / 2 * 1j * length * np.power(os, 2))
    return np.fft.ifft(mulvec * np.fft.fft(signal))
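The dispersion factor is all-pass (pure phase), so the spectral magnitude must be preserved; a quick check with illustrative parameter values:

import numpy as np

t = np.linspace(-5e-12, 5e-12, 512)
sig = np.exp(-(t / 1e-12)**2).astype(complex)            # Gaussian pulse
out = dispersion(sig, beta=-21.7e-27, length=1e3, dt=t[1] - t[0])
print(np.allclose(np.abs(np.fft.fft(out)), np.abs(np.fft.fft(sig))))  # True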
Example #20
def cor(x, y):
    # circular cross-correlation via the correlation theorem
    xft = np.fft.fft(x)
    yft = np.fft.fft(y)
    conyft = np.conj(yft)
    return np.real(np.fft.ifft(xft * conyft))
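This is a circular cross-correlation: lag m of the result equals sum_n x[n]*y[(n-m) mod N], which np.roll can verify directly:

import numpy as np

x = np.random.rand(6)
y = np.random.rand(6)
direct = np.array([np.dot(x, np.roll(y, lag)) for lag in range(len(x))])
print(np.allclose(cor(x, y), direct))  # True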
Example #21
    def computeIRs(self, *args, **kwdargs):
        '''
        Returns the scene response convolved with the HRTFs; the result
        depends on the form of the HRTFs.
        
        Computes the IRs from the scene given as a first argument, and convolves it correctly with the Receiver.
        
        Sample usage:
        receiver.computeIRs(scene, 
        '''
        if len(args) == 1 and isinstance(args[0], Beam):
            beam = args[0]
        else:
            scene = args[0]
            args = args[1:]
            beam = scene.render(self)
            beam = beam[beam.get_reachedsource_index()]
        
        newargs = [None]*(len(args)+1)
        newargs[1:] = args
        newargs[0] = beam
        
        HRIRs = self.computeHRIRs(beam)
        log_debug('HRIRs are of length (%i)' % (HRIRs.shape[0]))

        kwdargs['binaural'] = False

        kwdargs['collapse'] = False
        
        IRs = scene.computeIRs(*newargs, **kwdargs)

        # if isinstance(scene, SimpleScene):
        #     kwdargs['binaural'] = False
        # else:
        IRs = np.tile(np.asarray(IRs), (1, 2))

        print(IRs.shape)

        log_debug('Scene IRs are of length (%i)' % (IRs.shape[0]))
        log_debug('Convolving '+str(beam.nrays)+' scene responses with HRIRs')

        ir_offset0 = np.min(np.argmin(1.0*(np.abs(IRs) < 1e-10), axis = 0))
        ir_offset1 = IRs.shape[0]-np.min(np.argmin(1.0*(np.abs(IRs[::-1,:]) < 1e-10), axis = 0)) + 1
        if ir_offset0 == ir_offset1 - 2:
            log_debug('Environment impulse response is just one delay + gain')
            # simple case, with only a delay and possibly a gain, so we just multiply
            gains = np.tile(IRs[ir_offset0, :].reshape(1, IRs.shape[1]), (HRIRs.shape[0], 1))
            convolution = HRIRs * gains
        else:
            # we do a real linear convolution, mostly because lengths never match
            # it is costly so we try and trim the scene IRs that are quite often sparse
            nir = ir_offset1 - ir_offset0
            N =  nir + HRIRs.shape[0] - 1

            IRs_padded = np.vstack((IRs[ir_offset0:ir_offset1, :], 
                                    np.zeros((N - nir, IRs.shape[1]))))
            HRIRs_padded = np.vstack((HRIRs, 
                                      np.zeros((IRs_padded.shape[0] - HRIRs.shape[0], HRIRs.shape[1]))))

            convolution = np.fft.ifft(np.fft.fft(IRs_padded, axis = 0)*np.fft.fft(HRIRs_padded, axis = 0), axis = 0).real

        res = np.vstack((np.zeros((max(ir_offset0-1,0), 2*beam.nrays)), convolution))
        log_debug('Collapsing final responses')
        
        if scene.nsources > 1:
            # More than one source
            # TODO 
            allhrirs = zerosIR((res.shape[0], 2*scene.nsources), binaural = True)
            relativecoordinates = []
            target_source = np.unique(HRIRs.target_source)
            for i in range(scene.nsources):
                relativecoordinates.append(scene.sources[i].getRelativeSphericalCoords(
                        self, unit = 'deg'))
                allhrirs[:, i] = np.sum(res[:, i*beamspersource:(i+1)*beamspersource], axis = 1)/float(beamspersource)#left
                allhrirs[:, i+scene.nsources] = np.sum(res[:, beam.nrays+i*beamspersource:beam.nrays+(i+1)*beamspersource], axis = 1)/float(beamspersource)#right
            if np.isnan(allhrirs).any():
                log_debug('Output of getIRs will contain nans')
            allhrirs.target_source = target_source
            allhrirs.coordinates = relativecoordinates
            return allhrirs
        
        else:
            # exactly one source
            left = np.sum(res[:,:2], axis = 1)/beam.nrays
            right = np.sum(res[:,2:], axis = 1)/beam.nrays
            data = np.hstack((
                    left.reshape((len(left),1)), right.reshape((len(right), 1))
                                                               ))
            coordinates = scene.sources[0].getRelativeSphericalCoords(self)
            return ImpulseResponse(data,
                                   binaural = True,
                                   samplerate = HRIRs.samplerate,
                                   target_source = scene.sources[0].get_id(), coordinates = coordinates[1:])
Example #22
def cmtm(x, y, dt=1.0, NW=8, qbias=0.0, confn=0.0, qplot=True):
    """
    s, c, ph, ci, phi = cmtm(x,y,dt,NW,qbias,confn,qplot)
    Multi-taper method coherence using adaptive weighting and correcting
    for the bias inherent to coherence estimates.  The 95% coherence
    confidence level is computed by cohconf.py.  In addition, a
    Monte Carlo estimation procedure is available to estimate phase 95%
    confidence limits.

     Args:
             x     - Input data vector 1.
             y     - Input data vector 2.
             dt    - Sampling interval (default 1.0)
             NW    - Number of windows to use (default 8)
             qbias - Correct coherence estimate for bias (default 0).
             confn - Number of iterations to use in estimating phase
                           uncertainty using a Monte Carlo method. (default 0)
             qplot - Plot the results.  The upper tickmarks indicate the
                     bandwidth of the coherence and phase estimates.

     Returns:
             s       - frequency
             c       - coherence
             ph      - phase
             ci      - 95% coherence confidence level
             phi     - 95% phase confidence interval, bias corrected
                       (add and subtract phi from ph).


    required: cohconf.py, cohbias.py, cohbias.nc, scipy signal processing.
    """

    # pre-checks
    if NW < 1.5:
        raise ValueError("Warning: NW must be greater or equal to 1.5")

    print('Number of windows: ', NW)
    if qbias == 1.:
        print('Bias correction:   On')
    else:
        print('Bias correction:   Off')

    print('Confidence Itera.: ', confn)
    if qplot == 1.:
        print('Plotting:          On')
    else:
        print('Plotting:          Off')

    x = x.flatten() - np.mean(x)
    y = y.flatten() - np.mean(y)
    if x.shape[0] != y.shape[0]:
        raise ValueError('Warning: the lengths of x and y must be equal.')

    #  define some parameters
    N = x.shape[0]
    k = min(int(round(2. * NW)), N)
    k = max(k - 1, 1)
    s = np.arange(N) / (N * dt)  # frequency axis, 0 .. (N-1)/(N*dt)
    pls = np.arange(2., ((N+1.)/2.+1.)+1)
    v = 2*NW-1  # approximate degrees of freedom
    if y.shape[0] % 2 == 1:
        pls = pls[:-1]

    # Compute the discrete prolate spheroidal sequences,
    # requires the spectral analysis toolbox.
    [E, V] = dpss(N, NW, k)
    # Compute the windowed DFTs.
    fkx = np.fft.fft(E[:, 0:k] * np.tile(x[:, None], (1, k)), n=N, axis=0)
    fky = np.fft.fft(E[:, 0:k] * np.tile(y[:, None], (1, k)), n=N, axis=0)
    Pkx = np.abs(fkx)**2.
    Pky = np.abs(fky)**2.
    # Iteration to determine adaptive weights:
    for i1 in np.arange(1, 3):
        if i1 == 1:
            vari = np.dot(x.conj().T, x)/N
            Pk = Pkx

        if i1 == 2:
            vari = np.dot(y.conj().T, y)/N
            Pk = Pky

        P = (Pk[:, 0]+Pk[:, 1])/2.
        # initial spectrum estimate
        Ptemp = np.zeros(N)
        P1 = np.zeros(N)
        tol = .0005 * vari / N
        # usually within tolerance in about three iterations,
        # see equations from [2] (P&W pp 368-370).
        a = vari * (1. - V)
        while np.sum(np.abs(P - P1)) / N > tol:
            b = np.dot(P.reshape(N, 1), np.ones((1, k))) / (
                np.dot(P.reshape(N, 1), V.reshape(1, k)) +
                np.dot(np.ones((N, 1)), a.reshape(1, k)))
            # weights
            wk = b**2. * np.dot(np.ones((N, 1)), V.reshape(1, k))
            # new spectral estimate: weighted sum over tapers, per frequency
            P1 = np.sum(wk * Pk, axis=1) / np.sum(wk, axis=1)
            Ptemp = P1
            P1 = P
            P = Ptemp
            # swap P and P1

        if i1 == 1:
            # normalize the eigenspectra by the adaptive weights
            fkx = np.sqrt(k) * np.sqrt(wk) * fkx / np.tile(
                np.sum(np.sqrt(wk), axis=1).reshape(N, 1), (1, k))
            Fx = P
            # Power density spectral estimate of x

        if i1 == 2:
            fky = np.sqrt(k) * np.sqrt(wk) * fky / np.tile(
                np.sum(np.sqrt(wk), axis=1).reshape(N, 1), (1, k))
            Fy = P
            # Power density spectral estimate of y

    # As a check, the quantity sum(abs(fkx(pls,:))'.^2) is the same as Fx and
    # the spectral estimate from pmtmPH.
    # Compute coherence
    Cxy = np.sum(fkx * np.conj(fky), axis=1)
    ph = np.angle(Cxy) * 180. / np.pi
    c = np.abs(Cxy) / np.sqrt(np.sum(np.abs(fkx)**2., axis=1) *
        np.sum(np.abs(fky)**2., axis=1))
    # correct for the bias of the estimate
    if qbias == 1:
        c = cohbias(v, c).conj().T


    # Phase uncertainty estimates via Monte Carlo analysis.
    if confn > 1:
        cb = cohbias(v, c).conj().T
        nlist = np.arange(1., confn + 1)
        ciph = np.zeros((len(nlist), x.shape[0]))  # not sure about the cmtm return length
        phi = np.zeros((len(nlist), x.shape[0]))  # not sure about the cmtm return length
        for iter in nlist:
            if iter % 10. == 0.:
                print('phase confidence iteration: ', iter)

            fx = np.fft.fft(np.random.randn(*x.shape) + 1.)
            fx = fx / np.sum(np.abs(fx))
            fy = np.fft.fft(np.random.randn(*y.shape) + 1.)
            fy = fy / np.sum(np.abs(fy))
            ys = np.real(np.fft.ifft(fy * np.sqrt(1. - cb.conj().T**2.)))
            ys = ys + np.real(np.fft.ifft(fx * cb.conj().T))
            xs = np.real(np.fft.ifft(fx))

            si, ciph[int(iter)-1, :], phi[int(iter)-1, :] = cmtm(xs, ys, dt, NW)[:3]
        pl = int(np.round(.975 * iter))

        # sorting and averaging to determine confidence levels.
        phi = np.sort(phi)
        phi = np.array(np.vstack((np.hstack((phi[int(pl)-1, :])), np.hstack((-phi[int((iter-pl+1.))-1, :])))))
        phi = np.mean(phi)
        phi = np.convolve(phi[0:], np.array([1., 1., 1.]) / 3.)
        phi = phi[1:-1]
    else:
        phi = np.zeros(pls.shape[0])

    # Cut to one-sided functions
    c = c[pls.astype(int) - 1]
    s = s[pls.astype(int) - 1].conj().T
    ph = ph[pls.astype(int) - 1]

    # Coherence confidence level
    ci = cohconf(v, .95)

    # not corrected for bias, this is conservative.
    ci = np.dot(ci, np.ones((c.shape[0])))

    # plotting
    if qplot:
        phl = ph-phi
        phu = ph+phi
        # coherence
        print('coherence plot')
        # phase
        print('phase plot')

    return s, c, ph, ci, phi
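dpss is not defined on this page; SciPy provides the discrete prolate spheroidal sequences as scipy.signal.windows.dpss, so a plausible stand-in (note the transpose: the code above indexes tapers as columns) is:

import numpy as np
from scipy.signal.windows import dpss as _dpss

def dpss(N, NW, k):
    """Return (E, V): an N x k taper matrix and the k concentration ratios."""
    tapers, ratios = _dpss(N, NW, Kmax=k, return_ratios=True)
    return tapers.T, ratios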
Example #23
def noise(signal, SNR):
    return np.real(np.fft.ifft(SNR / (SNR + 1) * np.fft.fft(signal[:])))
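With a scalar SNR this is just a constant gain of SNR/(SNR+1); passing a per-bin SNR array turns it into a simple Wiener-style suppression rule. A usage sketch with an assumed per-bin SNR estimate:

import numpy as np

x = np.random.randn(128)
power = np.abs(np.fft.fft(x))**2
snr_per_bin = power / np.median(power)       # crude per-bin SNR estimate (assumption)
y = noise(x, snr_per_bin)
print(y.shape)  # (128,)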
Example #24
                                                                                                                    Frames))  # Rayleigh variance = 1
        fading_coeffs = np.fft.fft(impulse_response)
    else:
        fading_coeffs = np.ones((NFFT, Frames))

    # DFT
    TxNFFT = np.zeros((NFFT, Frames), dtype=complex)
    TxDFT = (1 / np.sqrt(N)) * np.fft.fft(symbols)
    # Interleaving
    #     TxDFTInterleaved = Interleaving(TxDFT, g)  # interleaving
    TxDFTInterleaved = TxDFT  # no interleaving
    # Subcarrier mapping (access mode)
    TxNFFT[0:N, 0:Frames] = TxDFTInterleaved  # localized FDMA
    #     TxNFFT[0:NFFT:NFFT//N, :] = TxDFT  # interleaved FDMA
    # IFFT
    Tx = (NFFT / np.sqrt(K)) * np.fft.ifft(TxNFFT)  # (sqrt(NFFT))*
    # CP and channel
    TxCP = np.vstack((Tx[-CP:, :], Tx))  # cyclic prefix
    for fr in range(Frames):
        # MATLAB filter() translated to scipy.signal.lfilter
        TxCPChannel[:, fr] = lfilter(impulse_response[:, fr], 1, TxCP[:, fr])
    # GENERATE AND ADD AWGN
    P_signal = np.mean(np.abs(TxCPChannel) ** 2)
    P_noise = P_signal * 10 ** (-SNR_BER_db[j] / 10)
    noise_norm = np.sqrt(0.5) * (np.random.randn(NFFT + CP, Frames)
                                 + 1j * np.random.randn(NFFT + CP, Frames))
    En = np.sqrt(P_noise) * noise_norm
    TxCPChannelNoise = TxCPChannel + En
    # Remove CP
    TxChannelNoise = TxCPChannelNoise[CP:, :]
    # Channel equalization (MMSE)
    mmse = np.conj(fading_coeffs) / ((np.abs(fading_coeffs) ** 2) + (P_noise / P_signal))
Example #25
def ft(self,axes,tolerance = 1e-5,cosine=False,verbose = False,unitary=None,**kwargs):
    r"""This performs a Fourier transform along the axes identified by the string or list of strings `axes`.

    It adjusts normalization and units so that the result conforms to
            :math:`\tilde{s}(f)=\int_{x_{min}}^{x_{max}} s(t) e^{-i 2 \pi f t} dt`

    **pre-FT**, we use the axis to cyclically permute :math:`t=0` to the first index

    **post-FT**, we assume that the data has previously been IFT'd.
    If this is the case, passing ``shift=True`` will cause an error.
    If this is not the case, passing ``shift=True`` generates a standard fftshift.
    ``shift=None`` will choose True if and only if this is not the case.

    Parameters
    ----------
    pad : int or boolean
        `pad` specifies a zero-filling.  If it's a number, then it gives
        the length of the zero-filled dimension.  If it is just `True`,
        then the size of the dimension is determined by rounding the
        dimension size up to the nearest integral power of 2.
    automix : double
        `automix` can be set to the approximate frequency value.  This is
        useful for the specific case where the data has been captured on a
        sampling scope and is severely aliased.
    cosine : boolean
        yields a sum of the fft and ifft, for a cosine transform
    unitary : boolean (None)
        return a result that is vector-unitary
    """
    if self.data.dtype == np.float64:
        self.data = np.complex128(self.data) # everything is done assuming complex data
    #{{{ process arguments
    axes = self._possibly_one_axis(axes)
    if (isinstance(axes, str)):
        axes = [axes]
    #{{{ check and set the FT property
    for j in axes:
        if j not in self.dimlabels: raise ValueError("the axis "+j+" doesn't exist")
        if self.get_ft_prop(j):
            errmsg = "This data has been FT'd along "+str(j)
            raise ValueError(errmsg + "-- you can't FT"
                    " again unless you explicitly"
                    " .set_prop('FT',None), which is"
                    " probably not what you want to do")
        self.set_ft_prop(j) # sets the "FT" property to "true"
    #}}}
    if 'shiftornot' in kwargs:
        raise ValueError("shiftornot is obsolete --> use shift instead")
    shift,pad,automix = process_kwargs([
        ('shift',False),
        ('pad',False),
        ('automix',False),
        ],
        kwargs)
    if not (isinstance(shift, list)):
        shift = [shift]*len(axes)
    if not (isinstance(unitary, list)):
        unitary = [unitary]*len(axes)
    for j in range(0,len(axes)):
        #print("called FT on",axes[j],", unitary",unitary[j],"and property",
        #        self.get_ft_prop(axes[j],"unitary"))
        if self.get_ft_prop(axes[j],"unitary") is None: # has not been called
            if unitary[j] is None:
                unitary[j]=False
            self.set_ft_prop(axes[j],"unitary",unitary[j])
        else:
            if unitary[j] is None:
                unitary[j] = self.get_ft_prop(axes[j],"unitary")
            else:
                raise ValueError("Call ft or ift with unitary only the first time, and it will be set thereafter.\nOR if you really want to override mid-way use self.set_ft_prop(axisname,\"unitary\",True/False) before calling ft or ift")
        #print("for",axes[j],"set to",unitary[j])
    #}}}
    for j in range(0,len(axes)):
        do_post_shift = False
        p2_post_discrepancy = None
        p2_pre_discrepancy = None
        #{{{ if this is NOT the source data, I need to mark it as not alias-safe!
        if self.get_ft_prop(axes[j],['start','freq']) is None:#  this is the same
            #           as saying that I have NOT run .ift() on this data yet,
            #           meaning that I must have started with time as the
            #           source data, and am now constructing an artificial and
            #           possibly aliased frequency-domain
            if self.get_ft_prop(axes[j],['freq','not','aliased']) is not True:#
                #                              has been manually set/overridden
                self.set_ft_prop(axes[j],['freq','not','aliased'],False)
            #{{{ but on the other hand, I am taking the time as the
            #    not-aliased "source", so as long as I haven't explicitly set it as
            #    unsafe, declare it "safe"
            if self.get_ft_prop(axes[j],['time','not','aliased']) is not False:
                self.set_ft_prop(axes[j],['time','not','aliased'])
            #}}}
        #}}}
        #{{{ grab the axes, set the units, and determine padded_length
        if self.get_units(axes[j]) is not None:
            self.set_units(axes[j],self._ft_conj(self.get_units(axes[j])))
        try:
            thisaxis = self.dimlabels.index(axes[j])
        except:
            raise RuntimeError(strm("I can't find",axes[j],
                "dimlabels is: ",self.dimlabels))
        padded_length = self.data.shape[thisaxis]
        if pad is True:
            padded_length = int(2**(np.ceil(np.log2(padded_length))))
        elif pad:
            padded_length = pad
        u = self.getaxis(axes[j]) # here, u is time
        if u is None:
            raise ValueError("seems to be no axis for"+repr(axes[j])+"set an axis before you try to FT")
        #}}}
        self.set_ft_prop(axes[j],['start','time'],u[0]) # before anything else, store the start time
        #{{{ calculate new axis and post-IFT shift..
        #       Calculate it first, in case it's non-integral.  Also note that
        #       I calculate the post-discrepancy here
        #{{{ need to calculate du and all checks here so I can calculate new u
        du = check_ascending_axis(u,tolerance,"In order to perform FT or IFT")
        self.set_ft_prop(axes[j],['dt'],du)
        #}}}
        dv = np.double(1) / du / np.double(padded_length) # so padded length gives the SW
        self.set_ft_prop(axes[j],['df'],dv)
        v = r_[0:padded_length] * dv # v is the name of the *new* axis.  Note
        #   that we stop one index before the SW, which is what we want
        desired_startpoint = self.get_ft_prop(axes[j],['start','freq'])
        if desired_startpoint is not None:# FT_start_freq is set
            if shift[j]:
                raise ValueError("you are not allowed to shift an array for"
                        " which the index for $f=0$ has already been"
                        " determined!")
            if verbose: print("check for p2_post_discrepancy")
            if verbose: print("desired startpoint",desired_startpoint)
            p2_post,p2_post_discrepancy,alias_shift_post = _find_index(v,origin = desired_startpoint,verbose = verbose)
            if verbose: print("p2_post,p2_post_discrepancy,alias_shift_post,v at p2_post, and v at p2_post-1:", p2_post, p2_post_discrepancy, alias_shift_post, v[p2_post], v[p2_post - 1])
            if p2_post != 0 or p2_post_discrepancy is not None:
                do_post_shift = True
            else:
                do_post_shift = False
        elif shift[j] or shift[j] is None: # a default fftshift
            if automix:
                raise ValueError("You can't use automix and shift at the same time --> it doesn't make sense")
            n = padded_length
            p2_post = (n+1) // 2 # this is the starting index of what starts out as the second half (// is floordiv) -- copied from scipy -- this essentially rounds up (by default assigning more negative frequencies than positive ones)
            alias_shift_post = 0
            do_post_shift = True
            #{{{ if I start with an alias-safe axis, and perform a
            #    traditional shift, I get an alias-safe axis
            if self.get_ft_prop(axes[j],'time_not_aliased'):
                self.set_ft_prop(axes[j],'freq_not_aliased')
            #}}}
        #}}}
        #{{{ I might need to perform a phase-shift now...
        #          in order to adjust for a final u-axis that doesn't pass
        #          exactly through zero
        if p2_post_discrepancy is not None:
            asrt_msg = r"""You are trying to shift the frequency axis by (%d+%g) du (%g).

            In order to shift by a frequency that is not
            integral w.r.t. the frequency resolution step, you need to be sure
            that the time-domain spectrum is not aliased.
            This is typically achieved by starting from a time domain spectrum and
            generating the frequency domain by an FT.
            If you **know** by other means that the time-domain spectrum
            is not aliased, you can also set the `time_not_aliased` FT property
            to `True`."""%(p2_post, p2_post_discrepancy, du)
            assert self.get_ft_prop(axes[j],['time','not','aliased']),(asrt_msg)
            assert abs(p2_post_discrepancy)<abs(dv),("I expect the discrepancy to be"
                    " smaller than dv ({:0.2f}), but it's {:0.2f} -- what's going"
                    " on??").format(dv,p2_post_discrepancy)
            phaseshift =  self.fromaxis(axes[j],
                    lambda q: np.exp(-1j*2*pi*q*p2_post_discrepancy))
            try:
                self.data *= phaseshift.data
            except TypeError as e:
                if self.data.dtype != 'complex128':
                    raise TypeError("You tried to ft nddata that was of type "+str(self.data.dtype))
                else:
                    raise TypeError(e)
        #}}}
        #{{{ do zero-filling manually and first, so I can properly pre-shift the data
        if not pad is False:
            newdata = list(self.data.shape)
            newdata[thisaxis] = padded_length
            targetslice = [slice(None,None,None)] * len(newdata)
            targetslice[thisaxis] = slice(None,self.data.shape[thisaxis])
            targetslice = tuple(targetslice)
            newdata = np.zeros(newdata,dtype = self.data.dtype)
            newdata[targetslice] = self.data
            self.data = newdata
            u = r_[0:padded_length] * du + u[0]
        #}}}
        #{{{ pre-FT shift so that we start at u=0
        p2_pre,p2_pre_discrepancy,alias_shift_pre = _find_index(u,verbose = verbose)
        self._ft_shift(thisaxis,p2_pre)
        #}}}
        #{{{ the actual (I)FFT portion of the routine
        if cosine:
            self.data = np.fft.fft(self.data,
                    axis=thisaxis) + np.fft.ifft(self.data,
                            axis=thisaxis)
            self.data *= 0.5
        else:
            self.data = np.fft.fft(self.data,
                                axis=thisaxis)
        self.axis_coords[thisaxis] = v
        #}}}
        #{{{ actually run the post-FT shift
        if do_post_shift:
            self._ft_shift(thisaxis,p2_post,shift_axis = True)
            if alias_shift_post != 0:
                self.axis_coords[thisaxis] += alias_shift_post
        #}}}
        #{{{ finally, I must allow for the possibility that "p2_post" in the
        #    pre-shift was not actually at zero, but at some other value, and I
        #    must apply a phase shift to reflect the fact that I need to add
        #    back that time
        if p2_post_discrepancy is not None:
            if verbose: print("adjusting axis by",p2_post_discrepancy,"where du is",u[1]-u[0])
            self.axis_coords[thisaxis][:] += p2_post_discrepancy # reflect the
            #   p2_post_discrepancy that we have already incorporated via a
            #   phase-shift above
        #}}}
        #{{{ adjust the normalization appropriately
        if unitary[j]:
            self.data /= np.sqrt(padded_length)
        else:
            self.data *= du # this gives the units in the integral noted in the docstring
        #}}}
        #{{{ finally, if "p2_pre" for the pre-shift didn't correspond exactly to
        #       zero, then the pre-ft data was shifted, and I must reflect
        #       that by performing a post-ft phase shift
        if p2_pre_discrepancy is not None:
            assert abs(p2_pre_discrepancy)<abs(du) or np.isclose(
                    abs(p2_pre_discrepancy),abs(du)),("I expect the discrepancy to be"
                    " smaller than du ({:0.5g}), but it's {:0.5g} -- what's going"
                    " on??").format(du,p2_pre_discrepancy)
            result = self * self.fromaxis(axes[j],
                    lambda f: np.exp(1j*2*pi*f*p2_pre_discrepancy))
            self.data = result.data
        #}}}
        if automix:
            sw = 1.0/du
            carrier = abs(self).mean_all_but(axes[j]).argmax(axes[j]).data
            if verbose: print("I find carrier at",carrier)
            add_to_axis = (automix - carrier) / sw
            if verbose: print("which is",add_to_axis,"times the sw of",sw,"off from the automix value of",automix)
            x = self.getaxis(axes[j])
            x += np.round(add_to_axis)*sw
    return self
"""

import numpy as np

data_filename = 'data.txt'

time_data = []
freq_data = []
data_file = open(data_filename, 'r')
error_file = open('error_for_' + data_filename, 'w+')
for line in data_file:
    line = line.split(';')
    # assumes whitespace-separated numeric values on each side of the ';'
    time_data.append([float(v) for v in line[0].split()])
    freq_data.append([float(v) for v in line[1].split()])

num_entries = len(time_data)
num_values = len(time_data[0])
for i in range(num_entries):
    freq_data_check = np.real(np.fft.fft(time_data[i]))
    time_data_check = np.real(np.fft.ifft(freq_data[i]))
    for j in range(num_values):
        error_file.write(str(freq_data[i][j] - freq_data_check[j]))
        error_file.write(' ')
    error_file.write('     ')
    for j in range(num_values):
        error_file.write(str(time_data[i][j] - time_data_check[j]))
        error_file.write(' ')
    error_file.write('\n')

data_file.close()
error_file.close()
Example #27
def correlation(x, h):
    '''This function calculates the correlation function between the data (x)
    and a normalized template (h).'''
    func = numpy.fft.fft(x) * numpy.conjugate(numpy.fft.fft(h))
    return numpy.fft.ifft(func)
Example #28
def cor(f, g):
    # circular cross-correlation of f and g via the correlation theorem
    xft = np.fft.fft(f)
    yft = np.fft.fft(g)
    conyft = np.conj(yft)
    return np.real(np.fft.ifft(xft * conyft))
Example #29
def smoothGaussian(array, sigma):
    # Gaussian window applied in the frequency domain (note the minus sign in the exponent)
    kernel = 1/numpy.sqrt(2*numpy.pi)/sigma * numpy.exp(
        -numpy.arange(0, len(array), 1.0)**2/2.0/sigma**2)
    return numpy.fft.ifft(numpy.fft.fft(array) * kernel)
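Usage sketch: the ifft output is complex, so callers typically keep only the real part:

import numpy

data = numpy.random.rand(64)
smoothed = smoothGaussian(data, sigma=8.0).real
print(smoothed.shape)  # (64,)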